diff --git "a/4876.jsonl" "b/4876.jsonl" new file mode 100644--- /dev/null +++ "b/4876.jsonl" @@ -0,0 +1,669 @@ +{"seq_id":"645961062","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nimport shopkeepercontrol\r\nfrom product_db import DataBase\r\nimport Qdialog\r\nfrom login_db import DataBase as db\r\nfrom shopkeeper_db import DataBase as db2\r\nfrom admin_db import acceptPR\r\n\r\nclass Ui_MainWindowaddproducts(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(800, 600)\r\n MainWindow.setMinimumSize(QtCore.QSize(800, 800))\r\n MainWindow.setMaximumSize(QtCore.QSize(800, 800))\r\n MainWindow.setStyleSheet(\"background-color:rgb(0, 0, 0)\")\r\n self.MainWindow = MainWindow\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\r\n self.lineEdit.setGeometry(QtCore.QRect(-10, -20, 811, 71))\r\n self.lineEdit.setStyleSheet(\"background-color: rgb(26, 179, 179);\")\r\n self.lineEdit.setObjectName(\"lineEdit\")\r\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\r\n self.lineEdit_2.setGeometry(QtCore.QRect(-10, 30, 651, 41))\r\n self.lineEdit_2.setStyleSheet(\"background-color:rgb(255, 170, 0);\")\r\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\r\n self.label_7 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_7.setGeometry(QtCore.QRect(610, 80, 181, 81))\r\n self.label_7.setMinimumSize(QtCore.QSize(141, 61))\r\n self.label_7.setText(\"\")\r\n self.label_7.setPixmap(QtGui.QPixmap(\"logo22.jpg\"))\r\n self.label_7.setScaledContents(True)\r\n self.label_7.setObjectName(\"label_7\")\r\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButton.setGeometry(QtCore.QRect(610, 630, 111, 61))\r\n font = QtGui.QFont()\r\n font.setPointSize(14)\r\n self.pushButton.setFont(font)\r\n self.pushButton.setStyleSheet(\"QPushButton{\\n\"\r\n\" border-radius: 15px;\\n\"\r\n\"\\n\"\r\n\" background-color:rgb(255, 170, 0);\\n\"\r\n\"\\n\"\r\n\" color:black;\\n\"\r\n\"}\")\r\n self.pushButton.setObjectName(\"pushButton\")\r\n self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)\r\n self.lineEdit_3.setGeometry(QtCore.QRect(80, 250, 641, 351))\r\n font = QtGui.QFont()\r\n font.setPointSize(10)\r\n self.lineEdit_3.setFont(font)\r\n self.lineEdit_3.setStyleSheet(\"QLineEdit{\\n\"\r\n\" border-radius: 15px;\\n\"\r\n\"\\n\"\r\n\" background-color: rgb(143, 214, 214);\\n\"\r\n\"\\n\"\r\n\" color: rgb(255, 255, 255);\\n\"\r\n\"}\\n\"\r\n\"\\n\"\r\n\"\")\r\n self.lineEdit_3.setObjectName(\"lineEdit_3\")\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setGeometry(QtCore.QRect(80, 150, 301, 71))\r\n font = QtGui.QFont()\r\n font.setPointSize(14)\r\n self.label.setFont(font)\r\n self.label.setStyleSheet(\"QLabel{\\n\"\r\n\" border-radius: 15px;\\n\"\r\n\"\\n\"\r\n\" background-color:rgb(255, 170, 0);\\n\"\r\n\"\\n\"\r\n\" color:black;\\n\"\r\n\"}\")\r\n self.label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label.setObjectName(\"label\")\r\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButton_2.setGeometry(QtCore.QRect(480, 630, 111, 61))\r\n font = QtGui.QFont()\r\n font.setPointSize(14)\r\n self.pushButton_2.setFont(font)\r\n self.pushButton_2.setStyleSheet(\"QPushButton{\\n\"\r\n\" border-radius: 15px;\\n\"\r\n\"\\n\"\r\n\" background-color:rgb(143, 214, 214);\\n\"\r\n\"\\n\"\r\n\" color:black;\\n\"\r\n\"}\")\r\n 
self.pushButton_2.setObjectName(\"pushButton_2\")\r\n        MainWindow.setCentralWidget(self.centralwidget)\r\n        self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))\r\n        self.menubar.setObjectName(\"menubar\")\r\n        MainWindow.setMenuBar(self.menubar)\r\n        self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n        self.statusbar.setObjectName(\"statusbar\")\r\n        MainWindow.setStatusBar(self.statusbar)\r\n\r\n        self.retranslateUi(MainWindow)\r\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n    def retranslateUi(self, MainWindow):\r\n        _translate = QtCore.QCoreApplication.translate\r\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n        self.pushButton.setText(_translate(\"MainWindow\", \"send\"))\r\n        self.pushButton.clicked.connect(self.add_product)\r\n        self.label.setText(_translate(\"MainWindow\", \"adding products\"))\r\n        self.pushButton_2.setText(_translate(\"MainWindow\", \"back\"))\r\n        self.pushButton_2.clicked.connect(self.gotowin16)\r\n        self.lineEdit_3.setText(_translate(\"MainWindow\",\"{\\\"name\\\":\\\"your products name\\\",\\\"price\\\":\\\"your products price\\\",\\\"discription\\\":\\\"your products description\\\",\\\"location\\\":\\\"your location\\\"}\"))\r\n\r\n    def gotowin16(self): # back to shopkeepercontrol\r\n        self.mw14 = QtWidgets.QMainWindow()\r\n        self.win16 = shopkeepercontrol.Ui_MainWindowshopkeepercontrol()\r\n        self.win16.setupUi(self.mw14)\r\n        self.MainWindow.hide()\r\n        self.mw14.show() \r\n\r\n    def add_product(self,ID):\r\n        text = self.lineEdit_3.text()\r\n        dict = eval(text)\r\n        dict[\"ID\"]= DataBase().counter()\r\n        acceptPR().request(dict)\r\n        if dict[\"name\"] != \"your products name\":\r\n            self.mw1 = QtWidgets.QMainWindow()\r\n            self.win3 = Qdialog.Ui_MainWindow()\r\n            self.win3.setupUi(self.mw1)\r\n            self.mw1.show()\r\n        else:\r\n            pass\r\n        \r\n\r\n        \r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    app = QtWidgets.QApplication(sys.argv)\r\n    MainWindow = QtWidgets.QMainWindow()\r\n    ui = Ui_MainWindowaddproducts()\r\n    ui.setupUi(MainWindow)\r\n    MainWindow.show()\r\n    sys.exit(app.exec_())\r\n    ","sub_path":"addproducts.py","file_name":"addproducts.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"386721174","text":"import pygame\nimport math\nimport time\nimport threading\nimport statistics\nimport random\n\nfrom swarmz_simulator.vector import Vector\nfrom swarmz_simulator.drone import Drone\nfrom swarmz_simulator.simulator import Simulator\nfrom swarmz_simulator.display import Display, EventDisplay\nfrom swarmz_simulator.object import Object\nfrom swarmz_simulator.environment import Environment\nfrom swarmz_simulator.radar import Radar \n\nclass MyDrone(Drone):\n    \"\"\"How to create a specific drone\n    \"\"\"\n    def __init__(self,position:Vector):\n        super().__init__(position, Vector(0.5,0), 0.2,name=\"R2D2\",color=(50,50,100))\n        self.radar=Radar(10,[0,math.pi/2,math.pi, -math.pi/2, math.pi/6, -math.pi/6])\n    \n    def IA(self,**kwargs):\n        \"\"\"create one specific AI\n        \"\"\"\n        dt=kwargs.get('dt', None)\n        \n        if(self.arrive):\n            self.next_speed.setNorm(0)\n            self.color=(0,0,0)\n        else:\n            if(self.Dt>5):\n                self.Dt=0\n                if(self.goal!=None):\n                    self.next_speed.setCap(Vector(self.goal.x-self.next_position.x, self.goal.y-self.next_position.y).cap()+(2*random.random()-1)*math.pi/4)\n                else:\n                    self.next_speed=self.speed\n            if(min(self.radar.rays)<2):\n                if(self.speed.norm_2()<0.1):\n                    self.next_speed.setNorm(0.1)\n                else:\n                    
self.next_speed.setNorm(self.speed.norm_2()*0.9)\n\n            else:\n                if(self.speed.norm_2()<1):\n                    self.next_speed.setNorm(self.speed.norm_2()*1.1)\n                else:\n                    self.next_speed.setNorm(1)\n\n            self.Dt+=dt\n\n        if(not self.arrive):\n            self.T+=dt\n\n\n\n\nif __name__ == '__main__':\n    \n    if(True):\n        drone1=MyDrone(Vector(0,0))\n        drone2=Drone(Vector(1,1),Vector(0.5,0.5),0.2)\n        drone3=Drone(Vector(0,1),Vector(0,-0.5),0.2)\n        drone4=Drone(Vector(4,1),Vector(0,0.5),0.2)\n        drone5=Drone(Vector(4,0),Vector(0.5,0),0.2)\n        drone6=Drone(Vector(2,1),Vector(0.5,0),0.2)\n\n\n        # create the obstacles, as lists of corner points\n        obj=Object([Vector(5,5), Vector(3,5), Vector(3,3), Vector(5,3), Vector(7,5)])\n        obj1=Object([Vector(-10,5), Vector(-13,5), Vector(-5,3)])\n        obj2=Object([Vector(-5,-5), Vector(-3,-5), Vector(-3,-3), Vector(-5,-3), Vector(-7,-5)])\n\n        goal=Object([Vector(10,10), Vector(8,10), Vector(8,8), Vector(10,8)])\n\n        # create the simulation environment\n        env=Environment([drone1, drone2,drone3, drone4,drone5, drone6], [obj, obj1, obj2], goal)\n        env.save(\"env_1\")\n    else:\n        env=Environment()\n        env.load(\"env_1\")\n    \n    eventFenetre=EventDisplay()\n    eventFenetre.coefTime=1/10\n\n    simu=Simulator(env, eventFenetre)\n\n    fenetre = Display(env, eventFenetre)\n    \n    simu.start()\n    fenetre.run()\n    simu.join()\n    pygame.quit()\n    \n\n","sub_path":"Simulator/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"254598840","text":"#!/usr/bin/python3\n# open the file\nfo = open(\"PSO.txt\", \"r\")\n\nll = []\nfor line in fo.readlines():  # read each line in turn\n\tl = line.split()\n\tls = []\n\tfor e in l:\n\t\tls.append(float(e))\n\tll.append(ls)\n\nfo.close()\n\nlp = []\nfor i in range(len(ll)):\n\tls = ll[i]\n\tinterval = len(ls) / 100\n\tcindex = 0\n\tcurC = 0\n\tlc = []\n\tfor index in range(len(ls)):\n\t\tif int(index / interval) != cindex:\n\t\t\tlc.append(curC)\n\t\t\tcurC = 0\n\t\t\tcindex = int(index / interval)\n\t\telse:\n\t\t\tcurC = curC + ls[index]\n\tlc.append(curC)\n\t# print(len(lc))\n\t# print(lc)\n\tlp.append(lc)\n\n\ncc = []\nfor i in range(100):\n\tc = 0\n\tfor j in range(100):\n\t\tc = c + lp[j][i]\n\tc = c / 100\n\tcc.append(c)\n\nf = open(\"PSO_processed.txt\", \"a\")\ns = ''\nfor c in cc:\n\ts = s + str(c) + ' '\nprint(s, file=f)\nf.close()\n\n# f = open(\"montage_processed.txt\", \"a\")\n#\n#\n#\n# for i in range(len(lp)):\n# \tlc = lp[i]\n# \ts = ''\n# \tfor c in lc:\n# \t\ts = s + str(c) + ' '\n# \tprint(s, file=f)\n#\n# f.close()\n#\n#\n#","sub_path":"heatmap/PSO_dataprocess.py","file_name":"PSO_dataprocess.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"521986036","text":"from mlxtend.data import loadlocal_mnist\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Activation, Dense, Flatten, BatchNormalization, Conv2D, MaxPool2D\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.metrics import categorical_crossentropy\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nimport shutil\nimport random\nimport glob\nimport warnings\nimport os\nfrom hilbert_flatten import *\n\ntrain_path = 'data/mnist/train'\ntest_path = 'data/mnist/test'\nclasses = ['0', '1', 
'2', '3', '4', '5', '6', '7', '8', '9']\n\n# ONLY RUN THESE IF HAVEN'T ALREADY CREATED IMAGES\nif not os.path.isdir(train_path):\n    train_image_data, train_labels = loadlocal_mnist(\n        images_path='data/mnist/train-images.idx3-ubyte',\n        labels_path='data/mnist/train-labels.idx1-ubyte')\n\n    assert len(train_image_data) == 60000\n    assert len(train_image_data[0]) == 784\n\n    os.chdir('data/mnist')\n    os.makedirs('train')\n    for i in range(10):\n        os.makedirs('train/'+str(i))\n\n    for i in range(1000):\n        img = Image.new('L', (28, 28))\n        img.putdata(train_image_data[i])\n        img.save('train/'+str(train_labels[i]) +\n                 '/'+str(train_labels[i])+'.'+str(i)+'.jpg')\n\n    os.chdir('../..')\n\nif not os.path.isdir(test_path):\n    test_image_data, test_labels = loadlocal_mnist(\n        images_path='data/mnist/t10k-images.idx3-ubyte',\n        labels_path='data/mnist/t10k-labels.idx1-ubyte')\n\n    assert len(test_image_data) == 10000\n    assert len(test_image_data[0]) == 784\n\n    os.chdir('data/mnist')\n    os.makedirs('test')\n    for i in range(10):\n        os.makedirs('test/'+str(i))\n\n    for i in range(100):\n        img = Image.new('L', (28, 28))\n        img.putdata(test_image_data[i])\n        img.save('test/'+str(test_labels[i])+'/' +\n                 str(test_labels[i])+'.'+str(i)+'.jpg')\n\n    os.chdir('../..')\n\n\ndatagen = ImageDataGenerator(rescale=1./255)\n\ntrain_batches = datagen.flow_from_directory(directory=train_path,\n                                            target_size=(28, 28),\n                                            color_mode=\"grayscale\",\n                                            classes=classes,\n                                            batch_size=10)\ntest_batches = datagen.flow_from_directory(directory=test_path,\n                                           target_size=(28, 28),\n                                           color_mode=\"grayscale\",\n                                           classes=classes,\n                                           batch_size=10,\n                                           shuffle=False)\n\nassert train_batches.n > 0\nassert test_batches.n > 0\nassert train_batches.num_classes == test_batches.num_classes == 10\n\nif not os.path.isfile(\"models/mnist.h5\") or True:\n    # https://www.kaggle.com/cdeotte/how-to-choose-cnn-architecture-mnist\n    model = Sequential([\n        Conv2D(24, kernel_size=5, padding='same', activation='relu',\n               input_shape=(28, 28, 1)),\n        MaxPool2D(),\n        Conv2D(48, kernel_size=3, padding='same', activation='relu'),\n        MaxPool2D(),\n        HilbertFlatten(),\n        Dense(64, activation='relu'),\n        Dense(train_batches.num_classes, activation='softmax'),\n    ])\n\n    model.summary()\n    print(\"compiling model\")\n    model.compile(optimizer=Adam(learning_rate=0.001),\n                  loss='categorical_crossentropy', metrics=['accuracy'])\n    print(\"training model\")\n    model.fit(x=train_batches, validation_data=test_batches,\n              epochs=10, verbose=2)\n    model.save(\"models/mnist.h5\")\nelse:\n    print(\"loading model\")\n    model = load_model(\"models/mnist.h5\")\n","sub_path":"src/mnist_class.py","file_name":"mnist_class.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"325172574","text":"from tools.db import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyecharts import Bar\nfrom pyecharts import Pie\nimport os\n\n'''Distribution of data analysis, machine learning and AI related jobs across major cities'''\ndef city(keywords):\n\n    Post = lagou_post_info\n\n\n    a = '%' + keywords[0] + '%'\n    b = '%' + keywords[1] + '%'\n    c = '%' + keywords[2] + '%'\n\n\n    mydata = Post.select(Post.city, fn.COUNT(Post.id).alias('num')).where(Post.post_name % a| Post.post_name % b| Post.post_name % c).group_by(Post.city)\n    labels = ['北京', '上海', '广州', '深圳', '杭州', '天津', '西安', '苏州', '武汉', '厦门', '长沙', '成都']\n    number = [0]*len(labels)\n    for item in mydata:\n        if item.city in labels:\n            dex = labels.index(item.city)\n            number[dex] = item.num\n    print(number)\n\n    # bar chart\n    title = 
u'{}职位在各大城市的分布'.format(keywords[0])\n    bar = Bar(title, title_text_size=25, title_pos=\"center\", width=800, height=500)\n    bar.add(\"\", labels, number, is_random=True, xaxis_label_textsize=20, yaxis_label_textsize=20, is_label_show=True, label_text_size=15, legend_text_size=15, legend_orient='vertical',legend_pos='right', levisual_text_color=\"#fff\", symbol_size=15)\n    bar.show_config()\n\n    bar.render('./templates/city_bar.html')\n    # bar.render('a.jpg')\n    # pie chart\n    pie = Pie(title, title_text_size=25, title_pos=\"center\", width=800, height=500)\n    pie.add(\"\", labels, number, is_label_show=True, label_text_size=15, legend_text_size=15, legend_orient='vertical',legend_pos='right', levisual_text_color=\"#fff\", symbol_size=15)\n    pie.show_config()\n    # pie.render('../templates/city_pie.html')\n    pie.render('./templates/city_pie.html')\n    # bar.render('b.jpg')\n\n    # plt.savefig(\"../static/img/ana2.png\")\n\nif __name__ == '__main__':\n    keywords = ['数据', '人工智能', '机器学习']\n    # word_cloud(keywords)\n    city(keywords)\n\n","sub_path":"analysis/city_analyse.py","file_name":"city_analyse.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"27016042","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/20 14:35\n# @Author : 何涌\n# @File : test_renewMonthTicketProcess.py\n\nimport pytest\nimport allure\nfrom common.utils import YmlUtils\nfrom Api.parkingManage_service.monthTicket_service.monthTicketBill import MonthTicketBill\nfrom Api.parkingManage_service.monthTicket_service.monthTicketConfig import MonthTicketConfig\nfrom Api.sentry_service.carInOutHandle import CarInOutHandle\nfrom Api.information_service.information import Information\nfrom common.Assert import Assertions\nfrom Api.cloudparking_service import cloudparking_service\n\nargs_item = \"send_data,expect\"\ntest_data,case_desc = YmlUtils(\"/test_data/parkingManage/monthTicket/renewMonthTicketProcess.yml\").getData\n@pytest.mark.parametrize(args_item, test_data)\n@allure.feature(\"智泊云-月票管理模块\")\n@allure.story('月票过期-续费(续费日期包含当前时间)-车辆进出就月票车')\nclass TestRenewMmonthTicketProcess():\n    \"\"\"Month ticket expired: the car enters/exits as a regular car; after renewing the ticket (renewal period covering the current time), the car enters/exits as a month-ticket car\"\"\"\n\n    def test_createMonthTicketConfig(self, userLogin, send_data, expect):\n        \"\"\"Create a custom month-ticket type\"\"\"\n        re = MonthTicketConfig(userLogin).createMonthTicketConfig(send_data['parkName'], send_data['ticketTypeName'], send_data['renewMethod'], send_data['validTo'])\n        result = re\n        Assertions().assert_in_text(result, expect[\"createMonthTicketConfigMsg\"])\n\n    def test_openMonthTicketBill(self, userLogin, send_data, expect):\n        \"\"\"Open a month ticket with the custom type - already expired\"\"\"\n        re = MonthTicketBill(userLogin).openMonthTicketBill(send_data['carNum'], send_data['ticketTypeName'], send_data['timeperiodListStr'])\n        result = re\n        Assertions().assert_in_text(result, expect[\"openMonthTicketBillMsg\"])\n\n    def test_mockCarIn(self,sentryLogin,send_data,expect):\n        \"\"\"Simulate a car entering the lot\"\"\"\n        re = cloudparking_service().mockCarInOut(send_data[\"carNum\"],0,send_data[\"inClientID\"])\n        result = re\n        Assertions().assert_in_text(result['screen'], expect[\"mockCarInScreenMsg\"])\n        Assertions().assert_in_text(result['voice'], expect[\"mockCarInVoiceMsg\"])\n\n    def test_mockCarOut(self,send_data, expect):\n        \"\"\"Simulate a car leaving the lot\"\"\"\n        re = cloudparking_service().mockCarInOut(send_data[\"carNum\"], 1, send_data[\"outClientID\"])\n        result = re\n        Assertions().assert_in_text(result['screen'], expect[\"mockCarOutScreenMsg\"])\n        
Assertions().assert_in_text(result['voice'], expect[\"mockCarOutVoiceMsg\"])\n\n    def test_sentryPay(self,sentryLogin,send_data,expect):\n        \"\"\"Pay at the sentry box - check the car-leaving info\"\"\"\n        re = CarInOutHandle(sentryLogin).carInOutHandle(send_data['carNum'],send_data['carOutHandleType'],'${mytest.carOut_jobId}')\n        result = re\n        Assertions().assert_in_text(result['screen'], expect['sentryPayMsg'])\n\n    def test_renewMonthTicketBill(self, userLogin, send_data, expect):\n        \"\"\"Renew the month ticket\"\"\"\n        re = MonthTicketBill(userLogin).renewMonthTicketBill(send_data['parkName'], send_data['carNum'], send_data['status'])\n        result = re\n        Assertions().assert_in_text(result, expect[\"renewMonthTicketBillMsg\"])\n\n    def test_mockMonthTicketCarIn(self,send_data,expect):\n        \"\"\"Simulate a month-ticket car entering\"\"\"\n        re = cloudparking_service().mockCarInOut(send_data[\"carNum\"],0,send_data[\"inClientID\"])\n        result = re\n        Assertions().assert_in_text(result['screen'], expect[\"mockMonthTicketCarInScreenMsg\"])\n        Assertions().assert_in_text(result['voice'], expect[\"mockMonthTicketCarInVoiceMsg\"])\n\n    def test_mockMonthTicketCarOut(self, send_data, expect):\n        \"\"\"Simulate a month-ticket car leaving\"\"\"\n        re = cloudparking_service().mockCarInOut(send_data[\"carNum\"],1,send_data[\"outClientID\"])\n        result = re\n        Assertions().assert_in_text(result['screen'], expect[\"mockMonthTicketCarOutScreenMsg\"])\n        Assertions().assert_in_text(result['voice'], expect[\"mockMonthTicketCarOutVoiceMsg\"])\n\n    def test_checkCarInOutHistoryVIPType(self,userLogin,send_data,expect):\n        \"\"\"Check the VIP type in the enter/leave records\"\"\"\n        re = Information(userLogin).getCarLeaveHistory(send_data[\"parkName\"],send_data[\"carNum\"])\n        result = re[0]\n        Assertions().assert_in_text(result['enterVipTypeStr'], expect[\"checkCarInOutHistoryVIPTypeMsg\"])\n        Assertions().assert_in_text(result['leaveVipTypeStr'], expect[\"checkCarInOutHistoryVIPTypeMsg\"])\n","sub_path":"test_suite/parkingManage/monthTicket/test_renewMonthTicketProcess.py","file_name":"test_renewMonthTicketProcess.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"646601323","text":"from contextlib import contextmanager\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.engine import Engine, Connection\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, Session, scoped_session\n\nfrom nivo_api.settings import Config\n\n\n# Global Instance of metadata\nmetadata: MetaData = MetaData()\ndb_engine: Engine = create_engine(Config.DB_URL)\nBase = declarative_base(metadata=metadata)\nDBSession = scoped_session(sessionmaker(bind=db_engine))\n\n\n@contextmanager\ndef connection_scope(engine: Engine = None) -> Connection:\n    engine = engine or db_engine\n    conn = engine.connect()\n    try:\n        yield conn\n    except Exception:\n        raise\n    finally:\n        conn.close()\n\n\n@contextmanager\ndef session_scope():\n    sess: Session = DBSession()\n    try:\n        yield sess\n    except:\n        sess.rollback()\n        raise\n    finally:\n        DBSession.remove()\n","sub_path":"api/nivo_api/core/db/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"512411927","text":"import numpy as np\nimport pandas as pd\nfrom randomForest import single_decision_tree, limit_max_depth, with_dataset\n\n\n# Use a single decision tree\ndef no_maximumDepth(rseed: int, x, y):\n    tree = single_decision_tree(rseed, x, y)\n    print(f'Decision tree has 
{tree.tree_.node_count} nodes without maximum depth {tree.tree_.max_depth}.')\n    print(f'Model Accuracy: {tree.score(x, y)}')\n\n# Give a maximum depth\ndef maximunDepth(rseed: int, max_depth: int, x, y):\n    tree = limit_max_depth(rseed, max_depth, x, y)\n    print(f'Decision tree has {tree.tree_.node_count} nodes with maximum depth {tree.tree_.max_depth}.')\n    print(f'Model Accuracy: {tree.score(x, y)}')\n\n# Read dataset from .data or csv file\ndef readDataset(path: str, rseed: int):\n    result, result1, result2 = with_dataset(path, rseed)\n    print(\"Confusion Matrix:\")\n    print(result)\n    print(\"Classification Report:\",)\n    print (result1)\n    print(\"Accuracy:\",result2)\n\n\n# Return columns from the csv file and identify missing data/anomalies\ndef infoDataAndCollumns(path: str, numberOfLines: int):\n    features = pd.read_csv(path)\n    collumns = features.head(numberOfLines)\n    shape = features.shape\n    # Descriptive statistics for each column\n    describe = features.describe()\n    return collumns, shape, describe\n    \n\n\ndef main():\n    #set random seed to ensure reproducible runs\n    rseed = 50\n    x = np.array([[2, 2], [2, 1], [2, 3], [1, 2], [1, 1], [3, 3]])\n    y = np.array([0, 1, 1, 1, 0, 1])\n    max_depth = 2\n    \n    no_maximumDepth(rseed, x, y)\n    \n    maximunDepth(rseed, max_depth, x, y)\n    \n    path = \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"\n    readDataset(path, rseed)\n\n    numberOfLines = 5\n    print(\"Information about columns with %d\" %(numberOfLines) + \" lines\")\n    collumnsInformation, shape, describe = infoDataAndCollumns(path, numberOfLines)\n    print(collumnsInformation)\n\n    print(\"Identify Anomalies/ Missing Data:\")\n    print(shape)\n\n    print(\"Descriptive statistics for each column\")\n    print(describe)\n\n\n\n\nmain()\n","sub_path":"randomForest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"618710432","text":"from flask import request, render_template, redirect, url_for\n\nfrom project import app\nfrom project.com.controller.LoginController import adminLoginSession, adminLogoutSession\nfrom project.com.dao.BloodGroupDAO import BloodGroupDAO\nfrom project.com.vo.BloodGroupVO import BloodGroupVO\n\n\n@app.route('/admin/loadBloodGroup', methods=['GET'])\ndef adminLoadBloodGroup():\n    try:\n        if adminLoginSession() == \"admin\":\n            return render_template('admin/addBloodGroup.html')\n        else:\n            return adminLogoutSession()\n    except Exception as ex:\n        print(ex)\n\n\n@app.route('/admin/insertBloodGroup', methods=['POST', 'GET'])\ndef adminInsertBloodGroup():\n    try:\n        if adminLoginSession() == \"admin\":\n            bloodGroupName = request.form['bloodGroupName']\n\n            bloodGroupVO = BloodGroupVO()\n            bloodGroupDAO = BloodGroupDAO()\n\n            bloodGroupVO.bloodGroupName = bloodGroupName\n\n            bloodGroupDAO.insertBloodGroup(bloodGroupVO)\n\n            return redirect(url_for('adminViewBloodGroup'))\n        else:\n            return adminLogoutSession()\n\n    except Exception as ex:\n        print(ex)\n\n\n@app.route('/admin/viewBloodGroup', methods=['GET'])\ndef adminViewBloodGroup():\n    try:\n        if adminLoginSession() == \"admin\":\n            bloodGroupDAO = BloodGroupDAO()\n            bloodGroupVOList = bloodGroupDAO.viewBloodGroup()\n            print(\"-------------\", bloodGroupVOList)\n            return render_template('admin/viewBloodGroup.html', bloodGroupVOList=bloodGroupVOList)\n        else:\n            return adminLogoutSession()\n    except Exception as ex:\n        print(ex)\n\n\n@app.route('/admin/deleteBloodGroup', methods=['GET'])\ndef adminDeleteBloodGroup():\n    
try:\n if adminLoginSession() == \"admin\":\n bloodGroupVO = BloodGroupVO()\n bloodGroupDAO = BloodGroupDAO()\n\n bloodGroupId = request.args.get('bloodGroupId')\n\n bloodGroupVO.bloodGroupId = bloodGroupId\n bloodGroupDAO.deleteBloodGroup(bloodGroupVO)\n return redirect(url_for('adminViewBloodGroup'))\n else:\n return adminLogoutSession()\n except Exception as ex:\n print(ex)\n\n\n@app.route('/admin/editBloodGroup', methods=['GET'])\ndef adminEditBloodGroup():\n try:\n if adminLoginSession() == \"admin\":\n bloodGroupVO = BloodGroupVO()\n\n bloodGroupDAO = BloodGroupDAO()\n\n bloodGroupId = request.args.get('bloodGroupId')\n print(bloodGroupId)\n\n bloodGroupVO.bloodGroupId = bloodGroupId\n\n bloodGroupVOList = bloodGroupDAO.editBloodGroup(bloodGroupVO)\n\n print(\"=======categoryVOList=======\", bloodGroupVOList)\n\n print(\"=======type of categoryVOList=======\", type(bloodGroupVOList))\n\n return render_template('admin/editBloodGroup.html', bloodGroupVOList=bloodGroupVOList)\n else:\n return adminLogoutSession()\n except Exception as ex:\n print(ex)\n\n\n@app.route('/admin/updateBloodGroup', methods=['POST', 'GET'])\ndef adminUpdateBloodGroup():\n try:\n if adminLoginSession() == \"admin\":\n bloodGroupId = request.form['bloodGroupId']\n bloodGroupName = request.form['bloodGroupName']\n\n bloodGroupVO = BloodGroupVO()\n bloodGroupDAO = BloodGroupDAO()\n\n bloodGroupVO.bloodGroupId = bloodGroupId\n bloodGroupVO.bloodGroupName = bloodGroupName\n\n bloodGroupDAO.updateBloodgroup(bloodGroupVO)\n\n return redirect(url_for('adminViewBloodGroup'))\n else:\n return adminLogoutSession()\n except Exception as ex:\n print(ex)\n","sub_path":"project/com/controller/BloodGroupController.py","file_name":"BloodGroupController.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"42033939","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n# Create your views here.\n\n\ndef sendanemail(request):\n if request.method == \"POST\":\n to = request.POST.get('toemail')\n content = request.POST.get ('content')\n send_mail(\n #subject\n 'testing',\n #msg\n content,\n #from email \n settings.EMAIL_HOST_USER,\n #rec list\n [to],\n fail_silently=True\n )\n print(to,\"\\n\" ,content)\n return render(\n request,\n 'email.html',\n {\n 'title': 'send an email'\n }\n )\n else:\n return render(\n request,\n 'email.html',\n {\n 'title': 'send an email'\n }\n )","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"477240029","text":"fname = input(\"Enter file name: \")\nfh = open(fname)\nlst = list()\nfor line in fh:\n x = line.split()\n if x in lst:\n continue\n else:\n for i in x:\n if i not in lst:\n \tlst.append(i)\n\nlst.sort()\nprint(lst)\n","sub_path":"Python Data Structures/Assignment 8-4.py","file_name":"Assignment 8-4.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"570925926","text":"import numpy as np\r\nimport cv2\r\n\r\ndef convolve(dataMat,kernel):\r\n m,n = dataMat.shape\r\n mk,nk = kernel.shape\r\n newMat = np.ones(((m - mk + 1),(n - nk + 1)))\r\n tempMat = np.ones(((mk),(nk)))\r\n for row in range(m - mk + 1):\r\n for col in range(n - nk + 1):\r\n for m_k in range(mk):\r\n for n_k in range(nk):\r\n 
tempMat[m_k,n_k] = dataMat[(row + m_k),(col + n_k)] * kernel[m_k,n_k]\r\n newMat[row,col] = np.sum(tempMat)\r\n return newMat\r\n\r\nimg = cv2.imread(\"lena.jpg\",0)\r\nkernel = np.array([[-1,-1,-1],\r\n [-1,8,-1],\r\n [-1,-1,-1]])\r\n\r\nlightImg = convolve(img,kernel)\r\ncv2.imshow(\"img\",lightImg)\r\ncv2.waitKey(0)","sub_path":"05/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"437079546","text":"########################################################################\n# $Header: /var/local/cvsroot/4Suite/Ft/Xml/Xslt/__init__.py,v 1.39.2.1 2006-08-23 14:41:32 uogbuji Exp $\n\"\"\"\n4XSLT initialization and XSLT pattern tools\n\nThese are the most common steps for using this XSLT engine:\n\n 1. Create an Ft.Xml.Xslt.Processor.Processor instance:\n\n from Ft.Xml.Xslt import Processor\n processor = Processor.Processor()\n\n 2. Prepare Ft.Xml.InputSource instances (via their factory)\n for the source XML and for the stylesheet.\n\n 3. Call the Processor's appendStylesheet() method, passing it\n the stylesheet's InputSource.\n\n 4. Call the Processor's run() method, passing it the source\n document's InputSource.\n\nYou can call run() multiple times on different InputSources. When you're\ndone, the processor's reset() method can be used to restore a clean slate\n(at which point you would have to append stylesheets to the processor\nagain), but in most circumstances it is actually less expensive to just\ncreate a new Processor instance.\n\nCopyright 2003 Fourthought, Inc. (USA).\nDetailed license and copyright information: http://4suite.org/COPYRIGHT\nProject home, documentation, distributions: http://4suite.org/\n\"\"\"\n\n# the order of imports here is very important\n\nXSL_NAMESPACE = u'http://www.w3.org/1999/XSL/Transform'\n\nimport MessageSource\nError = MessageSource.Error\n\n# -- XSLT exceptions -------------------------------------------------\n\nfrom Ft import FtException\nclass XsltException(FtException):\n def __init__(self, errorCode, *args):\n FtException.__init__(self, errorCode,\n MessageSource.g_errorMessages, args)\n return\n\nclass XsltParserException(XsltException):\n \"\"\"\n The exception raised when an error is encountered during the parsing\n of a stylesheet. 
This eliminates the need for location information\n within each error message.\n \"\"\"\n def __init__(self, code, locator, *args):\n XsltException.__init__(self, code, *args)\n\n # Add location information to the message\n msg = MessageSource.POSITION_INFO % (locator.getSystemId(),\n locator.getLineNumber(),\n locator.getColumnNumber(),\n self.message)\n self.message = msg\n return\n\nclass XsltRuntimeException(XsltException):\n def __init__(self, code, xsltelement, *args):\n XsltException.__init__(self, code, *args)\n\n # Add location information to the message\n baseUri = xsltelement.baseUri or '?'\n line = xsltelement.lineNumber or '?'\n col = xsltelement.columnNumber or '?'\n msg = MessageSource.POSITION_INFO % (baseUri, line, col, self.message)\n self.message = msg\n return\n\n\n# -- element classifications -----------------------------------------\n\nclass CategoryTypes:\n \"\"\"Collection of constants making up the categories of XSLT element\"\"\"\n INSTRUCTION = 0\n TOP_LEVEL_ELEMENT = 1\n RESULT_ELEMENT = 2\n\nimport XPatternParserc as XPatternParser\nparser = XPatternParser\n\n# -- XPattern API ----------------------------------------------------\n\nfrom xml.dom import Node\nclass PatternList:\n \"\"\"\n PatternList is a class that is useful for people writing code to\n process XSLT patterns, especially in groups.\n \"\"\"\n PARSER = parser.new()\n def __init__(self, patterns, namespaces=None):\n \"\"\"\n patterns - A list of strings that make up either compiled pattern\n objects or valid XSLT patterns in string form.\n It must be either all of one form or all of another\n namespaces - A namespace dictionary - { prefix: uri, ... } -\n to be used for setting up expressions in the pattern\n \"\"\"\n self.namespaces = namespaces or {}\n if hasattr(patterns[0], \"match\"):\n self._patterns = patterns\n else:\n self._patterns = [ self.PARSER.parse(p) for p in patterns ]\n self._shortcuts = [ p.getShortcuts(self.namespaces) for p in self._patterns ]\n self._lookup_table = {}\n self._patternMapping = {}\n i = 0\n for shortcut_list in self._shortcuts:\n for ((subpattern, axis_type), (node_type, expanded_name)) in shortcut_list:\n self._patternMapping[subpattern] = self._patterns[i]\n if node_type == Node.ELEMENT_NODE:\n if not self._lookup_table.has_key(node_type):\n self._lookup_table[node_type] = {}\n if not self._lookup_table[node_type].has_key(expanded_name):\n self._lookup_table[node_type][expanded_name] = []\n self._lookup_table[node_type][expanded_name].append((subpattern, axis_type, self._patterns[i]))\n else:\n if not self._lookup_table.has_key(node_type):\n self._lookup_table[node_type] = []\n self._lookup_table[node_type].append((subpattern, axis_type, self._patterns[i]))\n i = i + 1\n self.length = len(self._patterns)\n return\n\n #def matchAll(nodes):\n # \"\"\"Returns the subset of given nodes that match all patterns\"\"\"\n # return [ n for n in nodes if [ ]\n # ]\n\n def lookup(self, node, context=None):\n \"\"\"Return the patterns that match the node (as a list)\"\"\"\n if node.nodeType == Node.ELEMENT_NODE:\n #lookup result is a dict for elements\n narrowed_namecheck = self._lookup_table.get(Node.ELEMENT_NODE, {})\n narrowed = narrowed_namecheck.get((node.namespaceURI, node.localName), [])\n #lookup of (ns,local) None is for the element wildcard case '*'\n narrowed.extend(narrowed_namecheck.get(None, []))\n else:\n #lookup result is a list for non-elements\n narrowed = self._lookup_table.get(node.nodeType, [])\n if node.nodeType not in [ Node.DOCUMENT_NODE, 
Node.ATTRIBUTE_NODE ]:\n #lookup of nodeType None is for the wildcard case 'node()'\n narrowed.extend(self._lookup_table.get(None, []))\n if not narrowed: return []\n if not context:\n from Ft.Xml.XPath.Context import Context\n context = Context(node.ownerDocument, 1, 1, processorNss=self.namespaces)\n return [ p[2] for p in narrowed if p[0].match(context, node, p[1]) ]\n\n def lookupAsSet(self, node, context=None):\n \"\"\"Returns the patterns that match the node (as a set [dictionary])\"\"\"\n if node.nodeType == Node.ELEMENT_NODE:\n #lookup result is a dict for elements\n narrowed_namecheck = self._lookup_table.get(Node.ELEMENT_NODE, {})\n narrowed = narrowed_namecheck.get((node.namespaceURI, node.localName), [])\n #lookup of (ns,local) None is for the element wildcard case '*'\n narrowed.extend(narrowed_namecheck.get(None, []))\n else:\n #lookup result is a list for non-elements\n narrowed = self._lookup_table.get(node.nodeType, [])\n if node.nodeType not in [ Node.DOCUMENT_NODE, Node.ATTRIBUTE_NODE ]:\n #lookup of nodeType None is for the wildcard case 'node()'\n narrowed.extend(self._lookup_table.get(None, []))\n if not narrowed: return {}\n matched_patterns = {}\n if not context: context = Context(node.ownerDocument, 1, 1)\n for p in narrowed:\n if p[0].match(context, node, p[1]):\n matched_patterns[p[2]] = None\n return matched_patterns\n\n def xsltKeyPrep(self, context, node):\n \"\"\"\n A special utility used for XSLT key preparation.\n A list of lists is returned. The outer list corresponds\n to the patterns. Each inner list is either [node] or []\n depending on whether or not the node matched the corresponding\n pattern.\n \"\"\"\n matching_patterns = self.lookupAsSet(node, context)\n return [ [node]*matching_patterns.has_key(p) for p in self._patterns ]\n #return [ [node]*(s[1][0] == node.nodeType and (s[1][0] != Node.ELEMENT_NODE or s[1][1] == (node.namespaceURI, node.localName)) and s[0][0].match(context, node)) for s in self._shortcuts ]\n\n# -- Convenience API ----------------------------------------------------\n\nimport os\n\ndef _AttachStylesheetToProcessor(stylesheet, processor):\n from Ft.Lib import Uri, Uuid\n from Ft.Xml import InputSource\n from Ft.Xml.Catalog import IsXml\n if isinstance(stylesheet, InputSource.InputSource):\n processor.appendStylesheet(stylesheet)\n #elif stylesheet.find(XSL_NAMESPACE) > 0 and IsXml(stylesheet):\n #Note: this would break in pathological cases such as a user\n #passing in a stylesheet string with only an XInclude to the actual XSLT\n elif IsXml(stylesheet):\n #Create dummy Uri to use as base\n dummy_uri = 'urn:uuid:'+Uuid.UuidAsString(Uuid.GenerateUuid())\n processor.appendStylesheet(\n InputSource.DefaultFactory.fromString(stylesheet, dummy_uri)\n )\n elif hasattr(stylesheet, 'read'):\n #Create dummy Uri to use as base\n dummy_uri = 'urn:uuid:'+Uuid.UuidAsString(Uuid.GenerateUuid())\n processor.appendStylesheet(\n InputSource.DefaultFactory.fromStream(stylesheet, dummy_uri)\n )\n elif Uri.IsAbsolute(stylesheet): # or not os.path.isfile(stylesheet):\n processor.appendStylesheet(\n InputSource.DefaultFactory.fromUri(stylesheet)\n )\n else:\n processor.appendStylesheet(\n InputSource.DefaultFactory.fromUri(Uri.OsPathToUri(stylesheet))\n )\n return\n\ndef Transform(source, stylesheet, params=None, output=None):\n \"\"\"\n Convenience function for applying an XSLT transform. 
Returns\n a string.\n\n source - XML source document in the form of a a string (not Unicode\n object), file-like object (stream), file path, URI or\n Ft.Xml.InputSource.InputSource instance. If string or stream\n it must be self-contained XML (i.e. not requiring access to\n any other resource such as external entities or includes)\n stylesheet - XSLT document in the form of a string, stream, URL,\n file path or Ft.Xml.InputSource.InputSource instance\n params - optional dictionary of stylesheet parameters, the keys of\n which may be given as unicode objects if they have no namespace,\n or as (uri, localname) tuples if they do.\n output - optional file-like object to which output is written (incrementally, as processed)\n \"\"\"\n #do the imports within the function: a tad bit less efficient, but\n #avoid circular crap\n from Ft.Xml.Xslt import Processor\n from Ft.Xml import InputSource\n from Ft.Lib import Uri, Uuid\n from Ft.Xml.Lib.XmlString import IsXml\n\n params = params or {}\n processor = Processor.Processor()\n _AttachStylesheetToProcessor(stylesheet, processor)\n if isinstance(source, InputSource.InputSource):\n pass\n elif hasattr(source, 'read'):\n #Create dummy Uri to use as base\n dummy_uri = 'urn:uuid:'+Uuid.UuidAsString(Uuid.GenerateUuid())\n source = InputSource.DefaultFactory.fromStream(source, dummy_uri)\n elif IsXml(source):\n dummy_uri = 'urn:uuid:'+Uuid.UuidAsString(Uuid.GenerateUuid())\n source = InputSource.DefaultFactory.fromString(source, dummy_uri)\n elif Uri.IsAbsolute(source): # or not os.path.isfile(source):\n source = InputSource.DefaultFactory.fromUri(source)\n else:\n source = InputSource.DefaultFactory.fromUri(Uri.OsPathToUri(source))\n return processor.run(source, topLevelParams=params, outputStream=output)\n\n\ndef TransformPath(source, stylesheet):\n import warnings\n warnings.warn(\"You are using the deprecated Ft.Xml.Xslt.TransformPath function, Please use Ft.Xml.Xslt.Transform instead\", DeprecationWarning, 2)\n\n return Transform(source, stylesheet)\n\n\n# this import must come after all the above\nfrom StylesheetTree import XsltElement\n\n","sub_path":"dependencies/src/4Suite-XML-1.0.2/Ft/Xml/Xslt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"439120243","text":"\"\"\"\nbyceps.services.tourney.avatar.service\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:Copyright: 2006-2021 Jochen Kupperschmidt\n:License: Revised BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom uuid import UUID\nfrom typing import BinaryIO, Set\n\nfrom ....database import db\nfrom ....typing import PartyID, UserID\nfrom ....util.image import create_thumbnail\nfrom ....util.image.models import Dimensions, ImageType\nfrom ....util import upload\n\nfrom ...image import service as image_service\nfrom ...image.service import ImageTypeProhibited # Provide to view functions.\nfrom ...user import service as user_service\n\nfrom .dbmodels import Avatar\n\n\nMAXIMUM_DIMENSIONS = Dimensions(512, 512)\n\n\ndef create_avatar_image(\n party_id: PartyID,\n creator_id: UserID,\n stream: BinaryIO,\n allowed_types: Set[ImageType],\n *,\n maximum_dimensions: Dimensions = MAXIMUM_DIMENSIONS,\n) -> Avatar:\n \"\"\"Create a new avatar image.\n\n Raise `ImageTypeProhibited` if the stream data is not of one the\n allowed types.\n \"\"\"\n creator = user_service.find_active_user(creator_id)\n if creator is None:\n raise user_service.UserIdRejected(creator_id)\n\n 
image_type = image_service.determine_image_type(stream, allowed_types)\n image_dimensions = image_service.determine_dimensions(stream)\n\n image_too_large = image_dimensions > maximum_dimensions\n if image_too_large or not image_dimensions.is_square:\n stream = create_thumbnail(\n stream, image_type.name, maximum_dimensions, force_square=True\n )\n\n avatar = Avatar(party_id, creator_id, image_type)\n db.session.add(avatar)\n db.session.commit()\n\n # Might raise `FileExistsError`.\n upload.store(stream, avatar.path, create_parent_path_if_nonexistent=True)\n\n return avatar\n\n\ndef delete_avatar_image(avatar_id: UUID) -> None:\n \"\"\"Delete the avatar image.\"\"\"\n avatar = Avatar.query.get(avatar_id)\n\n if avatar is None:\n raise ValueError('Unknown avatar ID')\n\n # Delete file.\n upload.delete(avatar.path)\n\n # Delete database record.\n db.session.delete(avatar)\n db.session.commit()\n","sub_path":"byceps/services/tourney/avatar/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"36282490","text":"def solve(P, groups):\n mod = [0]*P\n for i in groups: mod[i%P] += 1\n ans = 0\n ans += mod[0]\n x = P-1\n while x >= P/2:\n y = P-x\n if x == y: \n if mod[x]%2:\n ans += (mod[x]-1)//2\n mod[x] = 1\n else:\n ans += mod[x]//2\n mod[x] = 0\n else:\n pairs = min(mod[x], mod[y])\n ans += pairs\n mod[x] -= pairs\n mod[y] -= pairs\n x -= 1\n leftover = 0\n st, end, inc = 1, P, 1\n if P == 4 and mod[1] != 0:\n st, end, inc = 3, 0, -1\n for i in range(st, end, inc):\n if mod[i] == 0: continue\n while mod[i] > 0:\n if leftover == 0: \n ans += 1\n leftover += i\n leftover %= P\n mod[i] -= 1\n return ans\n\nfor case in range(1, eval(input()) + 1):\n N, P = map(int, input().split())\n groups = list(map(int, input().split()))\n print(\"Case #{}: {}\".format(case, solve(P, groups)))\n","sub_path":"codejam/2017/Round2/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"384390687","text":"from itertools import cycle\ndef optimalPoint(m,d):\n start = 0\n i = 1\n j=0\n p = m[0] - d[0] + m[1]\n while i< len(m)-1:\n j = 0\n p = p - d[i] + m[i + 1]\n if i == len(m)-1:\n p = p - d[i + 1]\n if p<=0:\n start+=1\n j=-1\n else:\n return start\n i+=1\n\n if j==-1:\n return -1\n\n # for i in range(1,len(m)-1):\n # p = p -d[i]+m[i+1]\n # print(p)\n # # for k in range(i):\n # # p = p - d[k]+m[k+1]\n # # print(p)\n # p = p - d[i+1]\n # if i\n # print(p)\n\n\n# magic = [10,6,3,8,1]\n# dist = [1,3,8,4,3]\n# magic = [2,4,5,2]\n# dist = [4,3,1,3]\nmagic = [8,4,1,9]\ndist = [10,9,3,5]\nr = optimalPoint(magic,dist)\nprint(\"Result=\",r)","sub_path":"Learning/vanhack/Aladdin.py","file_name":"Aladdin.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"324472457","text":"import os\n\nfrom PyQt5.QtCore import QSize\nfrom PyQt5.QtGui import QMovie\nfrom PyQt5.QtWidgets import QHBoxLayout\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QWidget\n\ndir = os.path.dirname(os.path.abspath(__file__))\n\n\nclass BuddingWidget(QWidget):\n\n def __init__(self, parent):\n super(BuddingWidget, self).__init__(parent)\n\n self.currentMovieIndex = -1\n self.totalMovieSize = 5\n\n self.layout = QHBoxLayout(self)\n self.movieLabel = QLabel(self)\n self.showNextMovie()\n 
self.layout.addWidget(self.movieLabel)\n self.setLayout(self.layout)\n\n def mousePressEvent(self, event):\n print(\"pressed\")\n self.showNextMovie()\n\n def showNextMovie(self):\n movie = self.getNextMovie()\n movie.setScaledSize(QSize(200, 200))\n self.movieLabel.setMovie(movie)\n movie.start()\n\n def mouseReleaseEvent(self, event):\n print(\"released\")\n\n def getNextMovie(self):\n self.currentMovieIndex += 1\n if self.currentMovieIndex == self.totalMovieSize:\n self.currentMovieIndex = 0\n filename = \"test_\" + str(self.currentMovieIndex) + \".gif\"\n return QMovie(os.path.join(dir, filename))\n","sub_path":"buddingWidget.py","file_name":"buddingWidget.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"66018621","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2017 Beyond Blockchain Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport sys\nimport os\nimport datetime\nimport binascii\nimport json\nfrom datetime import datetime\n\nsys.path.extend([\"../../../\"])\nfrom bbc1.common import bbclib\nfrom bbc1.app import bbc_app\nfrom bbc1.core.bbc_config import DEFAULT_CORE_PORT\nfrom bbc1.common.message_key_types import KeyType\nfrom bbc1.common.bbc_error import *\n\nPRIVATE_KEY = \".private_key\"\nPUBLIC_KEY = \".public_key\"\n\ndomain_id = bbclib.get_new_id(\"coindomain\", include_timestamp=False)\nasset_group_id = bbclib.get_new_id(\"coin_asset_group\", include_timestamp=False)\nuser_id = None\n\nkey_pair = None\nbbc_app_client = None\n\n\ndef domain_setup():\n tmpclient = bbc_app.BBcAppClient(port=DEFAULT_CORE_PORT, loglevel=\"all\")\n tmpclient.domain_setup(domain_id, \"simple_cluster\")\n tmpclient.callback.synchronize()\n tmpclient.unregister_from_core()\n print(\"Domain %s is created.\" % (binascii.b2a_hex(domain_id[:4]).decode()))\n print(\"Setup is done.\")\n\n\ndef setup_bbc_client():\n bbc_app_client = bbc_app.BBcAppClient(port=DEFAULT_CORE_PORT, loglevel=\"all\")\n bbc_app_client.set_user_id(user_id)\n bbc_app_client.set_domain_id(domain_id)\n bbc_app_client.set_asset_group_id(asset_group_id)\n bbc_app_client.set_callback(bbc_app.Callback())\n ret = bbc_app_client.register_to_core()\n assert ret\n return bbc_app_client\n\n\ndef store_proc(data, approver_id, txid=None):\n bbc_app_client = setup_bbc_client()\n transaction = bbclib.make_transaction_for_base_asset(asset_group_id=asset_group_id, event_num=1)\n transaction.events[0].add(mandatory_approver=approver_id, asset_group_id=asset_group_id)\n transaction.events[0].asset.add(user_id=user_id, asset_body=data)\n if txid:\n bbc_app_client.search_transaction(txid)\n response_data = bbc_app_client.callback.synchronize()\n if response_data[KeyType.status] < ESUCCESS:\n print(\"ERROR: \", response_data[KeyType.reason].decode())\n sys.exit(0)\n prev_tx = bbclib.recover_transaction_object_from_rawdata(response_data[KeyType.transaction_data])\n reference = bbclib.add_reference_to_transaction(asset_group_id, transaction, prev_tx, 0)\n sig 
= transaction.sign(key_type=bbclib.KeyType.ECDSA_SECP256k1,\n                               private_key=key_pair.private_key,\n                               public_key=key_pair.public_key)\n        transaction.references[0].add_signature(user_id=user_id, signature=sig)\n    else:\n        sig = transaction.sign(key_type=bbclib.KeyType.ECDSA_SECP256k1,\n                               private_key=key_pair.private_key,\n                               public_key=key_pair.public_key)\n        transaction.add_signature(signature=sig)\n    transaction.digest()\n    print(transaction)\n\n    ret = bbc_app_client.insert_transaction(transaction)\n    assert ret\n    response_data = bbc_app_client.callback.synchronize()\n    if response_data[KeyType.status] < ESUCCESS:\n        print(\"ERROR: \", response_data[KeyType.reason].decode())\n        sys.exit(0)\n    print(\"TxID: %s\" % binascii.b2a_hex(response_data[KeyType.transaction_id]))\n    print(\"AsID: %s\" % binascii.b2a_hex(transaction.events[0].asset.asset_id))\n\n    bbc_app.store_id_mappings(data, asset_group_id,\n                              transaction_id=response_data[KeyType.transaction_id],\n                              asset_ids=transaction.events[0].asset.asset_id)\n    txinfo = [transaction.transaction_id, transaction.events[0].asset.asset_id]\n    return txinfo\n\n\ndef get_coindata(asid):\n    bbc_app_client = setup_bbc_client()\n    asid = binascii.unhexlify(asid)\n    ret = bbc_app_client.search_asset(asset_group_id, asid)\n    assert ret\n    response_data = bbc_app_client.callback.synchronize()\n    if response_data[KeyType.status] < ESUCCESS:\n        print(\"ERROR: \", response_data[KeyType.reason].decode())\n        sys.exit(0)\n    get_transaction = bbclib.BBcTransaction()\n    get_transaction.deserialize(response_data[KeyType.transaction_data])\n\n    retdata = get_transaction.events[0].asset.asset_body\n    refdata = get_transaction.references\n    print(\"get: %s\" % retdata)\n    print(\"ref: %s\" % refdata)\n    return retdata\n\ndef create_keypair():\n    keypair = bbclib.KeyPair()\n    keypair.generate()\n    with open(PRIVATE_KEY, \"wb\") as fout:\n        fout.write(keypair.private_key)\n    with open(PUBLIC_KEY, \"wb\") as fout:\n        fout.write(keypair.public_key)\n    print(\"created private_key and public_key : %s, %s\" % (PRIVATE_KEY, PUBLIC_KEY))\n\ndef registration(price):\n    data = {\"owner\":binascii.b2a_hex(user_id).decode(\"UTF-8\"),\"price\":price,\"date\":datetime.now().strftime('%s')}\n    jsondata = json.dumps(data)\n    store_proc(data=jsondata, approver_id=user_id ,txid=None)\n    print(\"Coin is generated!: %s\" % jsondata)\n\ndef chown(new_owner,asid):\n    asset = json.loads(get_coindata(asid).decode(\"UTF-8\"))\n    if asset[\"owner\"] != binascii.b2a_hex(user_id).decode(\"UTF-8\"):\n        print(\"Owner of this coin is not you\")\n        return 0\n    asset[\"owner\"] = new_owner\n    asset[\"date\"] = datetime.now().strftime('%s')\n    data = json.dumps(asset)\n\n    bbc_app_client = setup_bbc_client()\n    ret = bbc_app_client.search_asset(asset_group_id, binascii.unhexlify(asid))\n    assert ret\n    response_data = bbc_app_client.callback.synchronize()\n    if response_data[KeyType.status] < ESUCCESS:\n        print(\"ERROR: \", response_data[KeyType.reason].decode())\n        sys.exit(0)\n    get_transaction = bbclib.BBcTransaction()\n    get_transaction.deserialize(response_data[KeyType.transaction_data])\n    transaction_id = get_transaction.transaction_id\n    transaction_info = store_proc(data, approver_id=binascii.unhexlify(new_owner),txid=transaction_id)\n    bbc_app_client.send_message(transaction_info, binascii.unhexlify(new_owner))\n    print(\"Transfer is done.....\")\n\n\nif __name__ == '__main__':\n    if(not os.path.exists(PRIVATE_KEY) and not os.path.exists(PUBLIC_KEY)):\n        create_keypair()\n    with open(PRIVATE_KEY, \"rb\") as fin:\n        private_key = fin.read()\n    with open(PUBLIC_KEY, 
\"rb\") as fin:\n public_key = fin.read()\n\n domain_setup()\n\n key_pair = bbclib.KeyPair(privkey=private_key, pubkey=public_key)\n user_id = bbclib.get_new_id(str(binascii.b2a_hex(key_pair.public_key)), include_timestamp=False)\n print(\"welcome to sample coin manage!\")\n print(\"Your id: %s\" % binascii.b2a_hex(user_id))\n print(\"Type command(help to see command list)\")\n while(True):\n command = input('>> ')\n if command == \"help\":\n print(\"generate - generate coin\")\n print(\"get - get coin info\")\n print(\"send - send coin\")\n print(\"recieve - wait for recieve coin\")\n print(\"exit - exit coin manage\")\n elif command == \"generate\":\n print(\"Type price generate coin\")\n address = input('>> ')\n registration(address)\n elif command == \"get\":\n print(\"Type AsID of coin\")\n asid = input('>> ')\n get_coindata(asid)\n elif command == \"send\":\n print(\"Type AsID of coin\")\n asid = input('>> ')\n asset = json.loads(get_coindata(asid).decode(\"UTF-8\"))\n assert asset\n print(\"You want send coin(%s)\"% asid)\n print(\"Type new owner ID\")\n new_owner = input('>> ')\n chown(new_owner,asid)\n elif command == \"exit\":\n print(\"bye\")\n sys.exit(0)\n else:\n print(\"command \\\"\"+command+\"\\\" is not found\")\n","sub_path":"examples/escrow/user2/coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":8100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"570006423","text":"import FWCore.ParameterSet.Config as cms\nimport os,glob,sys\n\nprocess = cms.Process('ANA')\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n #fileNames=cms.untracked.vstring('/store/mc/RunIISpring16MiniAODv2/TTJets_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/MINIAODSIM/PUSpring16RAWAODSIM_reHLT_80X_mcRun2_asymptotic_v14-v1/00000/001AFDCE-C33B-E611-B032-0025905D1C54.root',\n #fileNames=cms.untracked.vstring('/store/mc/RunIISpring16MiniAODv2/DYJetsToLL_Pt-400To650_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/MINIAODSIM/PUSpring16RAWAODSIM_reHLT_80X_mcRun2_asymptotic_v14_ext1-v1/50000/20BE2D8A-E758-E611-99DE-002590DC03AC.root',\n fileNames=cms.untracked.vstring('file:pickevents_highMlljj.root',\n 'file:pickevents2.root'\n )\n)\n\noutfile = 'highMlljj.root'\n\nprocess.TFileService = cms.Service('TFileService', fileName = cms.string(outfile))\n\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\n\nprocess.GlobalTag.globaltag = '80X_dataRun2_2016SeptRepro_v7'\n\njetID=\" (neutralHadronEnergyFraction<0.90 && neutralEmEnergyFraction<0.9 && (chargedMultiplicity+neutralMultiplicity)>1 && muonEnergyFraction<0.8) && ((abs(eta)<=2.4 && chargedHadronEnergyFraction>0 && chargedMultiplicity>0 && chargedEmEnergyFraction<0.90) || abs(eta)>2.4)\"\n\n\nfrom PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection\n\nupdateJetCollection(\n process,\n jetSource = cms.InputTag('slimmedJets'),\n labelName = 
'UpdatedJEC',\n   jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute']), 'None')  # Do not forget 'L2L3Residual' on data!\n   )\n\nupdateJetCollection.jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L2L3Residual', 'L3Absolute']), 'None')\n\nprocess.tightJets = cms.EDFilter(\"PATJetSelector\",\n                                 src = cms.InputTag(\"updatedPatJetsUpdatedJEC\"),\n                                 cut = cms.string(jetID),\n)\n\nmuonIDIso=' isolationR03().sumPt/pt < 0.1 &&  userInt(\"highPtID\") == 1'\n\nprocess.tunePMuons = cms.EDProducer(\"TunePMuonProducer\",\n\t\t #src = cms.InputTag(\"removeBadAndCloneGlobalMuons\")\n\t\t src = cms.InputTag(\"slimmedMuons\")\n\t\t)\n\nprocess.tunePIDIsoMuons = cms.EDFilter(\"PATMuonSelector\",\n                                       src = cms.InputTag(\"tunePMuons\"),\n                                       cut = cms.string(muonIDIso),\n                                       )\n\n\nprocess.highMlljj = cms.EDAnalyzer('highMlljj',\n                                   muons_src = cms.InputTag('tunePIDIsoMuons'),\n                                   jets_src = cms.InputTag('tightJets'),\n                                   electrons_src = cms.InputTag('slimmedElectrons'),\n                                   photons_src = cms.InputTag('slimmedPhotons'),\n                                   met_src = cms.InputTag('slimmedMETs'),\n)\n\n\nprocess.p = cms.Path(process.patJetCorrFactorsUpdatedJEC * process.updatedPatJetsUpdatedJEC * process.tightJets * process.tunePMuons * process.tunePIDIsoMuons * process.highMlljj)\n","sub_path":"test/highMlljj.py","file_name":"highMlljj.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"534540","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 21 00:01:19 2013\r\n\r\n@author: feihoo87\r\n\"\"\"\r\nimport struct\r\nfrom pyvisa import vpp43\r\nfrom pyvisa.visa import VisaIOWarning\r\n\r\nfrom .error import DriverError\r\nfrom .base import CounterDriver\r\n\r\ndef convert(data, mode, expd):\r\n    \"\"\"Convert the binary data block read out into a list.\r\n    \r\n    Adapted from the C code on page 53 of the manual.\r\n    \r\n    data : the raw binary block read out, as a list of 16-bit integers\r\n    mode : obtained by sending the 'MODE?' command to the SR620\r\n    expd : obtained by sending the 'EXPD?' 
命令获取\n \"\"\"\n factors=[1.05963812934E-14, 1.05963812934E-14,\n 1.05963812934E-14, 1.24900090270331E-9,\n 1.05963812934E-14, 8.3819032E-8, 0.00390625]\n\n l = len(data)/4\n ret = []\n for i in range(l):\n sign = (data[i*4+3] < 0)\n v = 0.0\n for j in range(4):\n v = v*65536.0 + (sign and ~data[i*4+3-j] or data[i*4+3-j])\n v = v*factors[mode]\n if expd != 0:\n v = v*1.0e-3\n if sign:\n v = -v-1.0\n ret.append(v)\n return ret\n\nclass sr620(CounterDriver):\n \"\"\"SR620的驱动\"\"\"\n def __init__(self, ins, *args, **kwargs):\n \"\"\"\n ins : 设备对象\n \"\"\"\n super(sr620, self).__init__(ins, *args, **kwargs)\n self.ins = ins\n \n def __read(self, size):\n \"\"\"从设备读取 size 个字节\"\"\"\n try:\n buff = vpp43.read(self.ins.vi, size)\n except VisaIOWarning:\n pass\n return buff\n\n def dump(self, n):\n \"\"\"用快速模式从仪器上读取n个数\"\"\"\n block = ''\n max = 5000\n\n loop = n / max\n last = n % max\n \n self.exec_(\"*CLS\")\n try:\n if last < n:\n for i in range(loop):\n self.exec_(\"BDMP %d\" % max)\n block += self.__read(8*max)\n self.exec_(\"BDMP %d\" % last)\n block += self.__read(8*last)\n except:\n #raise DriverError(self.ins, code=1, msg=\"SR620 Dump Error\")\n raise\n tmp = list(struct.unpack(\"%dH\" % 4*n, block))\n mode = int(self.ask('MODE?'))\n expd = int(self.ask('EXPD?'))\n self.ins.write(\"AUTM 1\")\n return convert(tmp, mode, expd)\n \n def set_level(self, v):\n pass\n \n def set_mode(self, mode):\n pass\n \n def errors(self):\n \"\"\"返回错误列表\"\"\"\n e = []\n return e\n ","sub_path":"qulab/drivers/SR620/sr620.py","file_name":"sr620.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"603807498","text":"# author: ALEXIS CARBILLET\n\n## import librairies\nimport pandas\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import Perceptron, LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import f1_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\n## import data\ndf = pandas.read_csv('project_pump.csv', sep=';')\n(n,p)=df.shape\n## Preprocessing dataset\nprint((n,p))\nc=df.columns.values\n\nc=pandas.DataFrame.transpose(pandas.DataFrame(c))\nc.columns=df.columns.values\n# simply concatenate both dataframes \nc=c.append(df)\n\nprint(c.shape)\n\ndf.columns = ['Unix', 'Samples','Time','Sampling', 'Data']\n\nm=0\n\nfor i in range(n):\n s=df['Data'][i]\n s=s.rstrip(']')\n s=s.lstrip('[')\n s=s.split(',')\n p2=len(s)\n for j in range(p2):\n m+=float(s[j]) # replace list by its mean\n df['Data'][i]=str(m/p2)\n\n## How many cluster?\nSum_of_squared_distances = []\nK = range(1,15)\nfor k in K:\n km = KMeans(n_clusters=k)\n km = km.fit(df)\n Sum_of_squared_distances.append(km.inertia_)\n\nplt.figure()\nplt.plot(K, Sum_of_squared_distances, 'bx-')\nplt.xlabel('k')\nplt.ylabel('Sum_of_squared_distances')\nplt.title('Elbow Method For Optimal k')\n\n## In the plot above the elbow is at k=3 indicating the optimal k for this dataset is 3\nkmeans = KMeans(n_clusters=3, random_state=0).fit(df)\n\n## machine learning\nlabels=kmeans.labels_\n\ndef fit(nb,train,test,y,yt,height_f1,type):\n nb.fit(train, y)\n z=f1_score(yt, 
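\n               # note: predictions are scored against the KMeans cluster labels derived above,\n               # so this F1 measures agreement with the clustering rather than any ground truth;\n               # 'weighted' averages the per-class F1 scores weighted by class support.\n               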
nb.predict(test),average='weighted')\n print('the f1 score obtained with ',type,' is:',z)\n height_f1.append(z)\n\ndef ml(train,test,y,yt):\n height=[]\n height_f1=[]\n bars=['bayes','perceptron','MLP','tree','logistic regression','kNN 3 neighbors','kNN 7 neighbors','kNN 15 neighbors','SVC','Random Forest']\n # bayes\n nb = MultinomialNB()\n fit(nb,train,test,y,yt,height_f1,'bayes')\n # perceptron\n nb = Perceptron(tol=1e-3, random_state=0)\n fit(nb,train,test,y,yt,height_f1,'perceptron')\n # multi-layer perceptron\n nb = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)\n fit(nb,train,test,y,yt,height_f1,'multi-layer perceptron')\n # tree classifier\n nb = DecisionTreeClassifier(random_state=0)\n fit(nb,train,test,y,yt,height_f1,'tree')\n # logistic regression\n nb = LogisticRegression(random_state=0, solver='lbfgs',multi_class='multinomial')\n fit(nb,train,test,y,yt,height_f1,'logistic regression')\n # kNN 3\n nb = KNeighborsClassifier(n_neighbors=3)\n fit(nb,train,test,y,yt,height_f1,'kNN 3 neighbors')\n # kNN 7\n nb = KNeighborsClassifier(n_neighbors=7)\n fit(nb,train,test,y,yt,height_f1,'kNN 7 neighbors')\n # kNN 15\n nb = KNeighborsClassifier(n_neighbors=15)\n fit(nb,train,test,y,yt,height_f1,'kNN 15 neighbors')\n # SVC\n nb = SVC(gamma='auto')\n fit(nb,train,test,y,yt,height_f1,'SVC')\n # random forest\n nb = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n fit(nb,train,test,y,yt,height_f1,'random forest')\n y_pos = np.arange(len(bars))\n plt.figure()\n\n title='F1 score'\n plt.title(title)\n plt.bar(y_pos, height_f1) # Create bars\n plt.xticks(y_pos, bars, rotation=90) # Create names on the x-axis\n plt.subplots_adjust(bottom=0.3, top=0.95) # Custom the subplot layout\n plt.show() # Show graphic\n print('the best one is ',bars[height_f1.index(max(height_f1))],' with a F1 score of ',height_f1[height_f1.index(max(height_f1))])\n\nX_train, X_test, y_train, y_test = train_test_split( df, labels, test_size=0.70, random_state=42)\nml(X_train,X_test,y_train,y_test)\n\n## write in csv the results of clustering\ndf['Cluster'] = pandas.Series(labels, index=df.index)\ndf.to_csv('project_pump_modified.csv')\n\n\n## bonus\n# df2 = pandas.read_csv('project_fan.csv', sep=';',encoding='utf-8', engine='c')\n# (n,p)=df2.shape\n# print((n,p))\n# c=df2.columns.values\n# \n# c=pandas.DataFrame.transpose(pandas.DataFrame(c))\n# c.columns=df2.columns.values\n# # simply concatenate both dataframes \n# c=c.append(df2)\n# \n# print(c.shape)\n# df2.columns = ['Unix', 'Samples','Time','Sampling', 'Data']\n# m=0\n# \n# for i in range(n):\n# s=df2['Data'][i]\n# s=s.rstrip(']')\n# s=s.lstrip('[')\n# s=s.split(',')\n# p2=len(s)\n# for j in range(p2):\n# m+=float(s[j]) # replace list by its mean\n# df2['Data'][i]=str(m/p2)\n# \n# Sum_of_squared_distances = []\n# K = range(1,15)\n# for k in K:\n# km = KMeans(n_clusters=k)\n# km = km.fit(df2)\n# Sum_of_squared_distances.append(km.inertia_)\n# \n# plt.figure()\n# plt.plot(K, Sum_of_squared_distances, 'bx-')\n# plt.xlabel('k')\n# plt.ylabel('Sum_of_squared_distances')\n# plt.title('Elbow Method For Optimal k')\n# # still 3 states\n# kmeans = KMeans(n_clusters=3, random_state=0).fit(df2)\n# labels=kmeans.labels_\n# X_train, X_test, y_train, y_test = train_test_split( df2, labels, test_size=0.20, random_state=42)\n# ml(X_train,X_test,y_train,y_test)\n# df2['Cluster'] = pandas.Series(labels, index=df2.index)\n# 
df2.to_csv('project_fan_modified.csv')","sub_path":"alexis-carbillet.py","file_name":"alexis-carbillet.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"244491414","text":"import pytest\n\nfrom spotdl.providers.audio import YouTubeMusic\nfrom spotdl.types.song import Song\n\n\n@pytest.mark.vcr()\ndef test_ytm_search():\n provider = YouTubeMusic()\n\n assert (\n provider.search(\n Song.from_dict(\n {\n \"name\": \"Nobody Else\",\n \"artists\": [\"Abstrakt\"],\n \"artist\": \"Abstrakt\",\n \"album_id\": \"0kx3ml8bdAYrQtcIwvkhp8\",\n \"album_name\": \"Nobody Else\",\n \"album_artist\": \"Abstrakt\",\n \"genres\": [],\n \"disc_number\": 1,\n \"disc_count\": 1,\n \"duration\": 162.406,\n \"year\": 2022,\n \"date\": \"2022-03-17\",\n \"track_number\": 1,\n \"tracks_count\": 1,\n \"isrc\": \"GB2LD2210007\",\n \"song_id\": \"0kx3ml8bdAYrQtcIwvkhp8\",\n \"cover_url\": \"https://i.scdn.co/image/ab67616d0000b27345f5ba253b9825efc88bc236\",\n \"explicit\": False,\n \"publisher\": \"NCS\",\n \"url\": \"https://open.spotify.com/track/0kx3ml8bdAYrQtcIwvkhp8\",\n \"copyright_text\": \"2022 NCS\",\n \"download_url\": None,\n }\n )\n )\n is not None\n )\n\n\n@pytest.mark.vcr()\ndef test_ytm_get_results():\n provider = YouTubeMusic()\n\n results = provider.get_results(\"Lost Identities Moments\")\n\n assert len(results) > 3\n","sub_path":"tests/providers/audio/test_ytmusic.py","file_name":"test_ytmusic.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"40951588","text":"# The contents of this file are subject to the Mozilla Public\r\n# License Version 1.1 (the \"License\"); you may not use this file\r\n# except in compliance with the License. You may obtain a copy of\r\n# the License at http://www.mozilla.org/MPL/\r\n#\r\n# Software distributed under the License is distributed on an \"AS\r\n# IS\" basis, WITHOUT WARRANTY OF ANY KIND, either express or\r\n# implied. See the License for the specific language governing\r\n# rights and limitations under the License.\r\n#\r\n# The Initial Owner of the Original Code is European Environment\r\n# Agency (EEA). Portions created by Finsiel Romania and Eau de Web are\r\n# Copyright (C) European Environment Agency. 
All\r\n# Rights Reserved.\r\n#\r\n# Authors:\r\n#\r\n# Valentin Dumitru, Eau de Web\r\n\r\nPROVINCES = {\r\n '1':'Drenthe', '2':'Flevoland', '3':'Friesland ', '4':'Gelderland',\r\n '5':'Groningen', '6':'Limburg', '7':'North Brabant', '8':'North Holland',\r\n '9':'Overijssel', '10':'South Holland', '11':'Utrecht', '12':'Zeeland'\r\n }\r\n\r\nAMBASSADOR_CHOICES = {\r\n '1':'Did not choose a species yet',\r\n '2':'Is busy selecting an ambassador species',\r\n '3':'Already has selected ambassador species',\r\n }","sub_path":"eggs/naaya.content.municipality/naaya/content/municipality/skel.py","file_name":"skel.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"629880864","text":"#:!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport pygame\nimport error\nimport math\nfrom thing import Thing\n\nclass Engine:\n done = 0\n width = 1024\n height = 768\n clock = None\n screen = None\n\n def __init__(self):\n try:\n # initialize pygame\n pygame.init()\n\n # create a clock to limit the frames per second\n self.clock = pygame.time.Clock()\n\n # Set the width and the height of the screen and create the sceen \n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"Press start to play\")\n\n # create player\n self.player = Thing((self.width >> 1), (self.height >> 1))\n\n # this variable is used to end the main loop\n self.done = 0\n except:\n raise error.Error(\"init engine\")\n\n def main_loop(self):\n try:\n # start main loop\n while self.done != 1:\n\n # handle some events\n for event in pygame.event.get():\n if event.type == pygame.VIDEORESIZE:\n self.width = event.dict[\"size\"][0]\n self.height = event.dict[\"size\"][1]\n self.screen = pygame.display.set_mode((self.width, self.height))\n elif event.type == pygame.QUIT:\n self.done = 1\n\n # handle some key input\n keys_pressed = pygame.key.get_pressed()\n if keys_pressed[pygame.K_ESCAPE]:\n self.done = 1\n dx = 0\n dy = 0\n if keys_pressed[pygame.K_w]:\n dy -= 1\n if keys_pressed[pygame.K_s]:\n dy += 1\n if keys_pressed[pygame.K_a]:\n dx -= 1\n if keys_pressed[pygame.K_d]:\n dx += 1\n if dx != 0 or dy != 0:\n self.player.acclerate(dx, dy)\n\n # update objects\n self.player.update()\n\n # fill the screen with one color\n self.screen.fill((10, 10, 10))\n \n # render the scene\n pygame.draw.rect(self.screen, (255, 0, 0), (\\\n self.player.position.x - 16, \\\n self.player.position.y - 16, \\\n 32, 32), 1)\n\n # update the screen\n pygame.display.flip()\n\n # Limit the game loop to 60 ticks(frames) per second\n self.clock.tick(60)\n except:\n raise error.Error(\"main loop\")\n\n def __del__(self):\n try:\n # release pygame\n pygame.quit()\n except:\n raise error.Error(\"del engine\")\n\n","sub_path":"pstp/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"630656458","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport numpy as np\n\nclass ArmEnvNoise(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.min_position = -np.pi\n self.max_position = np.pi\n\n self.low = np.array([self.min_position])\n self.high = np.array([self.max_position])\n \n self.action_space = spaces.Discrete(3)\n self.observation_space = spaces.Box(self.low, self.high)\n\n 
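# a Discrete(3) action maps to a step direction in _step via (action - 1), i.e.\n        # 0/1/2 become left/stay/right; note the observation_space above is declared 1-D\n        # even though _step returns [noisy_angle, true_angle], and _reset picks the start\n        # side with np.random.randint(1), which always returns 0 (randint(2) was likely meant).\n        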
self.viewer = None\n\n self._seed()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, action):\n assert self.action_space.contains(action), '%r (%s) invalid' % (action, type(action))\n\n translation_noise = self.np_random.normal(0, 0.01)\n observation_noise = self.np_random.normal(0, 0.1)\n\n action_step = np.pi / 12 + translation_noise\n\n state = self.state[0]\n state += (action - 1) * action_step\n\n if state > np.pi:\n state -= 2*np.pi\n if state < -np.pi:\n state += 2*np.pi\n\n self.state = np.array([state])\n obs = np.array([state+observation_noise, state])\n\n reward = (np.pi - abs(self.state[0] - self.goal_position)) / np.pi\n\n done = False\n\n return obs, reward, done, {}\n\n def _reset(self):\n init_step = 0.5\n self.state = self.np_random.uniform(np.pi-init_step, np.pi, 1) * [-1, 1][np.random.randint(1)]\n self.goal_position = self.np_random.uniform(low=-1*init_step, high=init_step)\n return self.state\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = 600\n screen_height = 400\n \n world_center = (screen_width/2, screen_height/2)\n state_radius = 150\n r = 10\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n # state line\n self.state_line = rendering.make_circle(state_radius, filled=False)\n self.state_line.add_attr(rendering.Transform(translation=world_center))\n self.viewer.add_geom(self.state_line)\n\n # center circle\n self.center_line = rendering.make_circle(r)\n self.center_line.add_attr(rendering.Transform(translation=world_center))\n self.viewer.add_geom(self.center_line)\n\n # goal circle\n self.goal_circle = rendering.make_circle(r)\n self.goal_circle.set_color(255, 0, 0)\n self.goal_circle.add_attr(rendering.Transform(translation=(\n state_radius * np.cos(self.goal_position) + world_center[0],\n state_radius * np.sin(self.goal_position) + world_center[1]\n )))\n self.viewer.add_geom(self.goal_circle)\n\n # agent circle\n self.agent = rendering.Transform()\n agent_arm = rendering.make_polyline([(0, 0), (state_radius, 0)])\n agent_arm.add_attr(self.agent)\n agent_arm.set_linewidth(4)\n self.viewer.add_geom(agent_arm)\n agent_circle = rendering.make_circle(r)\n agent_circle.add_attr(rendering.Transform(translation=(state_radius, 0)))\n agent_circle.set_color(0, 255, 0)\n agent_circle.add_attr(self.agent)\n self.viewer.add_geom(agent_circle)\n\n pos = self.state[0]\n self.agent.set_translation(world_center[0], world_center[1])\n self.agent.set_rotation(pos)\n\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n\n","sub_path":"gym_arm/envs/arm_env_noise.py","file_name":"arm_env_noise.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"304176375","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom keras.layers import Dense, Input, Embedding, Flatten, Concatenate, Lambda\nfrom keras.optimizers import Adam\n\ndef NeuMF(param):\n user_input = Input(shape=(1,), dtype='int32', name = 'user_input')\n item_input = Input(shape=(1,), dtype='int32', name = 'item_input')\n \n # Embedding layer\n MF_Embedding_User = Embedding(input_dim = param['n_users']+1, output_dim = 50, name = 'mf_embedding_user', input_length=1)\n MF_Embedding_Item = Embedding(input_dim = param['n_items']+1, 
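\n                                  # the GMF and MLP branches keep separate embedding tables, as in the NeuMF\n                                  # architecture; the +1 on input_dim presumably leaves room for 1-based ids\n                                  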
output_dim = 50, name = 'mf_embedding_item', input_length=1) \n\n    MLP_Embedding_User = Embedding(input_dim = param['n_users']+1, output_dim = 50, name = \"mlp_embedding_user\", input_length=1)\n    MLP_Embedding_Item = Embedding(input_dim = param['n_items']+1, output_dim = 50, name = 'mlp_embedding_item', input_length=1) \n    \n    # MF part\n    mf_user_latent = Flatten()(MF_Embedding_User(user_input))\n    mf_item_latent = Flatten()(MF_Embedding_Item(item_input))\n    mf_vector = tf.keras.layers.Multiply()([mf_user_latent, mf_item_latent])\n\n    # MLP part \n    mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))\n    mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))\n    mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])\n    \n    # dense layers\n    dense = Dense(200, name='fully_connected_1')(mlp_vector)\n    dense_2 = Dense(100, name='fully_connected_2')(dense)\n    dense_3 = Dense(50, name='fully_connected_3')(dense_2)\n\n    # Concatenate MF and MLP parts\n    predict_vector = Concatenate()([mf_vector, dense_3])\n    \n    # Final prediction layer\n    prediction = Dense(1, activation='sigmoid', name = \"prediction\")(predict_vector)\n    \n    model = keras.Model([user_input, item_input], prediction)\n    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=param['learn_rate']), metrics=['accuracy', 'AUC', 'Precision', 'Recall'])\n    \n    return model","sub_path":"Mobile_CARS/NeuMF.py","file_name":"NeuMF.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"27414058","text":"from bson import json_util\nfrom datetime import timedelta\nfrom flask import make_response, request, current_app\nfrom functools import update_wrapper\nimport base64\nimport calendar\nimport datetime\nimport re\n\njson_lib = True\ntry:\n    import json\nexcept ImportError:\n    try:\n        import simplejson as json\n    except ImportError:\n        json_lib = False\n\nimport bson\nfrom bson import EPOCH_AWARE\nfrom bson.binary import Binary\nfrom bson.code import Code\nfrom bson.dbref import DBRef\nfrom bson.max_key import MaxKey\nfrom bson.min_key import MinKey\nfrom bson.objectid import ObjectId\nfrom bson.timestamp import Timestamp\n\nfrom bson.py3compat import PY3, binary_type, string_types\n\n\n\ndef make_json_response(body, status_code=200):\n\n    resp = make_response(dump_string(body,indent=2))\n    resp.status_code = status_code\n    resp.mimetype = 'application/json'\n\n    return resp\n\ndef make_jsonp_response(body, status_code=200,callback=\"callback\"):\n\n    resp = make_response(callback+\"(\"+dump_string(body,indent=2)+\");\")\n    resp.status_code = status_code\n    resp.mimetype = 'application/javascript'\n\n    return resp\n\ndef make_html_response(body, status_code=200,callback=\"callback\"):\n\n    resp = make_response(callback+\"(\"+dump_string(body,indent=2)+\");\")\n    resp.status_code = status_code\n    resp.mimetype = 'text/html'\n    return resp\ndef make_javascript_response(body, status_code=200,callback=\"callback\"):\n\n    resp = make_response(body)\n    resp.status_code = status_code\n    resp.mimetype = 'application/javascript'\n    return resp\n\n\n\ndef bad_id_response():\n\n    return make_json_response({'message': 'invalid id'}, 400)\n\ndef error_response():\n\n    return make_json_response({'message': 'error!'}, 400)\n\n\n\ndef crossdomain(origin=None, methods=None, headers=None,\n                max_age=21600, attach_to_all=True,\n                automatic_options=True):\n    if methods is not None:\n        
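# normalise the allowed methods into the comma-separated form used by the\n        # Access-Control-Allow-Methods header, e.g. 'GET, POST, OPTIONS'\n        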
methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = \"*\"\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Headers'] = 'Content-Type'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\n# TODO share this with bson.py?\n_RE_TYPE = type(re.compile(\"foo\"))\n\n\n\ndef _json_convert(obj):\n \"\"\"Recursive helper method that converts BSON types so they can be\n converted into json.\n \"\"\"\n if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support\n return dict(((k, _json_convert(v)) for k, v in obj.iteritems()))\n elif hasattr(obj, '__iter__') and not isinstance(obj, string_types):\n return list((_json_convert(v) for v in obj))\n try:\n return default_id(obj)\n except TypeError:\n return obj\n\n\n\n\ndef object_hook_id(dct):\n if \"_id\" in dct:\n dct['_id']=ObjectId(dct['_id'])\n return dct\n if \"$ref\" in dct:\n return DBRef(dct[\"$ref\"], dct[\"$id\"], dct.get(\"$db\", None))\n if \"$date\" in dct:\n secs = float(dct[\"$date\"]) / 1000.0\n return EPOCH_AWARE + datetime.timedelta(seconds=secs)\n if \"$regex\" in dct:\n flags = 0\n if \"i\" in dct[\"$options\"]:\n flags |= re.IGNORECASE\n if \"m\" in dct[\"$options\"]:\n flags |= re.MULTILINE\n return re.compile(dct[\"$regex\"], flags)\n if \"$minKey\" in dct:\n return MinKey()\n if \"$maxKey\" in dct:\n return MaxKey()\n if \"$binary\" in dct:\n return Binary(base64.b64decode(dct[\"$binary\"].encode()), dct[\"$type\"])\n if \"$code\" in dct:\n return Code(dct[\"$code\"], dct.get(\"$scope\"))\n if bson.has_uuid() and \"$uuid\" in dct:\n return bson.uuid.UUID(dct[\"$uuid\"])\n return dct\n\n\ndef default_id(obj):\n if isinstance(obj, ObjectId):\n return str(obj)\n if isinstance(obj, DBRef):\n return obj.as_doc()\n if isinstance(obj, datetime.datetime):\n # TODO share this code w/ bson.py?\n if obj.utcoffset() is not None:\n obj = obj - obj.utcoffset()\n millis = int(calendar.timegm(obj.timetuple()) * 1000 +\n obj.microsecond / 1000)\n return {\"$date\": millis}\n if isinstance(obj, _RE_TYPE):\n flags = \"\"\n if obj.flags & re.IGNORECASE:\n flags += \"i\"\n if obj.flags & re.MULTILINE:\n flags += \"m\"\n return {\"$regex\": obj.pattern,\n \"$options\": flags}\n if isinstance(obj, MinKey):\n return {\"$minKey\": 1}\n if isinstance(obj, MaxKey):\n return {\"$maxKey\": 1}\n if isinstance(obj, Timestamp):\n return {\"t\": obj.time, \"i\": obj.inc}\n if isinstance(obj, Code):\n return {'$code': \"%s\" % obj, '$scope': obj.scope}\n if isinstance(obj, Binary):\n return {'$binary': base64.b64encode(obj).decode(),\n '$type': obj.subtype}\n if PY3 and 
isinstance(obj, binary_type):\n return {'$binary': base64.b64encode(obj).decode(),\n '$type': 0}\n if bson.has_uuid() and isinstance(obj, bson.uuid.UUID):\n return {\"$uuid\": obj.hex}\n raise TypeError(\"%r is not JSON serializable\" % obj)\n\n\ndef dump_string(obj, *args, **kwargs):\n \"\"\"Helper function that wraps :class:`json.dumps`.\n\n Recursive function that handles all BSON types incuding\n :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.\n \"\"\"\n if not json_lib:\n raise Exception(\"No json library available\")\n kwargs['default'] = default_id\n return json.dumps(_json_convert(obj), *args, **kwargs)\n\n\ndef load_string(s, *args, **kwargs):\n \"\"\"Helper function that wraps :class:`json.loads`.\n\n Automatically passes the object_hook for BSON type conversion.\n \"\"\"\n if not json_lib:\n raise Exception(\"No json library available\")\n kwargs['object_hook'] = object_hook_id\n return json.loads(s, *args, **kwargs)\n","sub_path":"camelot/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"645071949","text":"import re\nimport brotli\n# import zlib\nimport gzip\n\nfrom .ad_deleter import AdDeleter\n\n\nclass HTTPRequest:\n START_RE = re.compile(rb'(CONNECT|GET|HEAD|PUT|POST|DELETE|TRACE|OPTIONS) (.+) (.+)')\n HOST_RE = re.compile(rb'Host: ([^:\\r\\n]+):?(\\d+)?')\n\n def __init__(self, request):\n self.request = request\n self.method = None\n self.URL = None\n self.HTTP_v = None\n self.host = None\n self.host_port = None\n self._parse(request)\n\n def _parse(self, request):\n start_match = HTTPRequest.START_RE.search(request)\n if not start_match:\n print(request)\n self.method = start_match.group(1)\n self.URL = start_match.group(2)\n self.HTTP_v = start_match.group(3).strip(b'\\r')\n\n host_match = HTTPRequest.HOST_RE.search(request)\n self.host = host_match.group(1)\n self.host_port = host_match.group(2)\n\n\nclass HTTPResponse:\n HTML_CONT_TYP_RE = re.compile(rb'Content-Type: text/html')\n HTML_CHARSET_RE = re.compile(rb'Content-Type: text/html; charset=(.+)?')\n HTML_ENC_RE = re.compile(b'Content-Encoding: (.+)')\n\n def __init__(self, response):\n self.response = response\n self.is_html = self._is_html()\n self.charset = self._get_charset()\n self.content_encoding = self._get_content_encoding()\n\n def get_response(self):\n if not self.is_html:\n return self.response\n if self.content_encoding != 'gzip':\n return self.response\n headers, content = self._extract_content()\n decoded_content = gzip.decompress(content).decode(self.charset)\n altered_content = AdDeleter(decoded_content).get_content_without_ads()\n return headers + b'\\r\\n\\r\\n' + gzip.compress(altered_content.encode(self.charset))\n\n def _extract_content(self):\n sep = b'\\r\\n\\r\\n'\n headers, content = self.response.split(sep, maxsplit=1)\n return headers, content.strip(b'\\r\\n')\n\n def _is_html(self):\n return HTTPResponse.HTML_CONT_TYP_RE.search(self.response) is not None\n\n def _get_charset(self):\n match = HTTPResponse.HTML_CHARSET_RE.search(self.response)\n if not match:\n return 'utf-8'\n return match.group(1).strip(b'\\r\\n').decode()\n\n def _get_content_encoding(self):\n match = HTTPResponse.HTML_ENC_RE.search(self.response)\n if not match:\n return None\n return 
match.group(1).strip(b'\\r\\n').decode()\n","sub_path":"proxy/modules/http_parser.py","file_name":"http_parser.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"467741135","text":"from django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.db import transaction\n\nfrom .forms import URLForm\nfrom urlShare.models import User, URL, Device\nfrom twython import Twython\nimport settings\n\nfrom urlShare.http_json_response import HttpJSONResponse\n\nmobile_ua_hints = [ 'ipad', 'ipod', 'iphone' ]\n\ndef index(request):\n return render(request, 'appUrlLoader.html',)\n\n\ndef twitter_user_required(view_fn):\n def view_wrapper(request):\n if not request.twitter_user:\n request.session['is_ipad'] = request.GET.get('is_ipad')\n return redirect('twitter_login')\n\n return view_fn(request)\n\n return view_wrapper\n\n\n@twitter_user_required\n@transaction.commit_on_success\ndef saveUrl(request):\n\n url = request.GET.get(\"url\", None)\n form = URLForm(request.POST or None, initial={\n 'url': url\n })\n\n is_ipad = request.session.get('is_ipad')\n twitter_id = request.twitter_user\n\n if form.is_valid():\n url = form.save(commit=False)\n url.user = twitter_id\n url.save()\n\n if not is_ipad:\n url.broad_cast()\n\n return redirect('saveUrl')\n\n try:\n last_url = request.twitter_user.url_set.order_by('-date_created')[:1].get().url\n except URL.DoesNotExist:\n last_url = None\n\n \n\n if is_ipad:\n is_ipad_login = 'true'\n else:\n is_ipad_login = 'false'\n\n return render(request, 'urlPostForm.html', {\n 'form': form,\n 'last_url': last_url,\n 'is_ipad_login': is_ipad_login,\n 'twitter_id': request.twitter_user,\n })\n\n\ndef saveUrlFast(request):\n\n url_shared = request.GET.get(\"url\", None)\n twitter_id = request.GET.get(\"twitter_id\", None)\n\n is_ios = request.GET.get(\"is_ios\")\n\n user = User.objects.get(twitter_id=twitter_id)\n url = URL.objects.create(user=user, url=url_shared)\n\n if not is_ios:\n url.broad_cast()\n\n return HttpResponse('success')\n\n\ndef validToken(request):\n return HttpResponse(\"true\" if request.twitter_user else \"false\")\n\n\ndef shakeSend(request):\n twitter_id = request.GET.get(\"twitter_id\", None)\n last = get_object_or_404(URL.objects\n .filter(user__twitter_id=twitter_id)\n .order_by('-date_created')[:1])\n\n last.broad_cast()\n\n return HttpResponse('success')\n\n\ndef getLastPage(request):\n twitter_id = request.GET.get(\"twitter_id\")\n last = get_object_or_404(URL.objects\n .filter(user__twitter_id=twitter_id)\n .order_by('-date_created')[:1])\n\n return HttpJSONResponse({'url': last.url})\n\n\ndef getHistory(request):\n twitter_id = request.GET.get(\"twitter_id\")\n userUrlCheak = (URL.objects\n .filter(user__twitter_id=twitter_id)\n .order_by('-date_created'))\n\n return HttpJSONResponse({'url': [o.url for o in userUrlCheak]})\n\n\ndef bookmarklet(request):\n twitter_id = request.twitter_user\n return render(request, 'bookmarklet.html', {'twitter_id': twitter_id})\n\n\ndef googleVerify(request):\n return render(request, 'google9bb880dc94f5c6e5.html',)\n\n\ndef normalize_device_id(raw_device_id):\n return raw_device_id.replace(\"<\", \"\").replace(\">\", \"\").replace(\" \", \"\")\n\n\ndef begin_auth(request):\n \"\"\"\n The view function that initiates the entire handshake.\n For the most part, this is 100% drag 
and drop.\n    \"\"\"\n    request.session['bookmarklet'] = request.GET.get('bookmarklet')\n    # Instantiate Twython with the first leg of our trip.\n    twitter = Twython(\n        twitter_token=settings.TWITTER_KEY,\n        twitter_secret=settings.TWITTER_SECRET,\n        callback_url=request.build_absolute_uri(reverse('urlShare.views.thanks'))\n    )\n\n    # Request an authorization url to send the user to...\n    auth_props = twitter.get_authentication_tokens()\n\n    # Then send them over there, durh.\n    request.session['request_token'] = auth_props\n    return HttpResponseRedirect(auth_props['auth_url'])\n\n\ndef thanks(request):\n    \"\"\"A user gets redirected here after hitting Twitter and authorizing your app to use their data.\n    ***\n    This is the view that stores the tokens you want\n    for querying data. Pay attention to this.\n    ***\n    \"\"\"\n\n    # Now that we've got the magic tokens back from Twitter, we need to exchange\n    # for permanent ones and store them...\n    twitter = Twython(\n        twitter_token=settings.TWITTER_KEY,\n        twitter_secret=settings.TWITTER_SECRET,\n        oauth_token=request.session['request_token']['oauth_token'],\n        oauth_token_secret=request.session['request_token']['oauth_token_secret'],\n    )\n\n    # Retrieve the tokens we want...\n    authorized_tokens = twitter.get_authorized_tokens()\n    try:\n        twitter_id = authorized_tokens['user_id']\n        request.session['twitter_id'] = twitter_id\n    except:\n        return redirect('twitter_login')\n\n    if not User.objects.filter(twitter_id=twitter_id).exists():\n        user = User.objects.create(twitter_id=twitter_id)\n\n    is_ipad = request.session.get('is_ipad')\n    bookmarklet = request.session.get('bookmarklet')\n\n    if bookmarklet:\n        return redirect('bookmarklet')\n\n    if is_ipad:\n        return redirect('saveUrl')\n    else: \n        return redirect('index')\n\n\ndef regisert_device(request):\n    twitter_id = request.GET.get('twitter_id')\n    device_id = request.GET.get('device_id')\n    user = User.objects.get(twitter_id=twitter_id)\n    user.device_set.create(device_id=device_id)\n    \"\"\"if not User.objects.filter(twitter_id=twitter_id).exists():\n        user = User.objects.create(twitter_id=twitter_id)\n\n    if not Device.objects.filter(device_id=device_id).exists():\n        user.device_set.create(device_id=device_id)\n    \"\"\"\n\n    return HttpResponse(\"success\")\n\n\ndef bilitz(request):\n    return HttpResponse(\"42\")\n\n\ndef webapp(request):\n    return render(request, 'pagemove.webapp',)\n","sub_path":"pagemove_server/pagemove/urlShare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"300550431","text":"import json\n\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import render\n\n# Create your views here.\n\nfrom base_file_server.models import TypeImg\n\n\ndef deal_with_img(request):\n    if request.method == 'POST':\n        if request.FILES['img']:\n            img = TypeImg(file=request.FILES['img'])\n            img.save()\n\n            result = [{\n                'id': img.pk,\n                'url': img.file.url,\n                'modified':img.modified\n            }]\n\n            response_data = json.dumps(result,cls=DjangoJSONEncoder)\n            return HttpResponse(response_data,status=201)\n        else:\n            result = [{\n                'error':'no file uploaded.'\n            }]\n            response_data = json.dumps(result)\n            return HttpResponse(response_data,status=400)\n    elif request.method == 'GET':\n        \"\"\"\n        get img's url by pk.\n        \"\"\"\n        if request.GET['pk']:\n            img = TypeImg.objects.get(pk=request.GET['pk'])\n            result = [{\n                'id': 
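\n                # same payload shape as the POST branch: primary key, file URL and modification time\n                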
img.pk,\n                'url': img.file.url,\n                'modified': img.modified\n            }]\n            response_data = json.dumps(result, cls=DjangoJSONEncoder)\n            return HttpResponse(response_data, status=200)\n        else:\n            result = [{\n                'error': 'need parameter pk.'\n            }]\n            response_data = json.dumps(result)\n            return HttpResponse(response_data, status=400)\n    else:\n        result = [{\n            'error': 'wrong method.'\n        }]\n        response_data = json.dumps(result)\n        return HttpResponse(response_data, status=400)\n\n\ndef test_upload(request):\n    return render(request, 'bfs_upload.html')\n","sub_path":"base_file_server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"166089805","text":"\"\"\"Architecture and creation of the Convolutional Neural Network model for binary classification \"\"\"\n\n# Imports\nimport tensorflow as tf\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras.layers import Conv2D\nfrom tensorflow.python.keras.layers import MaxPooling2D\nfrom tensorflow.python.keras.layers import Flatten\nfrom tensorflow.python.keras.layers import Dense\nfrom tensorflow.python.keras.layers import Dropout\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.layers import BatchNormalization\nfrom tensorflow.python.keras.optimizers import RMSprop\nfrom tensorflow.python.keras.layers import GlobalAveragePooling2D\nfrom keras.callbacks import EarlyStopping\nfrom tensorflow.python.keras.callbacks import TensorBoard\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\nfrom tensorflow.python.keras.callbacks import ReduceLROnPlateau\nfrom tensorflow.python.keras import regularizers\nfrom sklearn.model_selection import cross_val_score\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.wrappers.scikit_learn import KerasClassifier\nimport numpy as np\nfrom tensorflow.python.keras.preprocessing.image import image\nfrom tensorflow.python.keras.models import load_model\n\nimg_width, img_height = 150, 150 # Defining the image size for all input images\n\n\n# Creating model\ndef build_model(activation, loss):\n    model = Sequential()\n    model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(img_width, img_height, 3), activation=activation))\n    model.add(MaxPooling2D(pool_size=(2, 2), padding='same')) # pooling an image with 2x2 matrix\n    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation=activation)) # second convolution layer\n    model.add(MaxPooling2D(pool_size=(2, 2), padding='same')) # pooling an image with 2x2 matrix\n    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation=activation)) # third convolution layer\n    model.add(MaxPooling2D(pool_size=(2, 2), padding='same')) # pooling an image with 2x2 matrix\n    model.add(Flatten()) # transforming all image data to an array\n    model.add(Dense(units=128, activation=activation)) # passing images to fully connected layer\n    model.add(Dropout(rate=0.6)) # overfitting handling\n    model.add(Dense(units=1, activation='sigmoid')) # output neuron\n    optimizer = Adam(lr=0.0005)\n    model.compile(optimizer = optimizer, loss =loss, metrics = ['accuracy']) # compiling the model\n\n    return model\n\n\n# Data augmentation\ndef data_preprocessing(bs, class_mode):\n    train_datagen = ImageDataGenerator(rescale=1./255,\n                                       shear_range=0.2,\n                                       zoom_range=0.2,\n                                       horizontal_flip=True,\n                                       
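# caution: zca_whitening, featurewise_std_normalization and featurewise_center (below)\n                                       # only take effect after datagen.fit() is called on sample data, which this\n                                       # script never does, so Keras should warn and skip those transformations\n                                       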
rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zca_whitening=True,\n featurewise_std_normalization=True,\n featurewise_center=True) # data augmentation generator for train set\n\n test_datagen = ImageDataGenerator(rescale=1./255) # data generator for test set\n\n training_set = train_datagen.flow_from_directory('dataset/training_set',\n target_size=(img_width, img_height),\n batch_size=bs,\n class_mode=class_mode) # taking images from directory\n\n test_set = test_datagen.flow_from_directory('dataset/test_set',\n target_size=(img_width, img_height),\n batch_size=bs,\n class_mode=class_mode) # taking images from directory\n\n return training_set, test_set\n\n\n# Callbacks for implementing specific functions during the training\ndef callbacks():\n check_point = ModelCheckpoint(filepath='models/{val_acc:.5f}.h5', monitor='val_acc', save_best_only=True,\n mode='max', verbose=1) # saving the model if improvement happened\n\n # stopping the training if the monitored value has no improvement over defined number of epochs\n stop_if_not_improve = EarlyStopping(monitor='val_loss', patience=10, mode='min', min_delta=0, verbose=1)\n\n tensor_board = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True,\n write_grads=True) # implementing Tensorboard to observe training process\n\n # reducing learning rate after defined number of epochs if no improvement was observed\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=3, min_lr=10e-7, factor=0.2,\n min_delta=0, verbose=1, mode='min')\n\n return [check_point, stop_if_not_improve, tensor_board, reduce_lr]\n\n\ndef run_model(epochs):\n training_set, test_set = data_preprocessing(bs=32, class_mode='binary') # creating training and test sets\n\n model = build_model(loss='binary_crossentropy', activation='relu') # calling build_model function\n\n history = model.fit_generator(training_set,\n steps_per_epoch = 2000,\n epochs = epochs,\n validation_data = test_set,\n validation_steps = 200,\n callbacks=callbacks(),\n workers=12, shuffle=True) # fitting data to the model\n\n # Plotting model train and test sets accuracies over the number of epochs\n plt.plot(history.history['val_acc'])\n plt.plot(history.history['acc'])\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.title('Model Accuracy')\n plt.legend(['test', 'train'], loc='upper left')\n plt.show()\n\n # Plotting model train and test sets losses over the number of epochs\n plt.plot(history.history['val_loss'])\n plt.plot(history.history['loss'])\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.title('Model Loss')\n plt.legend(['test', 'train'], loc='upper left')\n plt.show()\n\n\n# Saving only model with best accuracy\ndef delete_models():\n path = '/home/onedayyoumay/Desktop/Convolutional_Neural_Networks/models'\n os.chdir(path)\n dir = sorted(os.listdir(path))\n for i in range(len(os.listdir(path))- 1):\n os.remove(dir[i])\n\n\ndef main():\n run_model(epochs=100)\n delete_models()\n\n# Run the file\nif __name__ == '__main__':\n main()\n","sub_path":"project/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"16828068","text":"from django.shortcuts import render\nfrom django.utils import timezone\nfrom .models import Video\n\npage_list = [\n {'name': 'home', 'url': 'home'},\n {'name': 'contact', 'url': 'contact'},\n {'name': 'videos', 'url': 'videos'},\n ]\n\ndef home(request):\n return render(request, 
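\n                  # every view here passes page_list plus current_page so the shared navigation\n                  # template can highlight the active tab\n                  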
'mixolydia/homepage.html',\n {'page_list': page_list,\n 'current_page': 'home',})\n\ndef contact(request):\n return render(request, 'mixolydia/contact.html',\n {'page_list': page_list,\n 'current_page': 'contact',})\n\ndef videos(request):\n published_videos = Video.objects.filter(date_published__lte=timezone.now())\n published_videos = published_videos.order_by('date_published')\n return render(request, 'mixolydia/videos.html',\n {'page_list': page_list,\n 'current_page': 'videos',\n 'vidoes': published_videos,})\n","sub_path":"mixolydia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"609795219","text":"# author: wqyang@bu.edu\n# reference: [doc of weibo package](http://weibo.lxyu.net/)\n\nfrom weibo import Client\nimport webbrowser\nfrom config.Weibo_API_Config import * # local credentials\nimport os # for GitHub Actions test\n\ndef init():\n \"\"\"\n Instantiates a client.\n\n Besides the developer's credentials, weibo always a requires live login before using weibo API \n to prevent abusing. You can login with any plain weibo account.\n \"\"\"\n # API_KEY = os.getenv('API_KEY')\n # API_SECRET = os.getenv('API_SECRET')\n # REDIRECT_URI = os.getenv('REDIRECT_URI')\n try:\n client = Client(API_KEY, API_SECRET, REDIRECT_URI)\n except:\n print(\"Invalid API Credentials...\")\n while True: # check if authorization succeeds, if not, try again\n try:\n webbrowser.open_new(client.authorize_url)\n print(\"Please authorize... \\nIf your browser does not open automatically,\"\\\n \"please paste this URL to your browser manually: {}\".format(client.authorize_url))\n client.set_code(input(\"Input your code:\\n\"))\n break\n except:\n try_again = input(\"Authorization failed... Input Y to try again...\\n\")\n if try_again != 'y' and try_again != 'Y':\n break\n\n return Client(API_KEY, API_SECRET, REDIRECT_URI, client.token)\n\ndef get_comments(client, id, count):\n \"\"\"\n get comments\n \"\"\"\n comments = client.get('comments/show', id = id, count = count)[\"comments\"]\n return [comment[\"text\"] for comment in comments if \"text\" in comment]","sub_path":"apis/weibo_api.py","file_name":"weibo_api.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"93391919","text":"from req import WebRequestHandler\nfrom req import Service\nimport tornado\n\nclass WebStoreHandler(WebRequestHandler):\n @tornado.gen.coroutine\n def get(self, action=None):\n if action == None:\n err, data = yield from Service.Store.get_store({'id': self.id})\n self.render('store/get_store.html', data=data)\n if action == 'add':\n self.render('store/add_store.html')\n\n","sub_path":"backend/web/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"312918864","text":"from typing import Dict\n\nimport numpy as np\n\nfrom .. 
import colors\nfrom ..indicators import Indicator\n\n\nclass Writer:\n    def write(self, *,\n              global_step: int,\n              values: Dict[str, any],\n              indicators: Dict[str, Indicator]):\n        raise NotImplementedError()\n\n\nclass ScreenWriter(Writer):\n    def __init__(self, is_color=True):\n        super().__init__()\n\n        self.is_color = is_color\n\n    def write(self, *,\n              global_step: int,\n              values: Dict[str, any],\n              indicators: Dict[str, Indicator]):\n        parts = []\n\n        for k, ind in indicators.items():\n            if not ind.options.is_print:\n                continue\n\n            if len(values[k]) == 0:\n                continue\n\n            v = np.mean(values[k])\n\n            parts.append((f\" {k}: \", None))\n            if self.is_color:\n                parts.append((f\"{v :8,.2f}\", colors.Style.bold))\n            else:\n                parts.append((f\"{v :8,.2f}\", None))\n\n        return parts\n","sub_path":"lab/logger/writers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"227793026","text":"\"\"\"We store all the model-specific options using the PublishingOptions class.\"\"\"\n\n# DEFAULT_NAMES = ('date_field', 'expired_field', 'data_field_attr', \n#                  'datefield_verbose_name', 'date_field_help_text', \n#                  'expired_field_attr', 'expired_field_verbose_name', \n#                  'expired_field_help_text', 'manager_attr')\n\nclass PublishingOptions(object):\n    \"\"\"Contains all the options available for a publishable model\"\"\"\n    \n    def __init__(self, **kwargs):\n        # Set defaults for all the options\n        self.date_field = kwargs.get('date_field', None)\n        self.expired_field = kwargs.get('expired_field', None)\n        \n        self.date_field_attr = kwargs.get('date_field_attr', 'publishing_publication_date')\n        self.date_field_verbose_name = kwargs.get('date_field_verbose_name', 'Publication Date')\n        self.date_field_help_text = kwargs.get('date_field_help_text', None)\n        \n        self.expired_field_attr = kwargs.get('expired_field_attr', 'publishing_expiration_date')\n        self.expired_field_verbose_name = kwargs.get('expired_field_verbose_name', 'Expiration Date')\n        self.expired_field_help_text = kwargs.get('expired_field_help_text', None)\n        \n        self.manager_attr = kwargs.get(\"manager_attr\", \"publishing\")\n        \n        # Determine if the fields need to be created: if no date fields were\n        # supplied, they are created using the supplied defaults\n        # (attr, verbose_name and help_text)\n        self.date_field_create = not ((self.date_field and True) or False)\n        self.expired_field_create = not ((self.expired_field and True) or False)\n        \n        # Determines if expiration functionality is possible\n        self.has_expired = (self.date_field and self.expired_field and True) or False\n        \n        # Set the correct date field names\n        self.date_field = self.date_field or self.date_field_attr\n        self.expired_field = self.expired_field or self.expired_field_attr\n    \n    def contribute_to_class(self, cls, name):\n        \"\"\"\n        This is called when the options object is supplied to a class, which sets the \n        publishing meta attribute\n        \"\"\"\n        cls._publishing_meta = self\n    ","sub_path":"publisher/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"177009707","text":"from io import BytesIO\nfrom zeit.connector.connector import CannonicalId\nfrom zeit.connector.interfaces import UUID_PROPERTY\nimport datetime\nimport logging\nimport os\nimport os.path\nimport pkg_resources\nimport pytz\nimport random\nimport six\nimport six.moves.http_client\nimport 
six.moves.urllib.parse\nimport time\nimport uuid\nimport zeit.connector.cache\nimport zeit.connector.dav.interfaces\nimport zeit.connector.filesystem\nimport zeit.connector.interfaces\nimport zope.event\n\n\nID_NAMESPACE = u'http://xml.zeit.de/'\n\nlog = logging.getLogger(__name__)\n\n\nclass Connector(zeit.connector.filesystem.Connector):\n \"\"\"Connect to the CMS backend.\n\n The current implementation does *not* talk to the CMS backend but to\n some directory containing test content.\n\n \"\"\"\n\n _ignore_uuid_checks = False\n _set_lastmodified_property = True\n resource_class = zeit.connector.resource.WriteableCachedResource\n\n property_cache = zeit.connector.cache.AlwaysEmptyDict()\n body_cache = zeit.connector.cache.AlwaysEmptyDict()\n child_name_cache = zeit.connector.cache.AlwaysEmptyDict()\n canonical_id_cache = zeit.connector.cache.AlwaysEmptyDict()\n\n def __init__(self, repository_path):\n super(Connector, self).__init__(repository_path)\n self._reset()\n\n def _reset(self):\n self._locked = {}\n self._data = {}\n self._paths = {}\n self._deleted = set()\n self._properties = {}\n\n def listCollection(self, id):\n \"\"\"List the filenames of a collection identified by path. \"\"\"\n return (\n (name, _id)\n for name, _id in super(Connector, self).listCollection(id)\n if _id not in self._deleted and _id + u'/' not in self._deleted)\n\n def _get_collection_names(self, path):\n names = super(Connector, self)._get_collection_names(path)\n names |= self._paths.get(path, set())\n return names\n\n def getResourceType(self, id):\n id = self._get_cannonical_id(id)\n if id in self._deleted:\n raise KeyError(\"The resource '%s' does not exist.\" % id)\n return super(Connector, self).getResourceType(id)\n\n def __getitem__(self, id):\n id = self._get_cannonical_id(id)\n if id in self._deleted:\n raise KeyError(six.text_type(id))\n return super(Connector, self).__getitem__(id)\n\n def __setitem__(self, id, object):\n resource = zeit.connector.interfaces.IResource(object)\n id = self._get_cannonical_id(id)\n iscoll = (resource.type == 'collection' or\n resource.contentType == 'httpd/unix-directory')\n if iscoll and not id.endswith('/'):\n id = CannonicalId(id + '/')\n resource.id = six.text_type(id) # override\n\n if id in self:\n old_etag = self[id].properties.get(('getetag', 'DAV:'))\n else:\n old_etag = None\n new_etag = resource.properties.get(('getetag', 'DAV:'))\n if new_etag and new_etag != old_etag:\n if (id not in self or\n resource.data.read() != self[id].data.read()):\n raise zeit.connector.dav.interfaces.PreconditionFailedError()\n\n if id in self._deleted:\n self._deleted.remove(id)\n\n if not self._ignore_uuid_checks:\n existing_uuid = (\n id in self and self[id].properties.get(UUID_PROPERTY))\n new_uuid = resource.properties.get(UUID_PROPERTY)\n if not new_uuid:\n if existing_uuid:\n new_uuid = existing_uuid\n else:\n new_uuid = '{urn:uuid:%s}' % uuid.uuid4()\n resource.properties[UUID_PROPERTY] = new_uuid\n else:\n if existing_uuid and existing_uuid != new_uuid:\n raise six.moves.http_client.HTTPException(409, 'Conflict')\n\n for key in self._properties.keys():\n if key == self._get_cannonical_id(resource.id):\n continue\n existing_uuid = self._properties[key].get(UUID_PROPERTY)\n if (existing_uuid and existing_uuid ==\n resource.properties[UUID_PROPERTY]):\n raise six.moves.http_client.HTTPException(409, 'Conflict')\n\n # Just a very basic in-memory data storage for testing purposes.\n resource.data.seek(0)\n self._data[id] = resource.data.read()\n path = 
self._path(id)[:-1]\n name = self._path(id)[-1]\n self._paths.setdefault(path, set()).add(name)\n\n resource.properties[\n zeit.connector.interfaces.RESOURCE_TYPE_PROPERTY] = resource.type\n if resource.contentType == 'httpd/unix-directory':\n # XXX kludgy. We need to be able to differentiate directories,\n # so they get a trailing slash in their CanonicalId, but also\n # don't want to store random content types, so the filemagic\n # detetection e.g. for images takes over on the next read.\n resource.properties[\n ('getcontenttype', 'DAV:')] = resource.contentType\n resource.properties[('getlastmodified', 'DAV:')] = six.text_type(\n datetime.datetime.now(pytz.UTC).strftime(\n '%a, %d %b %Y %H:%M:%S GMT'))\n resource.properties[('getetag', 'DAV:')] = repr(\n time.time()) + repr(random.random())\n\n self._set_properties(id, resource.properties)\n\n zope.event.notify(\n zeit.connector.interfaces.ResourceInvaliatedEvent(id))\n\n def __delitem__(self, id):\n id = self._get_cannonical_id(id)\n self[id] # may raise KeyError\n for name, uid in self.listCollection(id):\n del self[uid]\n self._deleted.add(id)\n self._data.pop(id, None)\n self._properties.pop(id, None)\n zope.event.notify(\n zeit.connector.interfaces.ResourceInvaliatedEvent(id))\n\n def add(self, object, verify_etag=True):\n resource = zeit.connector.interfaces.IResource(object)\n if not verify_etag:\n resource.properties.pop(('getetag', 'DAV:'), None)\n self[resource.id] = resource\n\n def copy(self, old_id, new_id):\n r = self[old_id]\n r.id = new_id\n r.properties.pop(UUID_PROPERTY, None)\n self.add(r, verify_etag=False)\n if not new_id.endswith('/'):\n new_id = new_id + '/'\n for name, uid in self.listCollection(old_id):\n self.copy(uid, six.moves.urllib.parse.urljoin(new_id, name))\n\n def move(self, old_id, new_id):\n if new_id in self:\n # The target already exists. It's possible that there was a\n # conflict. 
Verify body.\n if ('httpd/unix-directory' in (self[old_id].contentType,\n self[new_id].contentType) or\n self[old_id].data.read() != self[new_id].data.read()):\n raise zeit.connector.interfaces.MoveError(\n old_id,\n \"Could not move %s to %s, because target alread exists.\" %\n (old_id, new_id))\n self._ignore_uuid_checks = True\n r = self[old_id]\n r.id = new_id\n try:\n self.add(r, verify_etag=False)\n finally:\n self._ignore_uuid_checks = False\n if not new_id.endswith('/'):\n new_id = new_id + '/'\n for name, uid in self.listCollection(old_id):\n self.move(uid, six.moves.urllib.parse.urljoin(new_id, name))\n del self[old_id]\n\n def changeProperties(self, id, properties):\n id = self._get_cannonical_id(id)\n properties.pop(zeit.connector.interfaces.UUID_PROPERTY, None)\n self._set_properties(id, properties)\n\n def lock(self, id, principal, until):\n \"\"\"Lock resource for principal until a given datetime.\"\"\"\n id = self._get_cannonical_id(id)\n # locked_by, locked_until = self.locked(id)\n # if locked_by is not None and locked_by != principal:\n # raise zeit.cms.interfaces.LockingError(\n # \"%s is already locked.\" % id)\n self._locked[id] = (principal, until, True)\n\n def unlock(self, id, locktoken=None):\n id = self._get_cannonical_id(id)\n del self._locked[id]\n return locktoken\n\n def locked(self, id):\n id = self._get_cannonical_id(id)\n return self._locked.get(id, (None, None, False))\n\n def search(self, attributes, expression):\n log.debug(\"Searching: %s\", expression._render())\n\n unique_ids = [\n u'http://xml.zeit.de/online/2007/01/Somalia',\n u'http://xml.zeit.de/online/2007/01/Saarland',\n u'http://xml.zeit.de/2006/52/Stimmts']\n\n metadata = ('pm', '07') + len(attributes) * (None,)\n metadata = metadata[:len(attributes)]\n\n return ((unique_id,) + metadata for unique_id in unique_ids)\n\n # internal helpers\n\n def _get_cannonical_id(self, id):\n \"\"\"Add / for collections if not appended yet.\"\"\"\n if isinstance(id, CannonicalId):\n return id\n if id == ID_NAMESPACE:\n return CannonicalId(id)\n if id.endswith('/'):\n id = id[:-1]\n if self._properties.get(id + '/') is not None:\n return CannonicalId(id + '/')\n if self._properties.get(id) is not None:\n return CannonicalId(id)\n path = self._absolute_path(self._path(id))\n if os.path.isdir(path):\n return CannonicalId(id + '/')\n return CannonicalId(id)\n\n def _absolute_path(self, path):\n if not path:\n return self.repository_path\n return os.path.join(self.repository_path, os.path.join(*path))\n\n def _get_file(self, id):\n if id in self._data:\n value = self._data[id]\n if isinstance(value, six.text_type):\n value = value.encode('utf-8')\n return BytesIO(value)\n return super(Connector, self)._get_file(id)\n\n def _get_lastmodified(self, id):\n return u'Fri, 07 Mar 2008 12:47:16 GMT'\n\n def _get_properties(self, id):\n properties = self._properties.get(id)\n if properties is not None:\n return properties\n properties = super(Connector, self)._get_properties(id)\n self._properties[id] = properties\n return properties\n\n def _set_properties(self, id, properties):\n stored_properties = self._get_properties(id)\n for ((name, namespace), value) in properties.items():\n if (name.startswith('get') and name not in (\n 'getlastmodified', 'getetag', 'getcontenttype')):\n continue\n stored_properties[(name, namespace)] = value\n if value is zeit.connector.interfaces.DeleteProperty:\n del stored_properties[(name, namespace)]\n self._properties[id] = stored_properties\n\n\ndef connector_factory():\n import 
zope.app.appsetup.product\n config = zope.app.appsetup.product.getProductConfiguration(\n 'zeit.connector')\n repository_path = (config or {}).get('repository-path')\n if not repository_path:\n repository_path = pkg_resources.resource_filename(\n __name__, 'testcontent')\n return Connector(repository_path)\n","sub_path":"core/src/zeit/connector/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":11261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"640629157","text":"from django.db import models\r\nfrom django import forms\r\nfrom captcha.fields import CaptchaField\r\n\r\n\r\n# Create your models here.\r\nclass RegisterForm(forms.Form):\r\n gender = (\r\n ('male', \"男\"),\r\n ('female', \"女\"),\r\n )\r\n institute = (\r\n ('船院','船舶海洋与建筑工程学院'),\r\n ('电院', '电子信息与电气工程学院'),\r\n ('机动', '机械与动力工程学院'),\r\n ('材料','材料科学与工程学院'),\r\n ('环境','环境科学与工程学院'),\r\n ('生医工','生物医学工程学院'),\r\n ('航空','航空航天学院'),\r\n ('数学','数学科学学院'),\r\n ('物理','物理与天文学院'),\r\n ('化学', '化学化工学院'),\r\n ('致远','致远学院'),\r\n ('海洋','海洋学院'),\r\n ('生命科学','生命科学技术学院'),\r\n ('农学院','农业与生物学院'),\r\n ('药学院','药学院'),\r\n ('医学院', '医学院'),\r\n ('安泰', '安泰经济与管理学院'),\r\n ('法学院','凯原法学院'),\r\n ('外院','外国语学院'),\r\n ('人文','人文学院'),\r\n ('马院','马克思主义学院'),\r\n ('媒体','媒体与传播学院'),\r\n ('设计','设计学院'),\r\n\r\n )\r\n username = forms.CharField(label=\"用户名\", max_length=128, widget=forms.TextInput(attrs={'class': 'form-control'}))\r\n password1 = forms.CharField(label=\"密码\", max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))\r\n password2 = forms.CharField(label=\"确认密码\", max_length=256,\r\n widget=forms.PasswordInput(attrs={'class': 'form-control'}))\r\n email = forms.EmailField(label=\"邮箱地址\", widget=forms.EmailInput(attrs={'class': 'form-control'}))\r\n sex = forms.ChoiceField(label='性别', choices=gender)\r\n captcha = CaptchaField(label='验证码')\r\n institute = forms.ChoiceField(label='学院', choices=institute)\r\n\r\nclass User(models.Model):\r\n '''用户表'''\r\n gender = (\r\n ('male', '男'),\r\n ('female', '女'),\r\n )\r\n institute=(\r\n ('船院', '船舶海洋与建筑工程学院'),\r\n ('电院', '电子信息与电气工程学院'),\r\n ('机动', '机械与动力工程学院'),\r\n ('材料', '材料科学与工程学院'),\r\n ('环境', '环境科学与工程学院'),\r\n ('生医工', '生物医学工程学院'),\r\n ('航空', '航空航天学院'),\r\n ('数学', '数学科学学院'),\r\n ('物理', '物理与天文学院'),\r\n ('化学', '化学化工学院'),\r\n ('致远', '致远学院'),\r\n ('海洋', '海洋学院'),\r\n ('生命科学', '生命科学技术学院'),\r\n ('农学院', '农业与生物学院'),\r\n ('药学院', '药学院'),\r\n ('医学院', '医学院'),\r\n ('安泰', '安泰经济与管理学院'),\r\n ('法学院', '凯原法学院'),\r\n ('外院', '外国语学院'),\r\n ('人文', '人文学院'),\r\n ('马院', '马克思主义学院'),\r\n ('媒体', '媒体与传播学院'),\r\n ('设计', '设计学院'),\r\n )\r\n name = models.CharField(max_length=128, unique=True)\r\n password = models.CharField(max_length=256)\r\n email = models.EmailField(unique=True)\r\n sex = models.CharField(max_length=32, choices=gender, default='男')\r\n c_time = models.DateTimeField(auto_now_add=True)\r\n mylessons = models.ManyToManyField('search.lessons', related_name='myusers')\r\n institute = models.CharField( max_length=20,choices=institute,default='电院')\r\n def __str__(self):\r\n return self.name\r\n\r\n class Meta:\r\n ordering = ['c_time']\r\n verbose_name = '用户'\r\n verbose_name_plural = '用户'\r\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"211777175","text":"# My implementation of the sieve of eratosthenes\nfrom simple_num import *\nfrom lcd_lcm import *\nimport argparse\n\nap = 
argparse.ArgumentParser()\nap.add_argument(\"-a\", \"--first\", required=True, help=\"First integer\")\nap.add_argument(\"-b\", \"--second\", required=True, help=\"Second integer\")\n#ap.add_argument(\"-c\", \"--third\", required=True, help=\"Third integer\")\nargs = vars(ap.parse_args())\n\nif __name__ == \"__main__\":\n print(\"\"\"\n Find the largest common divisor and least common multiple\n for three numbers {}, {} \n and numbers of Eratosthenes for them.\n \"\"\".format(args['first'], args['second']))\n\n list_for_first = list(eratosthenes(int(args['first'])))\n list_for_second = list(eratosthenes(int(args['second'])))\n #list_for_third = list(eratosthenes(int(args['third'])))\n\n print(f\"For number {args['first']} all simple nums are \\n{list_for_first}\\n\")\n print(f\"For number {args['second']} all simple nums are \\n{list_for_second}\\n\")\n #print(f\"For number {args['third']} all simple nums are \\n{list_for_third}\\n\")\n\n print(f\"For numbers {args['first']} and {args['second']}\")\n print(f\"the largest common divisor is {larg_com_div(int(args['first']), list_for_first, int(args['second']), list_for_second)}\")\n print(f\"the least common multiple is {lea_com_mult(int(args['first']), list_for_first, int(args['second']), list_for_second)}\")\n","sub_path":"eratophen/eratosthenes.py","file_name":"eratosthenes.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"512246370","text":"'''def cude(a,b):\n return a*b\n\nc=int(input())\nd=int(input())\nprint(cude(c,d))\n\ndef cude (a):\n return a**3\n\nb=int(input())\nprint(cude(b))\n\nfor i in range(1,1000):\n print(i)'''\n'''a=open(\"kunjabijukchhe.txt\",\"w\")\nfor i in range(1,1001):\n a.write(str(i))\n a.write(\". Kunja Bijukchhe\\n\")'''\n\n'''a=\"kunja bijukchhe\"\nfor i in a.split():\n print(i)'''\n'''# importing randint function\n# from random module\nfrom random import randint\n\n\n# Function which generates a new\n# random number everytime it executes\ndef generator():\n return randint(1, 10)\n\n\n# Function takes user input and returns\n# true or false depending whether the\n# user wins the lucky draw!\ndef rand_guess():\n # calls generator() which returns a\n # random integer between 1 and 10\n random_number = generator()\n\n # defining the number of\n # guesses the user gets\n guess_left = 3\n\n # Setting a flag variable to check\n # the win-condition for user\n flag = 0\n\n # looping the number of times\n # the user gets chances\n while guess_left > 0:\n\n # Taking a input from the user\n guess = int(input(\"Pick your number to \"\n \"enter the lucky draw\\n\"))\n\n # checking whether user's guess\n # matches the generated win-condition\n if guess == random_number:\n\n # setting flag as 1 if user guessses\n # correctly and then loop is broken\n flag = 1\n break\n\n else:\n\n # If user's choice doesn't match\n # win-condition then it is printed\n print(\"Wrong Guess!!\")\n\n # Decrementing number of\n # guesses left by 1\n guess_left -= 1\n\n # If win-condition is satisfied then,\n # the function rand_guess returns True\n if flag is 1:\n return True\n\n # Else the function returns False\n else:\n return False\n\n\n# Driver code\nif __name__ == '__main__':\n if rand_guess() is True:\n print(\"Congrats!! 
You Win.\")\n else:\n print(\"Sorry, You Lost!\")'''\n'''class circle:\n pi = 3.14\n def __init__(self,radius):\n\n self.radius=radius\n self.area=radius*radius*circle.pi\n def circumfrence(self):\n return self.radius*self.pi*2'''\ncont = \"Y\"\nwhile cont.upper()== \"Y\":\n for i in range(2):\n print(i)\n cont = input(\"Continue?y/n:\")\n if cont == \"n\":\n break\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"560115202","text":"import sys\nimport os\nimport json\nimport time\nfrom purchase_history import PurchaseHistory\nfrom friends_network import FriendsNetwork\n\n'''\nloads and process both batch data and batch data to determine if user's anaomlous in the Dth degree network\n\n'''\n\n\nclass AnomalyDetection:\n\n def __init__(self, batchFile, streamFile, flaggedFile):\n # set files and attributes of friendsNetwork and purchase history\n self.batchFile = batchFile\n self.streamFile = streamFile\n self.flaggedFile = flaggedFile\n self.friendsNetwork = {}\n self.purchasesList = {}\n\n # load and process both batch data and stream data\n def process_log(self):\n print('Loading Data from Batch Log...')\n t0 = time.time()\n self.batch_data()\n print ('Batch Data Loaded (%s users and %d purchases) in %.4f seconds.' 
% (self.friendsNetwork.get_number_users(),\n self.purchasesList.get_number_purchases(),\n time.time() - t0))\n\n # load and process stream data\n print('\\nLoading data from stream log...')\n print('\\nFlagged Purchases: \\n')\n self.stream_data()\n\n # load and process batch data\n def batch_data(self):\n file = open(self.batchFile)\n\n # read first line of T and D as params\n params = json.loads(file.readline().strip())\n self.final_objects(params)\n\n # process events from batch log\n file = self.process_events(file, 'batch')\n file.close()\n\n # when users are loaded, generate Dth degree network\n self.friendsNetwork.update_friendsNetwork()\n\n # load and process stream data\n def stream_data(self):\n file = open(self.streamFile)\n file = self.process_events(file, 'stream')\n file.close()\n\n # process events\n # batch data - update_status is False\n # stream data - update_status is True\n def process_events(self, file, data_type):\n # list to compare event types\n event_type = ['purchase', 'befriend', 'unfriend']\n\n # process each event\n while True:\n line = file.readline().strip()\n if line:\n event = json.loads(line)\n\n # if event type is Purchase\n if event['event_type'] == event_type[0]:\n if data_type == 'stream':\n self.anomaly_check(event)\n # add both batch and stream data to purchasesList as user's history\n self.purchasesList.add_purchase(event)\n\n # if event type is befriend\n elif event['event_type'] == event_type[1]:\n if data_type == 'stream':\n # update stream data immediately\n self.friendsNetwork.add_friend(event, update_status=True)\n else:\n # add friends but doesn't update immediately\n self.friendsNetwork.add_friend(event)\n\n # if event type is unfriend\n elif event['event_type'] == event_type[2]:\n if data_type == 'stream':\n self.friendsNetwork.delete_friend(event, update_status=True)\n else:\n # batch data\n self.friendsNetwork.delete_friend(event)\n else:\n break\n return file\n\n # make sure the D and T objects are set properly for friends network and purchase history\n def final_objects(self, params):\n if 'D' in params and 'T' in params:\n D = params['D']\n T = params['T']\n else:\n print('D and T were input incorrectly ')\n D = input('Input Degree (D): ')\n T = input('Input Tracked Purchases (T): ')\n\n self.friendsNetwork = FriendsNetwork(D)\n self.purchasesList = PurchaseHistory(T)\n\n # check whether a purchase is anomalous\n def anomaly_check(self, purchase):\n userId = purchase.get('id')\n amount = purchase.get('amount')\n\n if userId and amount:\n # get the T purchases of user's network\n users = self.friendsNetwork.get_user(userId)\n mean, sd, numPurchases = self.purchasesList.get_purchase_stats(users)\n\n if mean and sd:\n amount = float(amount)\n # if the purchase amount is greater than mean + 3*sd, it's anomalous\n if amount > mean + (3 * sd):\n\n # write anomaly to the flagged purchases\n file = open(self.flaggedFile, 'a')\n file.write('{\"event_type\": \"%s\", \"timestamp\": \"%s\", \"id\": \"%s\", \"amount\": \"%.2f\", \"mean\": \"%.2f\", \"sd\": \"%.2f\"}\\n' % (purchase['event_type'], purchase['timestamp'], purchase['id'], amount, mean, sd))\n print('{\"event_type\": \"%s\", \"timestamp\": \"%s\", \"id\": \"%s\", \"amount\": \"%.2f\", \"mean\": \"%.2f\", \"sd\": \"%.2f\"}' % (purchase['event_type'], purchase['timestamp'], purchase['id'], amount, mean, sd))\n\n print('The friends network of %d user(s) and %d purchase(s) is anomalous: $%.2f\\n' % (len(users), numPurchases, amount))\n\n file.close()\n return True\n\n else:\n print('Purchase Event is Incomplete')\n 
return False\n\n\ndef main():\n batchFile = sys.argv[1]\n streamFile = sys.argv[2]\n flaggedFile = sys.argv[3]\n t0 = time.time()\n\n AnomalyDetection(batchFile, streamFile, flaggedFile).process_log()\n print ('\\nProcessed both batch log and stream log in %.4f seconds.' % (time.time() - t0))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"insight_testsuite/temp/src/anomaly_detection.py","file_name":"anomaly_detection.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"354484681","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404\nfrom products.models import Product\nfrom accounts.models import userAccounts\n\ndef view_cart(request):\n template= loader.get_template('cart.html')\n\n cart = request.session.get('cart', {})\n\n cart_items = []\n total = 0\n product_count = 0\n for id, quantity in cart.items():\n product = get_object_or_404(Product, pk=id)\n total += quantity * product.price\n product_count += quantity\n cart_items.append({'id': id, 'quantity': quantity, 'product': product})\n\n context={'cart_items': cart_items, 'total': total, 'product_count': product_count}\n return HttpResponse(template.render(context,request))\n\n\n\ndef add_to_cart(request, id):\n\n \n\n cart = request.session.get('cart', {})\n\n \n\n request.session['cart'] = cart\n\n return redirect(reverse('index'))\n\n\ndef adjust_cart(request, id):\n return redirect(reverse('view_cart'))","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229524135","text":"import falcon\nimport json\nimport sys\nimport logging\n\nclass ModelWorkersResource(object):\n \"\"\"\n To expose Restful endpoints related to Model to Workers mapping\n \"\"\"\n def __init__(self, load_balancer):\n self.load_balancer = load_balancer\n self.logger = logging.getLogger(__name__)\n\n def on_put(self, req, resp):\n \"\"\"\n Method to update model to workers map\n :param req:\n :param resp:\n :return:\n \"\"\"\n model_to_workers_map = json.loads(req.stream.read())\n self.logger.debug('Update request received with payload: %s', str(model_to_workers_map))\n self.load_balancer.update_workers_list(model_to_workers_map)\n #model_type=model_to_workers_map['model_type']\n \n resp.status = falcon.HTTP_200\n #resp.content=json.dumps({\"reg info\":\"sucess\"})\n resp.body = json.dumps({\"Register Model Service\": \"Success\"})\n def on_get(self, req, resp):\n \"\"\"Handles GET requests\"\"\"\n #handle_type = req.get_param('handle_type')\n model_info=self.load_balancer.get_all_workers()\n resp.status = falcon.HTTP_200 # This is the default status\n if model_info:\n resp.body =json.dumps(model_info)# str(self.load_balancer.get_model_to_workers_list(model_type))\n else:\n resp.status = falcon.HTTP_400\n raise falcon.HTTPBadRequest(\"Bad Request\", \"None info is responsed\")\n","sub_path":"router/.ipynb_checkpoints/model_workers_resource-checkpoint.py","file_name":"model_workers_resource-checkpoint.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"406592698","text":"import AST\nimport operator\nimport sys\nimport os\n\nfrom AST import 
addToClass\nfrom functools import reduce\nfrom drawTools import DrawTools\nfrom yacc import parse\n\n\noperations = {\n '+': lambda x, y: x + y,\n '-': lambda x, y: x - y,\n '*': lambda x, y: x * y,\n '/': lambda x, y: x / y,\n}\n\ncomparators = {\n '<': operator.lt,\n '>': operator.gt,\n '=>': operator.ge,\n '=<': operator.le,\n '==': operator.eq,\n}\n\np = DrawTools((1200, 800), (255, 255, 255))\n\n\ncollect_params = {\n 'line': ('pos','color','width'),\n 'text': ('pos','word','fontsize','color'),\n 'ellipse': ('pos','color','linecolor'),\n 'rectangle': ('pos','color','linecolor'),\n\n}\n\nparams = (\n 'pos',\n 'width',\n 'color',\n 'linecolor',\n 'fontsize',\n 'word'\n)\n\nvars = {\n 'white': (255, 255, 255),\n 'black': (0, 0, 0),\n 'red': (255, 0, 0),\n 'green': (0, 255, 0),\n 'blue': (0, 0, 255),\n 'yellow': (255, 255, 0),\n 'pink': (255, 0, 255),\n 'purple': (127, 0, 255),\n 'maroon': (153, 76, 0),\n 'orange': (255, 128, 0),\n 'lime': (128, 255, 0),\n}\n\n@addToClass(AST.InitNode)\ndef execute(self, namefile):\n for c in self.children:\n c.execute()\n p.PDFSave(namefile)\n\n@addToClass(AST.ProgramNode)\ndef execute(self):\n for c in self.children:\n c.execute()\n\n@addToClass(AST.TokenNode)\ndef execute(self, diff='test'):\n if diff == 'word':\n return self.tok\n if isinstance(self.tok, str):\n try:\n return vars[self.tok]\n except KeyError:\n print(\"*** Error : variable %s undefined!\" % self.tok)\n elif isinstance(self.tok, float):\n return int(self.tok)\n return self.tok\n\n@addToClass(AST.OpNode)\ndef execute(self, name='fix'):\n args = [c.execute() for c in self.children]\n if len(args) == 1:\n args.insert(0,0)\n return reduce(operations[self.op], args)\n\n@addToClass(AST.AssignNode)\ndef execute(self):\n vars[self.children[0].tok] = self.children[1].execute()\n\n\n@addToClass(AST.PrintNode)\ndef execute(self):\n print(self.children[0].execute())\n\n@addToClass(AST.WhileNode)\ndef execute(self):\n while self.children[0].execute():\n self.children[1].execute()\n\n@addToClass(AST.ForNode)\ndef execute(self):\n self.children[0].execute()\n while self.children[1].execute():\n self.children[3].execute()\n self.children[2].execute()\n\n@addToClass(AST.CompareNode)\ndef execute(self):\n args = [c.execute() for c in self.children]\n compar = comparators.get(self.comparator)\n return compar(args[0],args[1])\n\n@addToClass(AST.FormNode)\ndef execute(self):\n args = self.children[0].execute()\n param = collect_params[self.name]\n val =[]\n for i in param:\n if i in args:\n if i == 'pos':\n val.append(tuple(args.get(i)))\n else:\n val.append(args.get(i)[0])\n attrib = tuple(val)\n if self.name == 'line':\n p.addLine(*attrib)\n elif self.name == 'rectangle':\n p.addRectangle(*attrib)\n elif self.name == 'ellipse':\n p.addEllipse(*attrib)\n elif self.name == 'text':\n p.addText(*attrib)\n\n\n@addToClass(AST.ParameterList)\ndef execute(self):\n args = {}\n for c in self.children:\n t = c.execute()\n args[t[0]] = t[1]\n return args\n\n@addToClass(AST.ParameterNode)\ndef execute(self):\n args = self.children[0].execute(self.name)\n return [self.name, args]\n\n@addToClass(AST.ValueNode)\ndef execute(self, namevalue):\n return [c.execute(namevalue) for c in self.children]\n\n\nif __name__ == \"__main__\":\n try:\n filename = sys.argv[1]\n except:\n filename = \"ExempleBoucle.txt\"\n\n prog = open(filename).read()\n name = os.path.splitext(filename)[0] + '-image.pdf'\n ast = parse(prog)\n\n 
ast.execute(name)\n\n","sub_path":"interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"334160705","text":"# vectors for storing the matrix\r\nu0 = []; u1 = []; u2 = []; l1 = []; x = []\r\ndetA = 1\r\n\r\nN = int(input(\"Enter the matrix dimension N: \"))\r\n\r\nfor i in range(0, N):\r\n x.append(i+1)\r\n u0.append(1.2)\r\n if i < N-1:\r\n u1.append(0.1/(i+1))\r\n l1.append(0.2)\r\n if i < N-2:\r\n u2.append(0.4/((i+1)**2))\r\n\r\n# initialization\r\nl1[0] = l1[0]/u0[0]\r\n\r\n# compute the LU matrix\r\nfor i in range(1, N):\r\n u0[i] = u0[i] - l1[i-1] * u1[i-1]\r\n if i < N-1:\r\n l1[i] = l1[i]/u0[i]\r\n u1[i] = u1[i] - l1[i-1] * u2[i-1]\r\n\r\n# compute the determinant of matrix A\r\nfor i in range(N):\r\n detA *= u0[i]\r\nprint(\"det A = \", detA)\r\n\r\n# compute the intermediate vector from the equation Lz = x (forward substitution, stored in x)\r\nfor i in range(1, N):\r\n x[i] = x[i] - (l1[i-1] * x[i-1])\r\n\r\n# compute the solution vector from the equation Uy = z (back substitution)\r\nx[N-1] = x[N-1]/u0[N-1]\r\nx[N-2] = (x[N-2] - (u1[N-2] * x[N-1])) / u0[N-2]\r\nfor i in reversed(range(N-2)):\r\n x[i] = (x[i] - (u2[i] * x[i+2]) - (u1[i] * x[i+1])) / u0[i]\r\n\r\nprint(\"Result:\")\r\nprint(x)\r\n\r\n","sub_path":"Numerical_methods/NUM3/NUM3.py","file_name":"NUM3.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"463522569","text":"#-*- encoding: utf-8 -*-\n\"\"\"Sugar delivery\"\"\"\n# idea: starting from the smallest values, try x, y until 5X + 3Y = N is satisfied, then stop and return.\n\nn = int(input())\n\ndef solve(n) :\n\n for y in range((n//3)+1) :\n for x in range((n//5)+1) :\n if (5*x+3*y) == n :\n return x + y\n\n return -1\n\n\nanswer = solve(n)\nprint(answer)","sub_path":"BOJ/MATH/2839.py","file_name":"2839.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"190728462","text":"class Lru:\n def __init__(self, frame_size):\n self.frame_size = frame_size\n self.page = []\n self.page_fault = 0\n self.cur_time = 1\n for i in range(frame_size):\n self.page.append(Page())\n\n def find_space(self):\n for i in range(self.frame_size):\n if self.page[i].valid:\n return i\n return -1\n\n def find_page(self, addr):\n for i in range(self.frame_size):\n if self.page[i].physical_address == addr:\n return i\n return -1\n\n def replace(self):\n mn = self.cur_time\n space = 0\n for i in range(self.frame_size):\n if self.page[i].arrival_time < mn:\n mn = self.page[i].arrival_time\n space = i\n return space\n\n def run(self, ref_string):\n for string in ref_string:\n cur_index = self.find_page(string)\n if cur_index == -1:\n cur_index = self.find_space()\n if cur_index == -1:\n cur_index = self.replace()\n self.page_fault += 1\n self.page[cur_index].valid = False\n self.page[cur_index].physical_address = string\n self.page[cur_index].arrival_time = self.cur_time\n self.cur_time += 1","sub_path":"hw2/src/lru.py","file_name":"lru.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"351695902","text":"from decouple import config\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated, BasePermission, SAFE_METHODS\nfrom user.models import BlackListedToken, UserProfile\n\n\nclass GenericAuth(IsAuthenticated):\n def has_permission(self, request, 
view):\n try:\n token = request.headers['Authorization'].split(' ')[1]\n except KeyError:\n return False\n\n try:\n is_blacklisted = BlackListedToken.objects.get(\n user=request.user, token=token)\n\n if is_blacklisted:\n return False\n\n except BlackListedToken.DoesNotExist:\n pass\n return super().has_permission(request, view)\n\n\nclass AdminAuth(IsAdminUser):\n def has_permission(self, request, view):\n username = request.user.username\n is_access_denied = request.user.groups.filter(name='access_denied').exists()\n try:\n token = request.headers['Authorization'].split(' ')[1]\n\n except KeyError:\n return False\n\n try:\n is_blacklisted = BlackListedToken.objects.get(\n user=UserProfile.objects.filter(username=username)[0], token=token)\n\n if is_blacklisted:\n return False\n\n except BlackListedToken.DoesNotExist:\n pass\n if is_access_denied:\n return False\n return super().has_permission(request, view)\n\n\nclass ReadOnly(BasePermission):\n \"\"\"\n The request is a read-only request.\n \"\"\"\n\n def has_permission(self, request, view):\n return bool(\n request.method in SAFE_METHODS or\n request.user\n )\n\n\nclass ReadOnlyAuth(IsAuthenticated):\n def has_permission(self, request, view):\n is_read_only = request.user.groups.filter(name='read_only').exists()\n try:\n token = request.headers['Authorization'].split(' ')[1]\n except KeyError:\n return False\n\n try:\n is_blacklisted = BlackListedToken.objects.get(\n user=request.user, token=token)\n\n if is_blacklisted:\n return False\n\n except BlackListedToken.DoesNotExist:\n pass\n if is_read_only:\n return bool(\n request.method in SAFE_METHODS\n )\n return super().has_permission(request, view)\n\n\nclass IsAdminUserQP(TokenAuthentication):\n def has_permission(self, request, view):\n token = request.query_params.get('token')\n if token:\n user, _ = self.authenticate_credentials(token)\n return user.is_staff\n return False\n\n\nclass ServiceAPIAuth(BasePermission):\n\n def has_permission(self, request, view):\n api_key = 'API-Key ' + config(\"API_KEY\")\n api_key_info = request.headers.get('Authorization')\n\n if api_key_info and api_key_info == api_key:\n return True\n return False\n","sub_path":"shodai/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"252009589","text":"# -*- coding: utf-8 -*-\n\"\"\"\n urls\n ~~~~\n\n URL definitions.\n\n :copyright: 2009 by tipfy.org.\n :license: BSD, see LICENSE.txt for more details.\n\"\"\"\nfrom tipfy import Rule, HandlerPrefix\n\n\ndef get_rules(app):\n \"\"\"Returns a list of URL rules for the Hello, World! 
application.\n\n :param app:\n The WSGI application instance.\n :return:\n A list of class:`tipfy.Rule` instances.\n \"\"\"\n rules = [\n HandlerPrefix('apps.api.handlers.', [\n Rule('/api/v1/tasks', endpoint='api-tasks-list', handler='TasksListHandler'),\n Rule('/api/v1/tasks/', endpoint='api-tasks-specific', handler='SpecificTaskHandler'),\n ]),\n ]\n\n return rules\n","sub_path":"app/apps/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"31440092","text":"#!/usr/bin/env python\nimport os\nimport fnmatch\nimport sys\nimport re\n\nrootdir = './'\nmatches = []\n\nfor root, dirnames, filenames in os.walk(rootdir):\n for filename in fnmatch.filter(filenames, 'out_1.txt'):\n path = os.path.join(root, filename)\n allValues = {}\n resultList = []\n with open (path, 'r') as inf:\n content = inf.read()\n matches = re.findall('Scale factor:[\\s\\S]*?CQET \\(geom.\\):[^\\n]+?\\n', content)\n for match in matches:\n resultValue = {}\n lines = match.split('\\n')\n for line in lines:\n if(line != ''):\n duo = line.split(':')\n allValues[duo[0]] = duo[0]\n resultValue[duo[0]] = re.findall('^[\\d\\s\\./s]*', duo[1].strip())[0].strip()\n\n resultList.append(resultValue)\n \n \n\n with open(path + '.run', 'w+') as outf:\n outf.write('\\t'.join(allValues) + '\\n')\n\n with open(path + '.run', 'a') as outf:\n for result in resultList: \n rowValue = ''\n for value in allValues:\n if value in result:\n rowValue = rowValue + result[value]\n rowValue = rowValue + '\\t'\n outf.write(rowValue + '\\n')\n \n","sub_path":"scripts/parsing_scripts/2b_parsebsbm.py","file_name":"2b_parsebsbm.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"510010452","text":"\n# reference: https://danijar.com/variable-sequence-lengths-in-tensorflow/\n\n# notes:\n# for variable length, paddling seems needed.\n# tf.nn.dynamic_rnn uses a tf.While loop to dynamically construct the graph when it is executed.\n# static run should be creating an unrolled graph for a fixed RNN length.\n\n\n# assume that the sequences are padded with 0 vectors to fill up the remaining time steps in the batch.\n# compute the length of a sequence in the tensorflow graph\ndef length(sequence):\n\t# collapse the frame vectors (3rd dimension of a batch) into scalars using maximum;\n\t# each sequence is now a vector of scalars that will be 0 for the padded frames at the end;\n\t# use tf.sign() to convert the actual frames from their maximum values to values of one;\n\t# this gives us a binary mask of ones for used frames and 0s for unused frames that we can just sum to get the sequence length\n\tused = tf.sign(tf.reduce_max(tf.abs(sequence), 2))\n\tlength = tf.reduce_sum(used, 1)\n\tlength = tf.cast(length, tf.int32)\n\treturn length\n\n\nmax_length = 100\nframe_size = 64\nnum_hidden = 200\n\nsequence = tf.placeholder(\n\ttf.float32,\n\t[None, max_length, frame_size]\n)\n\n# now that we have a vector holding the sequence lenghts, we can pass it to dynamic_rnn(),\n# the function that unfolds our network, using the optional sequence_length parameter.\n# when running the model later, tensorflow will return 0 vectors for states that outputs after these sequence lengths.\n# therefore, weights will not affect those outputs and don't get trained on them.\noutput, state = 
tf.nn.dynamic_rnn(\n\ttf.contrib.rnn.GRUCell(num_hidden),\n\tsequence,\n\tdtype=tf.float32,\n\tsequence_length=length(sequence),\n)\n\n# output will still be of size batch_size x max_length x out_size, but with the last being zero vectors for sequences shorter than the maximum length.\n# when you use the outputs at each time step, as in sequence labelling, we don't want to consider them in our cost function.\n# we mask out the unused frames and compute the mean error over the sequence length by dividing by the actual length.\n# using tf.reduce_mean() does not work here because it would divide by the maximum sequence length.\n\ndef cost(output, target):\n\t# compute cross entropy for each frame\n\tcross_entropy = target * tf.log(output)\n\tcross_entropy = -tf.reduce_sum(cross_entropy, 2)\n\tmask = tf.sign(tf.reduce_max(tf.abs(target), 2))\n\tcross_entropy *= mask\n\n\t# average over actual sequence lengths\n\tcross_entropy = tf.reduce_sum(cross_entropy, 1)\n\tcross_entropy /= tf.reduce_sum(mask, 1)\n\treturn tf.reduce_mean(cross_entropy)\n\n# we flatten the output tensor to shape (frames in all examples x output size).\n# then we construct an index into that by creating a tensor with the start indices for each example tf.range(0, batch_size) * max_length\n# and add the individual sequence lengths to it.\n# tf.gather() then performs the actual indexing.\ndef last_relevant(output, length):\n\tbatch_size = tf.shape(output)[0]\n\tmax_length = tf.shape(output)[1]\n\tout_size = int(output.get_shape()[2])\n\tindex = tf.range(0, batch_size) * max_length + (length - 1)\n\tflat = tf.reshape(output, [-1, out_size])\n\trelevant = tf.gather(flat, index)\n\treturn relevant\n\n# we got the last relevant output and can feed that into a simple softmax layer to predict the class of each sequence.\nnum_classes = 10\nlast = last_relevant(output, length(sequence))\nweight = tf.Variable(\n\ttf.truncated_normal([num_hidden, num_classes], stddev=0.1)\n)\nbias = tf.Variable(tf.constant(0.1, shape=[num_classes]))\nprediction = tf.nn.softmax(tf.matmul(last, weight) + bias)\n","sub_path":"machine_learning/rnn/lstm_varied_lengths/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"78152538","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport csv\nimport json\n\nwith open('data.csv', 'r') as f:\n reader = csv.reader(f)\n header = next(reader)\n data = []\n for row in reader:\n d = {}\n tags = []\n comments = {}\n for i, (r, h) in enumerate(zip(row, header)):\n #print (r , h)\n if '説明' in h:\n pass\n elif 'タグ' == h:\n for s in r.split(';#'):\n tags.append(s) \n elif 'タグ' in h:\n if row[i+1]:\n comments[r]=row[i+1].replace(';#','')\n else: \n d[h]=r\n\n d['タグの説明'] = comments\n\n d['タグ'] = tags\n data.append(d)\n #print(data)\n\n with open('result.json','w') as f:\n json.dump(data, f, indent=4, sort_keys=True, ensure_ascii=False)\n","sub_path":"src/script/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"405471661","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nfrom coaster.db import db\nfrom coaster.gfm import markdown\nfrom coaster.sqlalchemy import BaseMixin, MarkdownColumn\n\nfrom .test_sqlalchemy_models import app1, app2\n\n\nclass MarkdownData(BaseMixin, db.Model):\n __tablename__ = 'md_data'\n value = 
MarkdownColumn('value', nullable=False)\n\n\n# -- Tests --------------------------------------------------------------------\n\n\nclass TestMarkdownColumn(unittest.TestCase):\n app = app1\n\n def setUp(self):\n self.ctx = self.app.test_request_context()\n self.ctx.push()\n db.create_all()\n self.session = db.session\n\n def tearDown(self):\n self.session.rollback()\n db.drop_all()\n self.ctx.pop()\n\n def test_markdown_column(self):\n text = u\"\"\"# this is going to be h1.\\n- Now a list. \\n- 1\\n- 2\\n- 3\"\"\"\n data = MarkdownData(value=text)\n self.session.add(data)\n self.session.commit()\n assert data.value.html == markdown(text)\n assert data.value.text == text\n assert data.value.__str__() == text\n assert data.value.__html__() == markdown(text)\n\n def test_does_not_render_on_load(self):\n text = u\"This is the text\"\n real_html = markdown(text)\n fake_html = u\"This is not the text\"\n data = MarkdownData(value=text)\n self.session.add(data)\n\n # Insert fake rendered data for commit to db\n data.value._html = fake_html\n data.value.changed()\n self.session.commit()\n del data\n\n # Reload from db and confirm HTML is exactly as committed\n data = MarkdownData.query.first()\n assert data.value.text == text\n assert data.value.html == fake_html\n assert data.value.__str__() == text\n assert data.value.__html__() == fake_html\n\n # Edit text and confirm HTML was regenerated, saved and reloaded\n data.value.text = text\n db.session.commit()\n del data\n\n data = MarkdownData.query.first()\n assert data.value.text == text\n assert data.value.html == real_html\n assert data.value.__str__() == text\n assert data.value.__html__() == real_html\n\n def test_raw_value(self):\n text = u\"This is the text\"\n data = MarkdownData()\n self.session.add(data)\n data.value = text\n self.session.commit()\n\n\nclass TestMarkdownColumn2(TestMarkdownColumn):\n app = app2\n","sub_path":"tests/test_sqlalchemy_markdowncolumn.py","file_name":"test_sqlalchemy_markdowncolumn.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"179118502","text":"\n########### Python 2.7 #############\nimport httplib, urllib, os\n\nheaders = {\n # Request headers\n 'Content-Type': 'application/octet-stream',\n 'Ocp-Apim-Subscription-Key': 'f2490df0700d478ea843e4e824adfa92',\n}\n\nparams = urllib.urlencode({\n # Request parameters\n 'visualFeatures': 'Description, Tags, Faces, ImageType, Color, Categories'\n})\n\npic = open('tiggy.jpg', 'rb').read()\n\n\ntry:\n conn = httplib.HTTPSConnection('westus.api.cognitive.microsoft.com')\n conn.request(\"POST\", \"/vision/v1.0/analyze?%s\" % params, pic, headers)\n response = conn.getresponse()\n data = response.read()\n print(data)\n conn.close()\nexcept Exception as e:\n print(\"[Errno {0}] {1}\".format(e.errno, e.strerror))\n\n####################################\n","sub_path":"computer-vision/python27/cv_description_file.py","file_name":"cv_description_file.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"645022006","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of CERN Document Server.\n# Copyright (C) 2017 CERN.\n#\n# Invenio is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# Invenio is 
distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Invenio; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"CDS Video project fields.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom dojson.utils import filter_values, for_each_value\n\nfrom ...models.videos.project import model\n\n\n@model.over('related_links', '^773__')\n@for_each_value\n@filter_values\ndef related_links(self, key, value):\n \"\"\"Related links.\"\"\"\n return {\n 'name': value.get('p'),\n 'url': value.get('u'),\n }\n","sub_path":"cds_dojson/marc21/fields/videos/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"332578092","text":"# reading and writing files\n# file: txt, xml, html\n\n# mode: the mode for opening the file\n#r w a\n#r+ w+ a+\n#read write append\n#rb rb+ wb wb+ ab ab+ / used when doing unit tests\n# file=open(\"python11.txt\",\"r+\",encoding=\"utf-8\")\n# # a=file.read()  # after one read operation the cursor is at the end of the file\n# file.write('阿坤 2020/4/13')\n# # print(a)\n# # 1. after open, a file defaults to r (read-only) mode; writing to it raises an error\n# 2. r+ is read-write: 1) writing first overwrites from the start; 2) reads return the content after the cursor; reads and writes both follow the cursor\n# 3. to write Chinese, specify the encoding encoding='utf-8' / keep reads and writes separate where possible\n\n#####################################################\n# # 1. w is write-only; trying to read raises an error\n# # 2. w+ is read-write; for both w and w+, an existing file is cleared and rewritten / if the file does not exist, a new one is created and then written\n# file=open(\"python12.txt\",\"w+\",encoding=\"utf-8\")\n# file.write(\"Hahaha, today was a fulfilling day!\")\n# extension: how do you move the cursor? (study on your own)\n\n#######################################################\n# 1. a appends; a+ (recommended)\n# 2. if the file exists, writes are appended at the end; if it does not exist, a new file is created for the output\n# file=open(\"python13.txt\",\"a+\",encoding=\"utf-8\")\n# file.write(\"\\nToday was a pleasant day\")  # the escape \\n starts a new line before writing\n\n###############***************##################\n# # * focus on mastering 'r' and 'a'\n# file=open('python13.txt','r',encoding='utf-8')\n# # print(file.read())  # read all content\n# # print(file.readline())\n# # print(file.readline())  # read line by line; this reads two lines\n# print(file.readlines())  # read multiple lines, returned as a list\n\nfile_01=open('python13.txt','a',encoding='utf-8')\nfile_01.write(\"hhhhhhhhh\")  # write a single line\nfile_01.writelines([\"\\n33333\\n\",\"4444444\\n\",\"555555\"])  # write multiple lines\n# an absolute path can also be used\n","sub_path":"test_zy/test_03/do_file.py","file_name":"do_file.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"265823412","text":"#!/usr/bin/python3\n# example_class_import.py\n# Shows how to import a class from a module.\n\nfrom example_class_module import C1, C2\n\nobj1 = C1(1)\nprint(f'obj1.x = {obj1.x}')\n\nobj2 = C2(2)\nprint(f'obj2.y = {obj2.y}')\n","sub_path":"programming/python/exercises/crash_course/09_classes/example_class_import.py","file_name":"example_class_import.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"126969626","text":"class computer:\r\n def __init__(self, cpu, ram):\r\n self.c = cpu\r\n self.r = ram\r\n\r\n def config(self):\r\n print(\"config is :\",self.c,self.r)\r\n\r\n\r\ncomp1 = computer(\"i5\",16)\r\ncomp2 = computer(\"i3\",8)\r\ncomp1.config()\r\ncomp2.config()\r\n","sub_path":"init inpython.py","file_name":"init 
inpython.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"166619311","text":"from network import Network\nfrom optimizer import Optimizer\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef train_networks(networks, dataset):\n # Función para entrenar a cada una de las redes de la población\n\n pbar = tqdm(total=len(networks)) # Para mostrar la barra de progreso en la terminal\n for network in networks:\n network.train(dataset) # Entrenar a la red\n pbar.update(1)\n pbar.close()\n\n\ndef get_average_accuracy(networks):\n # Función para calcular el promedio de precisión\n\n total_accuracy = 0\n for network in networks:\n total_accuracy += network.accuracy\n\n return total_accuracy / len(networks)\n\n\ndef generate(generations, population, params, dataset):\n # Función para crear las generaciones con sus respectivas poblaciones\n\n f= open(\"results.txt\",\"a+\") # Abrir archivo donde se guardarán los resultados\n fg= open(\"resultsByGen.txt\",\"a+\") # Abrir archivo donde se guardarán los resultados\n optimizer = Optimizer(params, 0.1, 0.2) # Instanciar el algoritmo genético\n networks = optimizer.population(population) # Crear la población con el número deseado\n\n # ciclo de generaciones\n for i in range(generations):\n print('generation', i)\n fg.write('Generacion %d\\r\\n'%i)\n # Entrenar a todas las redes de la población\n train_networks(networks, dataset)\n write_results(networks, fg)\n\n # Obtener la precisión promedio de la generación\n average_accuracy = get_average_accuracy(networks)\n print(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n\n # Si no es la última generación, evolucionar a la población\n if i != generations - 1:\n networks = optimizer.evolve(networks)\n\n # Una vez terminado el ciclo de generaciones, se sortean los resultados según la precisión\n networks = sorted(networks, key=lambda x: x.accuracy, reverse=True)\n\n #Escribir los resultados (5 mejores redes) en un archivo\n write_results(networks[:5], f)\n f.close()\n\n\ndef write_results(networks, f):\n # Función para escribir los resultados a un archivo\n for network in networks:\n f.write(network.get_result())\n\ndef main():\n # Función principal, lee del input el tamaño de la población y el número de generaciones\n print('Tamaño de población: ')\n population = int(input()) # Leer el tamaño de población\n\n # Población mínima de 4 para poder cruzar las redes\n if population < 4:\n print('Población mínima de 4')\n return\n\n print('Número de generaciones: ')\n generations = int(input()) # Leer el número de generaciones\n dataset = 'fire.csv' # Selección de base de datos\n\n # Parámetros iniciales para los experimentos, definidos en rangos\n\n #####################################\n # Cambiar estos parámetros a #\n # la hora de realizar experimentos #\n #####################################\n params = {\n 'neurons': list(range(3, 24)),\n 'layers': list(range(1, 5)),\n 'epochs': list(range(0, 601, 200)),\n 'lr': list(np.arange(0.2, 0.41, 0.01)),\n 'momentum': list(np.arange(0.2, 0.41, 0.01))\n }\n\n print('Iniciando con parámetros: ', params)\n # Empieza el proceso generando las poblaciones requeridas\n generate(generations, population, params, dataset)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} 
+{"seq_id":"417775959","text":"import math\nimport matplotlib.pyplot as plt\nimport numpy\nimport csv \nimport re\n\nshapelist = []\nwith open('shapes.txt') as csvfile:\n shapes = csv.reader(csvfile)\n for i in shapes:\n shapelist.append(i)\nprint(shapelist)\n\nreclist = []\nfor x in shapelist:\n if 'Rectangle' in x:\n y = x \n reclist.append(y)\nprint(reclist)\n\nclass IterShape(type):\n def __iter__ (cls):\n return iter(cls.reclist)\n\nclass Rectangle(metaclass = IterShape):\n reclist = []\n def __init__(self,name, l, w):\n self.reclist.append(self)\n\n self.name = name\n self.length = l\n self.width = w\n\n def rectangle_area(self):\n return self.length*self.width\n\n\n\n\nfor instance in Rectangle:\n print(\"The area of \" + instance.name + \" is: \" + str(instance.rectangle_area()))\n\n","sub_path":"gis_programming/Homeworks/HW03/HW03_plswork.py","file_name":"HW03_plswork.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"55271529","text":"import os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score\nfrom imblearn.under_sampling import ClusterCentroids, RandomUnderSampler, NearMiss\nfrom imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN\n\n\n# from sklearn.metrics import \n\ndef logisticRegressionClassification(train_mat, test_mat, headers, binary = False):\n\ttrain = pd.DataFrame(data = train_mat, columns = headers)\n\ttest = pd.DataFrame(data = test_mat, columns = headers)\n\n\tif binary:\n\t\ttrain[train >= 0.5] = 1\n\t\ttrain[train < 0.5] = 0\n\n\t\ttest[test >= 0.5] = 1\n\t\ttest[test < 0.5] = 0\n\n\tret_list = []\n\tcount = 0\n\n\tfor col in headers:\n\t\tcount = count + 1\n\t\tprint(count)\n\n\t\tx_train = train.drop([col], axis = 1)\n\t\ty_train = train.loc[:, col]\n\t\t# y_train[y_train >= 0.5] = 1\n\t\t# y_train[y_train < 0.5] = 0\n\n\t\tx_test = test.drop([col], axis = 1)\n\t\ty_test = test.loc[:, col]\n\n\t\tlr = LogisticRegression(max_iter = 300, random_state = 0)\n\t\t# print(\"Starting training\")\n\t\ttry:\n\t\t\tlr.fit(x_train, y_train)\n\t\t\t# print(\"Ending Training\")\n\t\t\ty_pred = lr.predict(x_test)\n\t\t\tf1 = f1_score(y_test, y_pred)\n\t\t\tacc = accuracy_score(y_test, y_pred)\n\t\t\trecall = recall_score(y_test, y_pred)\n\t\t\tprec = precision_score(y_test, y_pred)\n\n\t\t\tprob_true = sum(y_test)/len(y_test)\n\t\t\tprob_pred = sum(y_pred)/len(y_pred)\n\n\t\texcept:\n\t\t\tf1 = 0.0\n\t\t\tacc = 0.0\n\t\t\trecall = 0.0\n\t\t\tprec = 0.0\n\t\t\tprob_pred = 0.0\n\t\t\tprob_true = 0.0\n\t\t\tprint(\"value error detected. 
one class of values encountered.\")\n\n\t\tprint(f1)\n\n\n\t\tretl = [col, f1, acc, recall, prec, prob_true, prob_pred]\n\t\tret_list.append(retl)\n\t\t# x_test = train.drop([col], axis = 1)\n\t\t# y_train = train.loc[:, col]\n\t\t# rf = RandomForestClassifier(n_estimators = 10000, random_state = 0, n_jobs = -1)\n\t\t# if count == 5:\n\t\t# \tbreak\n\n\n\tretdf = pd.DataFrame(ret_list, columns = ['variable', \"f1\", \"accuracy\", \"recall\", \"precision\", \"prob_occurence_true\", \"prob_occurence_pred\"])\n\t\n\treturn retdf\n\n\n\n\nif __name__ == \"__main__\":\n\n\theaders_dict = np.load(\"../converted_raw/cerner.types\", allow_pickle = True)\n\tbh = list(headers_dict.keys())\n\t# bh = bh[:10]\n\n\tfilename_generated = os.path.join(\"../raw/healthgan_v3.npy\") \n\tfile_generated = np.load(filename_generated, allow_pickle = True)\n\t# file_generated = file_generated[:10, :10]\n\tprint(file_generated.shape)\n\t\t\n\tfilename_test = os.path.join(\"../data/cerner_test.matrix\")\n\tfile_test = np.load(filename_test, allow_pickle = True)\n\t# file_test = file_test[:10, :10]\n\tprint(file_test.shape)\n\n\tfilename_original = os.path.join(\"../data/cerner_train.matrix\")\n\tfile_original = np.load(filename_original, allow_pickle = True)\n\t# file_original = file_original[:10, :10]\t\n\tprint(file_original.shape)\n\n\n\n\tdf = logisticRegressionClassification(train_mat = file_generated, test_mat = file_test, headers = bh, binary = True)\n\tdf.to_csv(\"../results/healthgan/logistic_regression_metrics_healthgan_generated.csv\", index = False)\n\n\n\tdf = logisticRegressionClassification(train_mat = file_original, test_mat = file_test, headers = bh, binary = True)\n\tdf.to_csv(\"../results/healthgan/logistic_regression_metrics_healthgan_original.csv\", index = False)\n\n\n\t# df = randomForestUndersampling(train_mat = file_generated, test_mat = file_test, headers = bh, binary = True)\n\t# df.to_csv(\"../summary_stats/random_forest_metrics_undersampling_randomundersampler.csv\", index = False)\n\n\t# df = randomForestOversampling(train_mat = file_generated, test_mat = file_test, headers = bh, binary = True)\n\t# df.to_csv(\"../summary_stats/random_forest_metrics_oversampling.csv\", index = False)\n\n\n\t# print(file_test.shape)\n\n\n############Test\n\n\n\n\n# def randomForestUndersampling(train_mat, test_mat, headers, binary = False):\n# \ttrain = pd.DataFrame(data = train_mat, columns = headers)\n# \ttest = pd.DataFrame(data = test_mat, columns = headers)\n\n# \tif binary:\n# \t\ttrain[train >= 0.5] = 1\n# \t\ttrain[train < 0.5] = 0\n\n# \t\ttest[test >= 0.5] = 1\n# \t\ttest[test < 0.5] = 0\n\n# \tret_list = []\n# \tcount = 0\n\n# \tfor col in headers:\n# \t\tcount = count + 1\n# \t\tprint(count)\n\n# \t\tx_train = train.drop([col], axis = 1)\n# \t\ty_train = train.loc[:, col]\n\n# \t\t# cc = ClusterCentroids(random_state = 0)\n# \t\tcc = RandomUnderSampler(random_state=0)\n# \t\t# cc = NearMiss(version=1)\n# \t\t# print(\"clustering......\")\n# \t\tx_train_resampled, y_train_resampled = cc.fit_resample(x_train, y_train)\n# \t\t# print(\"clustered.......\")\n\n# \t\tx_test = test.drop([col], axis = 1)\n# \t\ty_test = test.loc[:, col]\n\n# \t\trf = RandomForestClassifier(n_estimators = 300, random_state = 0, n_jobs = -1)\n# \t\t# print(\"Starting training\")\n# \t\trf.fit(x_train_resampled, y_train_resampled)\n# \t\t# print(\"Ending Training\")\n# \t\ty_pred = rf.predict(x_test)\n# \t\t# exit(0)\n\n# \t\tf1 = f1_score(y_test, y_pred)\n# \t\tacc = accuracy_score(y_test, y_pred)\n# \t\trecall = 
recall_score(y_test, y_pred)\n\n# \t\tprob_true = sum(y_test)/len(y_test)\n# \t\tprob_pred = sum(y_pred)/len(y_pred)\n\n# \t\tprint(f1, pd.Series(y_test).isin([1]).sum(), pd.Series(y_pred).isin([1]).sum())\n\n# \t\tretl = [col, acc, f1, recall, prob_true, prob_pred]\n# \t\tret_list.append(retl)\n# \t\t# x_test = train.drop([col], axis = 1)\n# \t\t# y_train = train.loc[:, col]\n# \t\t# rf = RandomForestClassifier(n_estimators = 10000, random_state = 0, n_jobs = -1)\n# \t\t# if count == 5:\n# \t\t# \tbreak\n\n\n# \tretdf = pd.DataFrame(ret_list, columns = ['variable', \"f1\", \"accuracy\", \"recall\", \"prob_occurence_true\", \"prob_occurence_pred\"])\n\t\n# \treturn retdf\n\n\n\n\n# def randomForestOversampling(train_mat, test_mat, headers, binary = False):\n# \ttrain = pd.DataFrame(data = train_mat, columns = headers)\n# \ttest = pd.DataFrame(data = test_mat, columns = headers)\n\n# \tif binary:\n# \t\ttrain[train >= 0.5] = 1\n# \t\ttrain[train < 0.5] = 0\n\n# \t\ttest[test >= 0.5] = 1\n# \t\ttest[test < 0.5] = 0\n\n# \tret_list = []\n# \tcount = 0\n\n# \tfor col in headers:\n# \t\tcount = count + 1\n# \t\tprint(count)\n\n# \t\tcol = headers[25]\n\n# \t\tx_train = train.drop([col], axis = 1)\n# \t\ty_train = train.loc[:, col]\n\n# \t\t# ros = RandomOverSampler(random_state=0)\n# \t\tros = SMOTE(random_state=0)\n# \t\t# ros = ADASYN(random_state=0)\n# \t\t# print(\"generating ADASYN......\")\n# \t\tx_train_resampled, y_train_resampled = ros.fit_resample(x_train, y_train)\n# \t\t# print(\"generated ADASYN.......\")\n\n\n# \t\tx_test = test.drop([col], axis = 1)\n# \t\ty_test = test.loc[:, col]\n\n# \t\trf = RandomForestClassifier(n_estimators = 100, n_jobs = -1)\n# \t\t# print(\"Starting training\")\n# \t\trf.fit(x_train_resampled, y_train_resampled)\n# \t\t# print(\"Ending Training\")\n# \t\ty_pred = rf.predict(x_test)\n# \t\tprint(pd.Series(y_test).isin([1]).sum())\n# \t\tprint(pd.Series(y_pred).isin([1]).sum())\n# \t\texit(0)\n\n# \t\tf1 = f1_score(y_test, y_pred)\n# \t\tacc = accuracy_score(y_test, y_pred)\n# \t\trecall = recall_score(y_test, y_pred)\n\n# \t\tprob_true = sum(y_test)/len(y_test)\n# \t\tprob_pred = sum(y_pred)/len(y_pred)\n\n# \t\tprint(f1)\n\n# \t\tretl = [col, acc, f1, recall, prob_true, prob_pred]\n# \t\tret_list.append(retl)\n# \t\t# x_test = train.drop([col], axis = 1)\n# \t\t# y_train = train.loc[:, col]\n# \t\t# rf = RandomForestClassifier(n_estimators = 10000, random_state = 0, n_jobs = -1)\n# \t\t# if count == 5:\n# \t\t# \tbreak\n\n\n# \tretdf = pd.DataFrame(ret_list, columns = ['variable', \"f1\", \"accuracy\", \"recall\", \"prob_occurence_true\", \"prob_occurence_pred\"])\n\t\n# \treturn retdf\n","sub_path":"src/logistic_regression_healthgan.py","file_name":"logistic_regression_healthgan.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"618476204","text":"from types import FunctionType\nfrom functools import partial\nimport logging as root_logger\nfrom string import ascii_uppercase\nimport IPython\nlogging = root_logger.getLogger(__name__)\n\nclass Node:\n \"\"\" The Container for RBTree Data \"\"\"\n i = 0\n \n def __init__(self,value,parent=None,data=None,eqFunc=None):\n self.id = Node.i\n Node.i += 1\n #Children:\n self.left = None\n self.right = None\n #Parent:\n self.parent = parent\n #Node Date:\n self.red = True\n self.value = value\n self.data = {}\n self.eqFunc = eqFunc\n if data is not None:\n assert(isinstance(data,dict))\n self.data.update(data)\n\n 
#todo: create templates for data.\n #for arc/voronoi/beachline: left and right circle events\n\n\n #------------------------------\n # def Basic Info\n #------------------------------\n\n def __hash__(self):\n if self.value is not None and hasattr(self.value, \"id\"):\n return self.value.id\n return self.id\n \n def __eq__(self, other):\n assert(other is None or isinstance(other, Node))\n result = False\n if other is None:\n return result\n result = self.id == other.id\n return result\n\n def __repr__(self):\n if self.value is not None and hasattr(self.value, \"id\"):\n return \"({}_{})\".format(ascii_uppercase[self.value.id % 26], int(self.value.id/26), self.id)\n else:\n return \"({}:{})\".format(self.value, self.id)\n \n\n def getBlackHeight(self,parent=None):\n current = self\n height = 0\n while current is not None:\n if not current.red:\n height += 1\n current = current.parent\n return height\n\n def min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current\n\n def max(self):\n current = self\n while current.right is not None:\n current = current.right\n return current\n\n def getPredecessor(self):\n if self.left is not None:\n return self.left.max()\n if self.parent is not None and not self.parent.on_left(self):\n return self.parent\n prev = self\n current = self.parent\n count = 0\n while current is not None and current.right != prev:\n prev = current\n current = current.parent\n count += 1\n\n if current is not self:\n return current\n else:\n return None\n\n def getSuccessor(self):\n if self.right is not None:\n return self.right.min()\n if self.parent is not None and self.parent.on_left(self):\n return self.parent\n prev = self\n current = self.parent\n while current is not None and current.left != prev:\n prev = current\n current = current.parent\n\n if current is not self:\n return current\n else:\n return None\n\n def getPredecessor_while(self, condition):\n assert(isinstance(condition, (FunctionType, partial)))\n results = []\n current = self.getPredecessor()\n while current is not None and condition(current):\n results.append(current)\n current = current.getPredecessor()\n return results\n \n\n def getSuccessor_while(self, condition):\n assert(isinstance(condition, (FunctionType, partial)))\n results = []\n current = self.getSuccessor()\n while current is not None and condition(current):\n results.append(current)\n current = current.getSuccessor()\n return results\n\n\n def getNeighbours_while(self, condition):\n results = []\n results += self.getPredecessor_while(condition)\n results += self.getSuccessor_while(condition)\n return results\n\n def isLeaf(self):\n return self.left is None and self.right is None\n\n #------------------------------\n # def Basic Update\n #------------------------------\n \n def add_left(self,node,force=False):\n if self == node:\n node = None\n if self.left == None or force:\n self.link_left(node)\n else:\n self.getPredecessor().add_right(node)\n logging.debug(\"{}: Adding {} to Left\".format(self,node))\n\n \n def add_right(self,node,force=False):\n if self == node:\n node = None\n if self.right == None or force:\n self.link_right(node)\n else:\n self.getSuccessor().add_left(node)\n logging.debug(\"{}: Adding {} to Right\".format(self,node))\n\n def link_left(self,node):\n assert(node is not self)\n if node is not None:\n assert(self.right is not node)\n assert(self.parent is not node)\n assert(node.left is not self)\n assert(node.right is not self)\n self.left = node\n if self.left is not None:\n 
self.left.parent = self\n logging.debug(\"{} L-> {}\".format(self,node))\n\n\n def link_right(self,node):\n assert(node is not self)\n if node is not None:\n assert(self.parent is not node)\n assert(node.left is not self)\n assert(node.right is not self)\n assert(self.left is not node)\n self.right = node\n if self.right is not None:\n self.right.parent = self\n logging.debug(\"{} R-> {}\".format(self,node))\n\n \n def disconnect_from_parent(self):\n parent = self.parent\n if self.parent != None:\n if self.parent.on_left(self):\n self.parent.left = None\n else:\n self.parent.right = None\n self.parent = None\n logging.debug(\"Disconnecting {} -> {}\".format(parent,self))\n\n \n\n def disconnect_left(self):\n if self.left != None:\n node = self.left\n self.left = None\n node.parent = None\n logging.debug(\"{} disconnecting left\".format(self))\n return node\n return None\n\n def disconnect_right(self):\n if self.right != None:\n node = self.right\n self.right = None\n node.parent = None\n logging.debug(\"{} disconnecting right\".format(self))\n return node\n return None\n\n def on_left(self, node):\n assert(isinstance(node, Node))\n return self.left == node\n \n def rotate_right(self):\n setAsRoot = True\n orig_parent = None\n originally_on_left = False\n newHead = self.left\n newRight = self\n newLeft = newHead.right\n if self.parent is not None:\n setAsRoot = False\n originally_on_left = self.parent.on_left(self)\n orig_parent = self.parent\n newRight.disconnect_from_parent()\n newHead.disconnect_from_parent()\n if newLeft is not None:\n newLeft.disconnect_from_parent()\n\n newRight.link_left(newLeft)\n newHead.link_right(newRight)\n if orig_parent is not None:\n if originally_on_left:\n orig_parent.link_left(newHead)\n else:\n orig_parent.link_right(newHead)\n return setAsRoot, newHead\n\n def rotate_left(self):\n setAsRoot = True\n orig_parent = None\n originally_on_left = False\n newHead = self.right\n newLeft = self\n newRight = newHead.left\n if self.parent is not None:\n setAsRoot = False\n originally_on_left = self.parent.on_left(self)\n orig_parent = self.parent\n newLeft.disconnect_from_parent()\n newHead.disconnect_from_parent()\n if newRight is not None:\n newRight.disconnect_from_parent()\n\n newLeft.link_right(newRight)\n newHead.link_left(newLeft)\n if orig_parent is not None:\n if originally_on_left:\n orig_parent.link_left(newHead)\n else:\n orig_parent.link_right(newHead)\n return setAsRoot, newHead\n \n \n \n \n \n #------------------------------\n # def Deprecated\n #------------------------------\n \n def get_predecessor(self):\n raise Exception(\"Deprecated: use getPredecessor\")\n\n def get_successor(self):\n raise Exception(\"Deprecated: use getSuccessor\")\n\n def compare_simple(self):\n raise Exception(\"Deprecated: use appropriate comparison function in rbtree\")\n\n def intersect(self):\n raise Exception(\"Deprecated: use appropriate method in value\")\n\n def update_arcs(self):\n raise Exception(\"Deprecated: use rbtree update_values with appropriate lambda\")\n\n def countBlackHeight_null_add(self):\n raise Exception(\"Deprecated\")\n\n def print_colour(self):\n raise Exception(\"Deprecated: check node.red \")\n\n def print_blackheight(self):\n raise Exception(\"Deprecated\")\n\n def print_tree(self):\n raise Exception(\"Deprecated\")\n\n def print_tree_plus(self):\n raise Exception(\"Deprecated\")\n\n def getMinValue(self):\n raise Exception(\"Deprecated\")\n\n def getMaxValue(self):\n raise Exception(\"Deprecated\")\n\n def getMin(self):\n raise 
Exception(\"Deprecated: use .min()\")\n\n def getMax(self):\n raise Exception(\"Deprecated: use .max()\")\n \n def disconnect_hierarchy(self):\n #return [self.disconnect_left(),self.disconnect_right()]\n raise Exception(\"Deprecated\")\n\n def disconnect_sequence(self):\n # self.disconnect_right()\n # self.disconnect_left()\n raise Exception(\"Deprecated\")\n","sub_path":"cairo_utils/rbtree/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":9749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"551229145","text":"\nimport numpy\nimport os\nimport sys\nimport pylmps\nimport molsys\n\nT=300.0\nP=10000 # lammps real units: [P] = atm\n\nrelax= [0.1,1.0] # thermostat, barostat\n\nnumpy.random.seed(1581002512)\n\n\nname = [x for x in os.listdir('.') if x.rsplit('.')[-1] == 'mfpx']\nif len(name) != 1:\n print('ERROR, multiple mfpx files found')\n exit()\nname = name[0].rsplit('.',1)[0]\n\nfilename = name +'.mfpx'\n\nm = molsys.mol.from_file(filename)\nm.addon('ff')\nm.ff.read(name)\n\npl = pylmps.pylmps(name)\npl.control['kspace'] = True\npl.setup(mol=m,kspace=True)\n\npl.MD_init('equil_NVT_ber',ensemble='nvt',\n T=[10,T],\n thermo='ber',\n relax = [0.01],\n tnstep=1000,\n startup=True,\n startup_seed=1581002512,\n )\npl.MD_run(100000)\n\npl.MD_init('equil_NVT',ensemble='nvt',\n T=T,\n thermo='hoover',\n relax = [relax[0]],\n tnstep=1000,\n startup=False,\n )\npl.MD_run(250000)\n\n\npl.MD_init('equil_npt',ensemble='npt',\n p=0.0,\n T=T,\n thermo='mttk',\n startup=False,\n relax=relax,\n tnstep=1000,\n mttk_volconstraint='yes',\n )\npl.MD_run(250000)\n\npl.MD_init('produ_pminus',ensemble='npt',\n T=T,\n p=[0.0,-P/2.0],\n thermo='mttk',\n startup=False,\n relax=relax,\n tnstep=500,\n mttk_volconstraint='no',\n )\npl.MD_run(5000000)\n\npl.MD_init('produ_pplus',ensemble='npt',\n T=T,\n p=[-P/2.0,0],\n thermo='mttk',\n startup=False,\n relax=relax,\n tnstep=500,\n mttk_volconstraint='no',\n )\npl.MD_run(5000000)\n\n\npl.MD_init('produ',ensemble='npt',\n T=T,\n p=[0.0,P],\n thermo='mttk',\n startup=False,\n relax=relax,\n tnstep=500,\n mttk_volconstraint='no',\n )\npl.MD_run(10000000)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#pl.lmps.command('fix 1 all mttknhc temp ${temperature} ${temperature} ${tdamp} tri ${pressure} ${pressure} ${pdamp} volconstraint yes')\n#pl.lmps.command('fix 1 all mttknhc temp %8.4f %8.4f %8.4f tri %12.6f %12.6f %12.6f volconstraint yes' % (T,T,Trelax,P,P,Prelax))\n#pl.lmps.command('fix_modify 1 energy yes') # Add thermo/baro contributions to \n\n#pl.lmps.command('compute thermo_temp2 all temp')\n#pl.lmps.command('compute thermo_press2 all pressure thermo_temp2')\n#pl.lmps.command('dump ptens_dump all local 10 ptens.dump index thermo_press')\n#pl.lmps.command('dump ptens_dump all custom 10 ptens.dump id pxx pyy pzz pxy pxz pyz')\n#pl.lmps.command('thermo_style custom step ecoul elong ebond eangle edihed eimp pe\\\n# ke etotal temp press vol cella cellb cellc cellalpha cellbeta cellgamma\\\n# pxx pyy pzz pxy pxz pyz')\n","sub_path":"90-Pallach-Keupp-NatCommun/NPT_simulations/Pramp_NPT.py","file_name":"Pramp_NPT.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"400432301","text":"from tkinter import *\nfrom tkinter import ttk\nimport mysql.connector\nfrom tkinter import messagebox\n\n\nclass orphans:\n\tdef __init__(self, root):\n\t\tself.root = root\n\t\tself.root.title(\"Orphanage management 
system\")\n\t\tself.root.geometry(\"1200x1200\")\n\n\t\tself.id_var = StringVar()\n\t\tself.name_var = StringVar()\n\t\tself.email_var = StringVar()\n\t\tself.gender_var = StringVar()\n\t\tself.dob_var = StringVar()\n\t\tself.contact_var = StringVar()\n\t\tself.search_by=StringVar()\n\t\tself.search_txt=StringVar()\n\n\t\ttitle = Label(self.root, text=\"ORPHANAGE MANAGEMENT SYSTEM\", font=(\"times new roman\", 30, \"bold\"), fg=\"black\",bg=\"RosyBrown1\", bd=4, relief=RAISED)\n\t\ttitle.pack(side=TOP, fill=X)\n\n\t\t# Manage frame\n\t\tManage_frame = Frame(self.root, bd=4, relief=RIDGE, bg=\"RosyBrown1\")\n\t\tManage_frame.place(x=20, y=80, width=500, height=580)\n\n\t\t# manage title\n\t\tM_title = Label(Manage_frame, text=\"MANAGE ORPHAN\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\",bg=\"RosyBrown1\", bd=3, relief=RAISED)\n\t\tM_title.grid(row=0, column=0, padx=20, pady=20)\n\n\t\t# Next Button\n\t\tbackbtn4 = Button(self.root, text='Next', font=\"Arial 10 bold\", borderwidth=0, bg='grey56')\n\t\tbackbtn4.place(x=1150, y=10, width=80, height=30)\n\n\t\t# labels\n\t\tlbl_roll = Label(Manage_frame, text=\"Id\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tlbl_roll.grid(row=1, column=0, padx=20, pady=5, sticky=\"w\")\n\t\troll_entry = ttk.Entry(Manage_frame, textvariable=self.id_var, font=(\"times new roman\", 15, \"bold\"))\n\t\troll_entry.grid(row=1, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\tlbl_name = Label(Manage_frame, text=\"Name\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tlbl_name.grid(row=2, column=0, padx=20, pady=5, sticky=\"w\")\n\t\tname_entry = ttk.Entry(Manage_frame, textvariable=self.name_var, font=(\"times new roman\", 15, \"bold\"))\n\t\tname_entry.grid(row=2, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\tlbl_email = Label(Manage_frame, text=\"Email\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tlbl_email.grid(row=3, column=0, padx=20, pady=5, sticky=\"w\")\n\t\temail_entry = ttk.Entry(Manage_frame, textvariable=self.email_var, font=(\"times new roman\", 15, \"bold\"))\n\t\temail_entry.grid(row=3, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\tlbl_gender = Label(Manage_frame, text=\"Gender\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tlbl_gender.grid(row=4, column=0, padx=20, pady=5, sticky=\"w\")\n\t\tcombo_gender = ttk.Combobox(Manage_frame, textvariable=self.gender_var, width=18, font=(\"times new roman\", 15, \"bold\"), state=\"readonly\")\n\t\tcombo_gender[\"values\"] = (\"Male\", \"Female\", \"other\")\n\t\tcombo_gender.grid(row=4, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\tlbl_contact = Label(Manage_frame, text=\"Contact\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\",bg=\"RosyBrown1\")\n\t\tlbl_contact.grid(row=5, column=0, padx=20, pady=5, sticky=\"w\")\n\t\tcontact_entry = ttk.Entry(Manage_frame, textvariable=self.contact_var, font=(\"times new roman\", 15, \"bold\"))\n\t\tcontact_entry.grid(row=5, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\tlbl_dob = Label(Manage_frame, text=\"DOB\", font=(\"times new roman\", 20, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tlbl_dob.grid(row=6, column=0, padx=20, pady=5, sticky=\"w\")\n\t\tdob_entry = ttk.Entry(Manage_frame, textvariable=self.dob_var, font=(\"times new roman\", 15, \"bold\"))\n\t\tdob_entry.grid(row=6, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\tlbl_address = Label(Manage_frame, text=\"Address\", font=(\"times new 
roman\", 20, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tlbl_address.grid(row=7, column=0, padx=20, pady=5, sticky=\"w\")\n\t\tself.address_text = Text(Manage_frame, width=20, height=3, font=(\"times new roman\", 15, \"bold\"))\n\t\tself.address_text.grid(row=7, column=0, padx=180, pady=5, sticky=\"w\")\n\n\t\t# Buttons Frame\n\t\tbtn_frame = Frame(self.root, bd=4, relief=RIDGE, bg=\"RosyBrown1\")\n\t\tbtn_frame.place(x=40, y=560, width=450)\n\n\t\tadd_btn = Button(btn_frame, text=\"ADD\", command=self.add_orphans, width=11, height=2, fg=\"black\", bg=\"seashell\")\n\t\tadd_btn.grid(row=0, column=0, padx=10, pady=10)\n\n\t\tupdate_btn = Button(btn_frame, text=\"UPDATE\", command=self.update_orphans, width=11, height=2, fg=\"black\", bg=\"seashell\")\n\t\tupdate_btn.grid(row=0, column=1, padx=10, pady=10)\n\n\t\tdelete_btn = Button(btn_frame, text=\"DELETE\", command=self.delete_orphans, width=11, height=2, fg=\"black\", bg=\"seashell\")\n\t\tdelete_btn.grid(row=0, column=2, padx=10, pady=10)\n\n\t\tclear_btn = Button(btn_frame, text=\"CLEAR\", command=self.clear, width=11, height=2, fg=\"black\", bg=\"seashell\")\n\t\tclear_btn.grid(row=0, column=3, padx=10, pady=10)\n\n\t\t# Details Frame\n\t\tDetails_frame = Frame(self.root, bd=4, relief=RIDGE, bg=\"RosyBrown1\")\n\t\tDetails_frame.place(x=540, y=80, width=710, height=580)\n\n\t\tsearch_lbl = Label(Details_frame, text=\"Search By\", font=(\"times new roman\", 18, \"bold\"), fg=\"black\", bg=\"RosyBrown1\")\n\t\tsearch_lbl.grid(row=1, column=0, padx=5, pady=10, sticky=\"w\")\n\n\t\tsearch_combo = ttk.Combobox(Details_frame, width=16, font=(\"times new roman\", 13, \"bold\"), state=\"readonly\")\n\t\tsearch_combo[\"values\"] = (\"Select option\", \"Id\", \"Name\", \"Contact\")\n\t\tsearch_combo.grid(row=1, column=1, padx=5, pady=10, sticky=\"w\")\n\t\tsearch_combo.current(0)\n\n\t\tsearch_entry = ttk.Entry(Details_frame, font=(\"times new roman\", 13, \"bold\"))\n\t\tsearch_entry.grid(row=1, column=2, padx=5, pady=10, sticky=\"w\")\n\n\t\tsearch_btn = Button(Details_frame, command=self.search_orphans, text=\"Search\", width=8, height=2, fg=\"black\", bg=\"seashell\")\n\t\tsearch_btn.grid(row=1, column=3, padx=10, pady=10)\n\n\t\tshowall_btn = Button(Details_frame, text=\"Show All\", width=8, height=2, fg=\"black\", bg=\"seashell\")\n\t\tshowall_btn.grid(row=1, column=4, padx=10, pady=10)\n\n\t\t# Table Frame\n\n\t\tTable_frame = Frame(Details_frame, bd=4, relief=RIDGE, bg=\"RosyBrown1\")\n\t\tTable_frame.place(x=10, y=70, width=680, height=490)\n\n\t\tscroll_x = ttk.Scrollbar(Table_frame, orient=HORIZONTAL)\n\t\tscroll_y = ttk.Scrollbar(Table_frame, orient=VERTICAL)\n\n\t\tself.orphans_table = ttk.Treeview(Table_frame, column=(\"id\", \"name\", \"email\", \"dob\", \"gender\", \"contact\", \"address\"), xscrollcommand=scroll_x.set, yscrollcommand=scroll_y.set)\n\t\tscroll_x.pack(side=BOTTOM, fill=X)\n\t\tscroll_y.pack(side=RIGHT, fill=Y)\n\n\t\tscroll_x.config(command=self.orphans_table.xview)\n\t\tscroll_y.config(command=self.orphans_table.yview)\n\n\t\tself.orphans_table.heading(\"id\", text=\"Id\")\n\t\tself.orphans_table.heading(\"name\", text=\"Name\")\n\t\tself.orphans_table.heading(\"email\", text=\"Email\")\n\t\tself.orphans_table.heading(\"dob\", text=\"DOB\")\n\t\tself.orphans_table.heading(\"gender\", text=\"Gender\")\n\t\tself.orphans_table.heading(\"contact\", text=\"Contact\")\n\t\tself.orphans_table.heading(\"address\", text=\"Address\")\n\n\t\tself.orphans_table[\"show\"] = 
\"headings\"\n\t\tself.orphans_table.column(\"id\", width=90)\n\t\tself.orphans_table.column(\"name\", width=130)\n\t\tself.orphans_table.column(\"email\", width=130)\n\t\tself.orphans_table.column(\"dob\", width=100)\n\t\tself.orphans_table.column(\"gender\", width=100)\n\t\tself.orphans_table.column(\"contact\", width=100)\n\t\tself.orphans_table.column(\"address\", width=180)\n\t\tself.orphans_table.pack(fill=BOTH, expand=1)\n\n\t\tself.orphans_table.bind(\"\",self.get_cursor)\n\n\t\tself.fetch_data()\n\n\tdef add_orphans(self):\n\n\t\tconn = mysql.connector.connect(host=\"localhost\", username=\"root\", password=\"Loveurlife762\", database=\"orphanage\")\n\n\t\tmy_cursor = conn.cursor()\n\t\tmy_cursor.execute(\"insert into orphans values(%s,%s,%s,%s,%s,%s,%s)\", (\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.id_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.name_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.email_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.gender_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.dob_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.contact_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.address_text.get(\"1.0\", END)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t))\n\n\t\tconn.commit()\n\t\tself.fetch_data()\n\t\tconn.close()\n\n\tdef fetch_data(self):\n\t\tconn = mysql.connector.connect(host=\"localhost\", username=\"root\", password=\"Loveurlife762\", database=\"orphanage\")\n\t\tmy_cursor = conn.cursor()\n\t\tmy_cursor.execute(\"select * from orphans\")\n\t\trows = my_cursor.fetchall()\n\t\tif len(rows) != 0:\n\t\t\tself.orphans_table.delete(*self.orphans_table.get_children())\n\t\t\tfor i in rows:\n\t\t\t\tself.orphans_table.insert(\"\", END, values=i)\n\t\t\tconn.commit()\n\t\t\tconn.close()\n\n\tdef clear(self):\n\t\tself.id_var.set(\"\")\n\t\tself.name_var.set(\"\")\n\t\tself.email_var.set(\"\")\n\t\tself.gender_var.set(\"\")\n\t\tself.dob_var.set(\"\")\n\t\tself.contact_var.set(\"\")\n\t\tself.address_text.delete(\"1.0\", END)\n\n\tdef get_cursor(self,event=\"\"):\n\t\tcursor_row=self.orphans_table.focus()\n\t\tcontent=self.orphans_table.item(cursor_row)\n\t\trow=content[\"values\"]\n\t\tself.id_var.set(row[0])\n\t\tself.name_var.set(row[1])\n\t\tself.email_var.set(row[2])\n\t\tself.gender_var.set(row[3])\n\t\tself.dob_var.set(row[4])\n\t\tself.contact_var.set(row[5])\n\t\tself.address_text.delete(\"1.0\", END)\n\t\tself.address_text.insert(END,row[6])\n\n\tdef update_orphans(self):\n\t\tconn = mysql.connector.connect(host=\"localhost\", username=\"root\", password=\"Loveurlife762\", database=\"orphanage\")\n\t\tmy_cursor = conn.cursor()\n\t\tmy_cursor.execute(\"update orphans set name=%s,email=%s,gender=%s,dob=%s,contact=%s,address=%s where id=%s\", (\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.name_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.email_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.gender_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.dob_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.contact_var.get(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.address_text.get(\"1.0\", END),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.id_var.get()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t))\n\n\t\tconn.commit()\n\t\tself.fetch_data()\n\t\tself.clear()\n\t\tconn.close()\n\t\tmessagebox.showinfo(\"Update\",\"Record 
 has been updated successfully!\")\n\n\tdef delete_orphans(self):\n\t\tconn = mysql.connector.connect(host=\"localhost\", username=\"root\", password=\"Loveurlife762\", database=\"orphanage\")\n\t\tmy_cursor = conn.cursor()\n\t\tquery=\"delete from orphans where id=%s\"\n\t\tvalue=(self.id_var.get(),)\n\t\tmy_cursor.execute(query,value)\n\n\t\tconn.commit()\n\t\tconn.close()\n\t\tself.fetch_data()\n\t\tself.clear()\n\n\t\tmessagebox.showinfo(\"Delete\",\"Record has been deleted successfully\")\n\n\tdef search_orphans(self):\n\t\tconn = mysql.connector.connect(host=\"localhost\", username=\"root\", password=\"Loveurlife762\", database=\"orphanage\")\n\t\tmy_cursor = conn.cursor()\n\t\t# parameterized LIKE query built from the two search fields\n\t\tmy_cursor.execute(\"select * from orphans where \" + self.search_by.get() + \" like %s\", (self.search_txt.get() + \"%\",))\n\t\trows = my_cursor.fetchall()\n\n\t\tif len(rows)!=0:\n\t\t\tself.orphans_table.delete(*self.orphans_table.get_children())\n\t\t\tfor i in rows:\n\t\t\t\tself.orphans_table.insert(\"\",END,values=i)\n\t\tconn.close()\n\n\t\tbackbtn1 = Button(self.root, text='Next', font=(\"times new roman\", 15, \"bold\"), borderwidth=0, bg='grey56')\n\t\tbackbtn1.place(x=1150, y=25, width=80, height=30)\n\n\t\tprebtn1 = Button(self.root, text='Previous', font=(\"times new roman\", 15, \"bold\"), borderwidth=0, bg='grey56')\n\t\tprebtn1.place(x=40, y=25, width=80, height=30)\n\n\nroot = Tk()\nob = orphans(root)\nroot.mainloop()\n","sub_path":"orphan.py","file_name":"orphan.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"121292441","text":"from websocket import create_connection, ABNF\nimport requests\n\nprint(\"Sending test binary to websocket\")\nws = create_connection(\"ws://localhost:8080/socket\")\nws.send(\"filecontent\", ABNF.OPCODE_BINARY)\nresult = ws.recv()\nprint(\"Received response '%s'\" % result)\nws.close()\n\nprint(\"Getting info of the last modified file\")\nr = requests.get('http://localhost:8080/last-file-info')\nprint(\"Response: %s\" % r.content)","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"539330761","text":"import pandas as pd\nimport json\n\ndef average_emotion(target_tweet_analysis, target_emotion):\n\n    number_keywords = len(target_tweet_analysis[\"tweet_analysis\"][\"keywords\"])\n    total_emotion = 0.0\n    for keyword_index in range(number_keywords):\n        target_keyword = target_tweet_analysis[\"tweet_analysis\"][\"keywords\"][keyword_index]\n        try:\n            total_emotion += target_keyword[\"emotion\"][target_emotion]*target_keyword[\"relevance\"]\n        except KeyError:\n            total_emotion += 0\n\n    return total_emotion\n\n\ndef average_sentiment(target_tweet_analysis):\n\n    number_keywords = len(target_tweet_analysis[\"tweet_analysis\"][\"keywords\"])\n    total_sentiment = 0.0\n    for keyword_index in range(number_keywords):\n        target_keyword = target_tweet_analysis[\"tweet_analysis\"][\"keywords\"][keyword_index]\n        try:\n            total_sentiment += target_keyword[\"sentiment\"][\"score\"]*target_keyword[\"relevance\"]\n        except KeyError:\n            total_sentiment += 0\n\n    return total_sentiment\n\n# Whether the input file is a JSON or a CSV file.\n# Exactly one of them must be True.\n\nJSON = False\nCSV = True\nSEP = '|' # separator for the csv file\nFORMATTED_INPUT_FILE = '10_tweets_formatted_tweets.csv'\nSENTIMENT_FILE = 'response.json'\nOUTPUT_FILE =
\"tweets_dataset.csv\"\n\n\nif JSON:\n with open('formatted_tweets.json') as json_data:\n formatted_tweets = json.load(json_data)\n\nelif CSV:\n formatted_tweets = pd.read_csv(FORMATTED_INPUT_FILE, sep=SEP)\n\nelse:\n raise Exception(\"Both JSON and CSV are false\")\n\n\nwith open(SENTIMENT_FILE) as json_data:\n tweets_analysis = json.load(json_data)\n\ntweets_csv = open(OUTPUT_FILE, \"w\")\ntweets_csv.write(\"tweet_id,fake,joy,sadness,anger,fear,disgust,sentiment\\n\")\n\ntweet_formatted_index = 0\n\nfor tweet_analysis_index in range(len(tweets_analysis[\"tweets\"])):\n\n #print(tweet_analysis_index, len(tweets_analysis[\"tweets\"]))\n while True:\n if JSON:\n target_tweet_formatted_id = formatted_tweets[\"tweets\"][tweet_formatted_index][\"id\"]\n elif CSV:\n target_tweet_formatted_id = formatted_tweets[\"id\"][tweet_formatted_index]\n\n target_tweet_analysis_id = tweets_analysis[\"tweets\"][tweet_analysis_index][\"tweet_id\"]\n if str(target_tweet_formatted_id) == str(target_tweet_analysis_id):\n break\n else:\n tweet_formatted_index += 1\n\n target_tweet_analysis = tweets_analysis[\"tweets\"][tweet_analysis_index]\n\n tweets_csv.write(str(target_tweet_analysis[\"tweet_id\"]))\n tweets_csv.write(\",\")\n\n if JSON:\n tweets_csv.write(str(formatted_tweets[\"tweets\"][tweet_formatted_index][\"is_fake\"]))\n\n elif CSV:\n tweets_csv.write(str(formatted_tweets.iloc[tweet_formatted_index][\"is_fake\"]))\n\n tweets_csv.write(\",\")\n\n total_joy = average_emotion(target_tweet_analysis, \"joy\")\n tweets_csv.write(str(total_joy))\n tweets_csv.write(\",\")\n\n total_sadness = average_emotion(target_tweet_analysis, \"sadness\")\n tweets_csv.write(str(total_sadness))\n tweets_csv.write(\",\")\n\n total_anger = average_emotion(target_tweet_analysis, \"anger\")\n tweets_csv.write(str(total_anger))\n tweets_csv.write(\",\")\n\n total_fear = average_emotion(target_tweet_analysis, \"fear\")\n tweets_csv.write(str(total_fear))\n tweets_csv.write(\",\")\n\n total_disgust = average_emotion(target_tweet_analysis, \"disgust\")\n tweets_csv.write(str(total_disgust))\n tweets_csv.write(\",\")\n\n total_sentiment = average_sentiment(target_tweet_analysis)\n tweets_csv.write(str(total_sentiment))\n tweets_csv.write(\"\\n\")\n\n tweet_formatted_index += 1\n\ntweets_csv.close()\n","sub_path":"backend/features_builder.py","file_name":"features_builder.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"115242957","text":"from pyrogram import Client, Filters\nimport time\nimport re\nimport os\nimport chromeprinter\n\na = chromeprinter.Client()\n\n@Client.on_message(Filters.command(\"print\", prefixes = ['!','/']))\ndef print(client, message):\n url = message.text.split(' ',1)[1]\n ctime = time.time()\n if re.match(r'^[a-z]+://', url):\n url = url\n else:\n url = 'http://'+url\n a.make_screenshot(url,f'{ctime}.png')\n client.send_photo(message.chat.id,f\"{ctime}.png\")\n os.remove(f'{ctime}.png')","sub_path":"plugins/print.py","file_name":"print.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"300276157","text":"from flask import Blueprint\nfrom flask_restful import Api\n\nfrom amodys.api.resources import *\n\n\nblueprint = Blueprint('api', __name__)\napi = Api(blueprint)\n\n#@blueprint.after_request # HACK: uncomment if needed\ndef after_request(resp):\n # HACK: Try to fix encoding\n resp.headers['Content-Type'] = 
'application/json; charset=utf-8'\n    resp.data = resp.data.decode('utf-8').encode('utf-8')\n    print(resp.headers)\n    return resp\n\n\napi.add_resource(OrganizationList, '/organizations', endpoint='organizationlist')\napi.add_resource(OrganizationResource, '/<organization_id>', '/organizations/<organization_id>', endpoint='organizationresource')\n\napi.add_resource(UserList, '/users', endpoint='userlist')\napi.add_resource(UserResource, '/users/me', '/users/<user_id>', endpoint='userresource')\n#api.add_resource(UserCurrentResource, '/users/me')\n\napi.add_resource(MemberList, '/members', endpoint='memberlist')\napi.add_resource(MemberResource, '/members/<member_id>', endpoint='memberresource')\n#api.add_resource(MemberList, '/organizations/<organization_id>/members', endpoint='organizationresource.memberlist')\n#api.add_resource(MemberResource, '/organizations/<organization_id>/members/<member_id>', endpoint='organizationresource.memberresource')\n\napi.add_resource(TeamList, '/teams', endpoint='teamlist')\napi.add_resource(TeamResource, '/teams/<team_id>', endpoint='teamresource')\n\napi.add_resource(ItemList, '/items', endpoint='itemlist')\napi.add_resource(ItemResource, '/items/<item_id>', endpoint='itemresource')\n\napi.add_resource(EventList, '/events', endpoint='eventlist')\napi.add_resource(EventResource, '/events/<event_id>', endpoint='eventresource')\n\napi.add_resource(RoleList, '/roles', endpoint='rolelist')\napi.add_resource(RoleResource, '/roles/<role_id>', endpoint='roleresource')\n","sub_path":"amodys/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"147707860","text":"import pymongo\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\nruns conceded by teams:\nThis is an interactive visualization script which produces line graphs of runs conceded by teams.\nIt takes the following input:\n    1) Overs range (start and stop)\n    2) Season range (start and stop)\n    3) Innings (1st or 2nd)\n\"\"\"\n\ndef main():\n    myClient = pymongo.MongoClient(\"mongodb://localhost:27017\")\n    myDb= myClient['IPL']\n    RunsScored = myDb['runs_scored']\n\n\n    start=int(input('enter the starting over (min 1, max 20) :'))\n    stop=int(input('enter the ending over (should be greater than the starting over, max 20) :'))\n    season_start=int(input('enter the starting season between [2008-2019] :'))\n    season_stop=int(input('enter the ending season (should be greater than the starting season, max 2019) :'))\n    innings=int(input('enter innings [1,2] :' ))\n\n    avg_overs={'_id':'$team'}\n\n    query=[\n        {'$match':{\n            'season':{'$gte':season_start,'$lte':season_stop},\n            'over':{'$gte':start,'$lte':stop},\n            'innings':innings\n            }\n        },\n        {'$group':{\n            '_id': {'team':\"$bowling_team\",'over':\"$over\"},\n            'averageRunsOver': {\n                '$avg': \"$runs\"\n            }\n          }\n        },\n        {'$sort':{\n            \"_id.over\": 1\n            }\n        }\n    ]\n\n    runs_by_team=list(RunsScored.aggregate(query))\n    overs=[i for i in range(start,stop+1)]\n    df_dict={'overs':overs}\n\n    for team in runs_by_team:\n        try:\n            df_dict[team['_id']['team']].append(team['averageRunsOver'])\n        except KeyError:\n            df_dict[team['_id']['team']]=[team['averageRunsOver'],]\n\n    df=pd.DataFrame.from_dict(df_dict)\n    print(df)\n\n    teams=list(df.columns)\n    teams.remove('overs')\n\n    if innings==1:\n        inng='1st'\n    else:\n        inng='2nd'\n\n    ax=df.plot.line(x='overs',y=teams,\\\n        title='Average runs given by teams bowling {} between the overs {} and {} in season {}-{} '.format(inng,start,stop,season_start,season_stop))\n    ax.set_ylabel(\"average_runs\")\n    plt.show()\n\n\nif __name__ == '__main__':\n
main()","sub_path":"src/analysis/Visualization/runs_given.py","file_name":"runs_given.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"411544639","text":"from pyrecord import Record\nimport random\nimport numpy as np\n\n# Define el registro como tipo de datos\nNota = Record.create_type(\"Nota\",\"alumno\",\"nota1\",\"nota2\", \n \"nota3\",alumno = '',nota1 = 0,nota2 = 0,nota3 = 0)\n\n\ndef cargar(vector,archivo):\n archivo = open(archivo, 'r')\n contador_de_lineas = 0\n for linea in archivo:\n vector[contador_de_lineas] = Nota()\n carga_aleatoria(vector[contador_de_lineas],linea.rstrip('\\n'))\n contador_de_lineas += 1\n archivo.close\n return contador_de_lineas,vector\n\n\n\ndef carga_aleatoria(registro, nombre):\n registro.alumno = nombre\n registro.nota1 = random.randint(1,10)\n registro.nota3 = random.randint(1,10)\n registro.nota2 = random.randint(1,10)\n\ndef mostrar_registro(registro):\n print(\"Nombre : \",registro.alumno, \" | Nota 1 : \",registro.nota1,\" | Nota 2 : \",registro.nota2,\" | Nota 3 : \",registro.nota3, \" |\")\n\ndef mostrar_registro_2(registro,valor_adicional):\n print(\"Nombre : \",registro.alumno, \" | Nota 1 : \",registro.nota1,\" | Nota 2 : \",registro.nota2,\" | Nota 3 : \",registro.nota3, \" | \",valor_adicional, \" | \")\n\n\ndef mostrar_en_rango(vector,minimo,maximo,elementos,etiqueta):\n print(\" ------ Los que \",etiqueta,\" son ------------------------------\")\n for n in range(elementos):\n promedio = (vector[n].nota1 + vector[n].nota2 + vector[n].nota3 ) / 3\n if (promedio >= minimo and promedio < maximo):\n mostrar_registro_2(vector[n],promedio)\n\ndef main():\n cantidad_maxima = 100\n cant_real = 0 \n\n # Define un vector de 100 elementos de tipo Nota pero vacio\n notas = np.empty([cantidad_maxima,], dtype=Nota)\n # Pedir que se cargue el vector con los datos del archivo\n cant_real,notas = cargar(notas,\"alumnos.txt\")\n # Mostrar el vector\n for i in range(cant_real):\n mostrar_registro(notas[i])\n\n # Calcular Quines quedaron Integrando\n mostrar_en_rango(notas,6,11,cant_real,\"integran\")\n\n # Calcular Quines quedaron Integrando\n #mostrar_regular(notas,cant_real)\n mostrar_en_rango(notas,4,6,cant_real,\"quedan regulares\")\n\n # Calcular Quines quedaron Integrando\n #mostrar_libres(notas,cant_real)\n mostrar_en_rango(notas,1,4,cant_real,\"quedan libres\")\n \n #dato_tipo_nota = Nota()\n #carga_aleatoria(dato_tipo_nota,\"Juan Perez\")\n #mostrar_registro(dato_tipo_nota)\n\n\nmain()\n\n\n\n\n\n","sub_path":"prueba_registro_uno.py","file_name":"prueba_registro_uno.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"411020535","text":"from sqlalchemy import create_engine\nimport pandas as pd\nimport statsmodels.api as sm\nfrom scipy.optimize import curve_fit\nimport numpy as np\nfrom pyecharts.charts import *\nfrom pyecharts import options as opts\n\ndef UsaZhexian():\n # 初始化数据库连接,使用pymysql模块\n engine = create_engine(\"mysql+pymysql://{}:{}@{}/{}?charset={}\".format('root', 'welcome2019', '127.0.0.1:3306', 'datascience','utf8mb4'))\n\n # 查询语句,选出employee表中的所有数据\n sql = '''\n SELECT distinct thedate,curedCount,deadCount,confirmedCount FROM world\n WHERE provinceName=\"美国\"\n '''\n # read_sql_query的两个参数: sql语句, 数据库连接\n USA_data = pd.read_sql_query(sql, engine)\n\n # 输出employee表的查询结果\n Y = USA_data['confirmedCount']\n Y_dead = USA_data['deadCount']\n x = [i 
 + 100 for i in range(1, 62)]\n\n    def logistic(t, K, P0, r):  # define the logistic function\n        exp_value = np.exp(r * t)\n        return (K * exp_value * P0) / (K + (exp_value - 1) * P0)\n\n    coef, pcov = curve_fit(logistic, x, Y)  # fit the curve\n    print(coef)  # fitted logistic function parameters\n    y_values = [logistic(i, coef[0], coef[1], coef[2]) for i in range(101, 301)]\n    xx = [i for i in range(1, 62)]\n    Y_dead = USA_data['deadCount']\n\n    res_dead = np.polyfit(xx, Y_dead, 3, rcond=None, full=False, w=None, cov=False)\n    A, B, C, D = res_dead[0], res_dead[1], res_dead[2], res_dead[3]\n    Y_dead_prediction_3 = []\n    for number in range(1, 200):\n        Y_dead_prediction_3.append(A * number * number * number + B * number * number + C * number + D)\n    time = []\n    for t in range(1, 31):\n        time.append(\"2020-04-{}\".format(t))\n    for t in range(1, 32):\n        time.append(\"2020-05-{}\".format(t))\n    for t in range(1, 31):\n        time.append(\"2020-06-{}\".format(t))\n    for t in range(1, 32):\n        time.append(\"2020-07-{}\".format(t))\n    # for t in range(1,32):\n    #     time.append(\"2020-08-{}\".format(t))\n    # for t in range(1,31):\n    #     time.append(\"2020-09-{}\".format(t))\n    # for t in range(1,32):\n    #     time.append(\"2020-10-{}\".format(t))\n    # for t in range(1,31):\n    #     time.append(\"2020-11-{}\".format(t))\n    # for t in range(1,32):\n    #     time.append(\"2020-12-{}\".format(t))\n\n    x_data = list(USA_data['thedate'])\n    # cumulative confirmed\n    y1_data = list(USA_data['confirmedCount'])\n    # cumulative cured\n    y2_data = list(USA_data['curedCount'])\n    # cumulative deaths\n    y3_data = list(USA_data['deadCount'])\n\n    line = (Line()\n            .add_xaxis(time)\n            .add_yaxis('Cumulative confirmed', y1_data, color='#10aeb5')\n            .add_yaxis('Cumulative cured', y2_data, color='#e83132')\n            .add_yaxis('Cumulative deaths', y3_data, color='#000000')\n            .add_yaxis('Predicted confirmed', y_values, color='#eb97dc')\n            .add_yaxis('Predicted deaths', Y_dead_prediction_3, color='#ab12ba')\n            .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n            .set_global_opts(\n                title_opts=opts.TitleOpts(title='USA')\n            ))\n\n    return line","sub_path":"usaZhexian.py","file_name":"usaZhexian.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"458965756","text":"from flask import Flask,render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:12345678@127.0.0.1:5432/todoapp'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # silences the tracking warning\ndb = SQLAlchemy(app)\n\n\nclass Todo(db.Model):\n    __tablename__ = 'todos'\n    id = db.Column(db.Integer, primary_key = True)\n    description = db.Column(db.String(), nullable = False)\n\n    def __repr__(self):\n        return f\"<Todo {self.id} {self.description}>\"\n\ndb.create_all()\n@app.route('/todos/create', methods = ['POST'])\ndef create_todo():\n    description = request.form.get('description','') # an empty string if nothing comes in\n    todo = Todo(description=description)\n    db.session.add(todo)\n    db.session.commit()\n    return redirect(url_for('index')) # our index route\n@app.route('/')\ndef index():\n    return render_template('index.html', data = Todo.query.all())\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"Full-Stack-Developement/todoapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"522328440","text":"import os\n\n\ndef path_log(path):\n    def log_func(old_func):\n        def new_func(*args, **kwargs):\n            from datetime
 import datetime\n            call_time = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n            result = old_func(*args, **kwargs)\n            if result is None:\n                result = 'The function returns nothing'\n            with open(f'{path}/file.log', 'a', encoding='utf-8') as file:\n                try:\n                    file.write(f'Function call time: {call_time}\\n'\n                               f'Function name: {old_func.__name__}\\n'\n                               f'Function arguments: {args}_{kwargs}\\n'\n                               f'Return value: {result}\\n'\n                               f'\\n')\n                except Exception as er:\n                    file.write(f'Function call time: {call_time}\\n'\n                               f'Function name: {old_func.__name__}\\n'\n                               f'Function arguments: {args}_{kwargs}\\n'\n                               f'Returned error: {er}\\n'\n                               f'\\n')\n            return result\n\n        return new_func\n\n    return log_func\n\nclass Contact:\n    def __init__(self, name, surname, number_phone, *args, favorite_contact=False, **kwargs):\n        self.name = name\n        self.surname = surname\n        self.number_phone = number_phone\n        self.favorite_contact = favorite_contact\n\n        if not self.favorite_contact:\n            self.favorite_contact = 'no'\n        else:\n            self.favorite_contact = 'yes'\n\n        self.info = ''\n        for item in args:\n            self.info += f'{\"\":5}{item}\\n'\n        for key, value in kwargs.items():\n            self.info = self.info + f'{\"\":5}{key} : {value}\\n'\n\n    def __str__(self):\n        str_print = f'Name: {self.name}\\n' \\\n                    f'Surname: {self.surname}\\n' \\\n                    f'Phone: {self.number_phone}\\n' \\\n                    f'In favorites: {self.favorite_contact}\\n' \\\n                    f'Additional information:\\n' \\\n                    f'{self.info}'\n        return str_print\n\n\nclass PhoneBook:\n\n    def __init__(self, name):\n        self.name = name\n        self.data = []\n\n    def add_contact(self, contact):\n        self.data.append(contact)\n\n    def get_contact(self):\n        for item in self.data:\n            print(item)\n\n    def del_contact(self, number_phone):\n        for item in self.data:\n            if number_phone == item.number_phone:\n                self.data.remove(item)\n\n    def get_favorite_contact(self):\n        for item in self.data:\n            if item.favorite_contact == 'yes':\n                print(item.number_phone)\n\n    @path_log(os.getcwd())\n    def get_search_contact(self, name, surname):\n        for item in self.data:\n            if name == item.name and surname == item.surname:\n                print(item)\n\n\nif __name__ == '__main__':\n    john = Contact('John', 'Smith', '+71234567809', telegram='@jhony', email='jhony@smith.com')\n    peter = Contact('Peter', 'Wills', '+71676767809', favorite_contact=True, telegram='@peter', email='peter@wills.com')\n    mary = Contact('Mary', 'Dick', '+79844489944', favorite_contact=True, telegram='@mary', email='mary@dick.com')\n\n    book = PhoneBook('My book')\n    book.add_contact(john)\n    book.add_contact(peter)\n    book.add_contact(mary)\n    # book.get_contact()\n    # book.del_contact('+71676767809')\n    # book.get_contact()\n    # book.get_favorite_contact()\n    book.get_search_contact('Peter', 'Wills')\n","sub_path":"netology_advanced_python_tasks/4_decorators/task/task_decor_param_for_func.py","file_name":"task_decor_param_for_func.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"54816610","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/danyvohl/code/shwirl/extern/vispy/app/timer.py\n# Compiled at: 2017-04-05 22:12:59\n# Size of source mod 2**32: 5780 bytes\nfrom __future__ import division\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend\nfrom .
import use_app, Application\n\nclass Timer(object):\n __doc__ = \"Timer used to schedule events in the future or on a repeating schedule\\n\\n Parameters\\n ----------\\n interval : float | 'auto'\\n Time between events in seconds. The default is 'auto', which\\n attempts to find the interval that matches the refresh rate of\\n the current monitor. Currently this is simply 1/60.\\n connect : function | None\\n The function to call.\\n iterations : int\\n Number of iterations. Can be -1 for infinite.\\n start : bool\\n Whether to start the timer.\\n app : instance of vispy.app.Application\\n The application to attach the timer to.\\n \"\n\n def __init__(self, interval='auto', connect=None, iterations=-1, start=False, app=None):\n self.events = EmitterGroup(source=self, start=Event, stop=Event, timeout=Event)\n if app is None:\n self._app = use_app(call_reuse=False)\n else:\n if isinstance(app, Application):\n self._app = app\n else:\n if isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n self._app.native\n self._backend = self._app.backend_module.TimerBackend(self)\n if interval == 'auto':\n interval = 0.016666666666666666\n self._interval = float(interval)\n self._running = False\n self._first_emit_time = None\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def elapsed(self):\n return precision_time() - self._first_emit_time\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n\n If the timer is already running when this function is called, nothing\n happens (timer continues running as it did previously, without\n changing the interval, number of iterations, or emitting a timer\n start event).\n \"\"\"\n if self.running:\n return\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._first_emit_time = precision_time()\n self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n now = precision_time()\n dt = now - self._last_emit_time\n elapsed = now - self._first_emit_time\n self._last_emit_time = now\n self.events.timeout(type='timer_timeout', iteration=self.iter_count, elapsed=elapsed, dt=dt, count=self.iter_count)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)","sub_path":"pycfiles/shwirl-0.1.14.tar/timer.cpython-35.py","file_name":"timer.cpython-35.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"141940806","text":"#!/usr/bin/env python3\n\n# INSTAGRAM DOWNLOADER\n# 2021 (c) Micha Johannes Birklbauer\n# https://github.com/michabirklbauer/\n# micha.birklbauer@gmail.com\n\nimport urllib.request as ur\nimport json\nimport sys\nimport os\n\ndef get_image(json_data, insta_url, prefix=\"\"):\n\tdimensions_h = int(json_data[\"dimensions\"][\"height\"])\n\tdimensions_w = int(json_data[\"dimensions\"][\"width\"])\n\tdisplay_resources = json_data[\"display_resources\"]\n\timage_src = \"\"\n\timage_display = \"\"\n\tfor resource in display_resources:\n\t\tif int(resource[\"config_height\"])==dimensions_h and int(resource[\"config_width\"])==dimensions_w:\n\t\t\timage_src = str(resource[\"src\"])\n\t\t\tbreak\n\ttry:\n\t\timage_display = str(json_data[\"display_url\"])\n\texcept:\n\t\tprint(\"Error: Failed to extract image from display_url!\")\n\t\timage_display = \"\"\n\tif image_display == image_src:\n\t\timage = image_display\n\telse:\n\t\tif image_src == \"\" and image_display != \"\":\n\t\t\tprint(\"Warning: image_src and image_display not the same!\")\n\t\t\tprint(\"image_src is NULL\")\n\t\t\tprint(\"Getting display_url!\")\n\t\t\timage = image_display\n\t\telif image_src != \"\" and image_display == \"\":\n\t\t\tprint(\"Warning: image_src and image_display not the same!\")\n\t\t\tprint(\"image_display is NULL\")\n\t\t\tprint(\"Getting display_resources!\")\n\t\t\timage = image_src\n\t\telse:\n\t\t\tprint(\"Warning: image_src and image_display not the same!\")\n\t\t\tprint(\"image_src: \\n\" + image_src)\n\t\t\tprint(\"image_display: \\n\" + image_display)\n\t\t\tprint(\"Default: Getting 
display_url!\")\n\t\t\timage = image_display\n\tif image == \"\":\n\t\tprint(\"Error: Failed to extract image!\")\n\t\treturn [1]\n\timage_link = image.replace(\"\\\\\", \"\")\n\ttry:\n\t\tif prefix == \"\":\n\t\t\tfile_name = str(image_link).split(\"/\")[-1].split(\"?\")[0]\n\t\telse:\n\t\t\tfile_name = prefix + \"_\" + str(image_link).split(\"/\")[-1].split(\"?\")[0]\n\t\tur.urlretrieve(str(image_link), file_name)\n\t\tprint(\"Successfully extracted and downloaded image!\")\n\t\treturn [0, image_link]\n\texcept:\n\t\terror_msg = \"Error: Failed to extract image from link: \" + insta_url\n\t\tprint(error_msg)\n\t\treturn [1, image_link]\n\ndef get_video(json_data, insta_url, prefix=\"\"):\n\tdimensions_h = int(json_data[\"dimensions\"][\"height\"])\n\tdimensions_w = int(json_data[\"dimensions\"][\"width\"])\n\tdisplay_resources = json_data[\"display_resources\"]\n\timage_src = \"\"\n\timage_display = \"\"\n\tresult = []\n\tfor resource in display_resources:\n\t\tif int(resource[\"config_height\"])==dimensions_h and int(resource[\"config_width\"])==dimensions_w:\n\t\t\timage_src = str(resource[\"src\"])\n\t\t\tbreak\n\ttry:\n\t\timage_display = str(json_data[\"display_url\"])\n\texcept:\n\t\tprint(\"Error: Failed to extract image from display_url!\")\n\t\timage_display = \"\"\n\tif image_display == image_src:\n\t\timage = image_display\n\telse:\n\t\tif image_src == \"\" and image_display != \"\":\n\t\t\tprint(\"Warning: image_src and image_display not the same!\")\n\t\t\tprint(\"image_src is NULL\")\n\t\t\tprint(\"Getting display_url!\")\n\t\t\timage = image_display\n\t\telif image_src != \"\" and image_display == \"\":\n\t\t\tprint(\"Warning: image_src and image_display not the same!\")\n\t\t\tprint(\"image_display is NULL\")\n\t\t\tprint(\"Getting display_resources!\")\n\t\t\timage = image_src\n\t\telse:\n\t\t\tprint(\"Warning: image_src and image_display not the same!\")\n\t\t\tprint(\"image_src: \\n\" + image_src)\n\t\t\tprint(\"image_display: \\n\" + image_display)\n\t\t\tprint(\"Default: Getting display_url!\")\n\t\t\timage = image_display\n\tif image == \"\":\n\t\tprint(\"Error: Failed to extract image!\")\n\t\tprint(\"Trying to get video!\")\n\telse:\n\t\timage_link = image.replace(\"\\\\\", \"\")\n\t\ttry:\n\t\t\tif prefix == \"\":\n\t\t\t\tfile_name = str(image_link).split(\"/\")[-1].split(\"?\")[0]\n\t\t\telse:\n\t\t\t\tfile_name = prefix + \"_\" + str(image_link).split(\"/\")[-1].split(\"?\")[0]\n\t\t\tur.urlretrieve(str(image_link), file_name)\n\t\t\tprint(\"Successfully extracted and downloaded image!\")\n\t\t\tresult.append(image_link)\n\t\texcept:\n\t\t\terror_msg = \"Error: Failed to extract image from link: \" + insta_url\n\t\t\tprint(error_msg)\n\t\t\tprint(\"Trying to get video!\")\n\t\t\tresult.append(\"no image\")\n\tvideo = str(json_data[\"video_url\"])\n\tvideo_link = video.replace(\"\\\\\", \"\")\n\ttry:\n\t\tif prefix == \"\":\n\t\t\tfile_name = str(video_link).split(\"/\")[-1].split(\"?\")[0]\n\t\telse:\n\t\t\tfile_name = prefix + \"_\" + str(video_link).split(\"/\")[-1].split(\"?\")[0]\n\t\tur.urlretrieve(str(video_link), file_name)\n\t\tprint(\"Successfully extracted and downloaded video!\")\n\t\tresult.append(video_link)\n\t\tresult.insert(0, 0)\n\t\treturn result\n\texcept:\n\t\terror_msg = \"Error: Failed to extract video from link: \" + insta_url\n\t\tprint(error_msg)\n\t\tresult.append(video_link)\n\t\tresult.insert(0, 1)\n\t\treturn result\n\ndef instaload(insta_url):\n\n\tinsta_url_api = str(insta_url).rstrip(\"/\") + \"/?__a=1\"\n\trequest_header = { 
\"User-Agent\" : \"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\" }\n\trequest = ur.Request(insta_url_api, headers=request_header)\n\tdata = ur.urlopen(request).read()\n\n\ttry:\n\t\tjson_data = json.loads(data)\n\texcept:\n\t\tprint(\"Error: Failed to load json data!\")\n\t\treturn 1\n\n\tif str(json_data[\"graphql\"][\"shortcode_media\"][\"__typename\"]) == \"GraphImage\":\n\t\tr = get_image(json_data[\"graphql\"][\"shortcode_media\"], insta_url)\n\t\tif r[0] == 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1\n\telif str(json_data[\"graphql\"][\"shortcode_media\"][\"__typename\"]) == \"GraphVideo\":\n\t\tprefix = str(json_data[\"graphql\"][\"shortcode_media\"][\"shortcode\"])\n\t\tr = get_video(json_data[\"graphql\"][\"shortcode_media\"], insta_url, prefix)\n\t\tif r[0] == 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1\n\telif str(json_data[\"graphql\"][\"shortcode_media\"][\"__typename\"]) == \"GraphSidecar\":\n\t\tprefix = str(json_data[\"graphql\"][\"shortcode_media\"][\"shortcode\"])\n\t\tedges = json_data[\"graphql\"][\"shortcode_media\"][\"edge_sidecar_to_children\"][\"edges\"]\n\t\tr = 0\n\t\tfor edge in edges:\n\t\t\tif str(edge[\"node\"][\"__typename\"]) == \"GraphImage\":\n\t\t\t\tr_ = get_image(edge[\"node\"], insta_url, prefix)\n\t\t\telif str(edge[\"node\"][\"__typename\"]) == \"GraphVideo\":\n\t\t\t\tr_ = get_video(edge[\"node\"], insta_url, prefix)\n\t\t\telse:\n\t\t\t\tprint(\"Error: Unrecognized typename!\")\n\t\t\t\treturn 1\n\t\t\tif r_[0] == 1:\n\t\t\t\tr = 1\n\t\treturn r\n\telse:\n\t\tprint(\"Error: Unrecognized typename!\")\n\t\treturn 1\n\ndef is_private(insta_url):\n\turl = str(insta_url)\n\tshortcode = str(url.split(\"instagram.com/p/\")[1]).split(\"/\")[0]\n\tif len(shortcode) > 12:\n\t\treturn True\n\telse:\n\t\treturn False\n\nif __name__ == '__main__':\n\tif len(sys.argv) == 1:\n\t\ti_url = input(\"Please enter an URL to an instagram post e. g. https://www.instagram.com/p/BVKTcWWhyaS/ \\n\")\n\t\tif is_private(i_url):\n\t\t\tprint(\"It appears you entered a link to a private post, script will try to download anyway!\")\n\t\t\ttry:\n\t\t\t\tr = instaload(i_url)\n\t\t\texcept:\n\t\t\t\tprint(\"Failed to download post! Please try manually!\")\n\t\telse:\n\t\t\tr = instaload(i_url)\n\t\tif r == 0:\n\t\t\tprint(\"Download successfully!\")\n\t\telse:\n\t\t\tprint(\"Unknown Error encountered: Download may have failed!\")\n\telif len(sys.argv) == 2:\n\t\tif os.path.isfile(sys.argv[1]):\n\t\t\twith open(sys.argv[1], \"r\") as in_file:\n\t\t\t\tlines = in_file.readlines()\n\t\t\t\tin_file.close()\n\t\t\tcount = int(len(lines))\n\t\t\tcounter = 1\n\t\t\tpercent = [ \\\n\t\t\t\"[--------------------]\", \"[#-------------------]\",\n\t\t\t\"[##------------------]\", \"[###-----------------]\",\n\t\t\t\"[####----------------]\", \"[#####---------------]\",\n\t\t\t\"[######--------------]\", \"[#######-------------]\",\n\t\t\t\"[########------------]\", \"[#########-----------]\",\n\t\t\t\"[##########----------]\", \"[###########---------]\",\n\t\t\t\"[############--------]\", \"[#############-------]\",\n\t\t\t\"[##############------]\", \"[###############-----]\",\n\t\t\t\"[################----]\", \"[#################---]\",\n\t\t\t\"[##################--]\", \"[###################-]\",\n\t\t\t\"[####################]\"]\n\t\t\tr = 0\n\t\t\tfor line in lines:\n\t\t\t\tl = line.lstrip().rstrip()\n\t\t\t\tif is_private(l):\n\t\t\t\t\tprint(\"It appears your link list also contains links to private posts, script will try to download anyway but manually checking is advised! 
 Private links will be filtered and appended to private.txt!\")\n\t\t\t\t\ttry:\n\t\t\t\t\t\tr_ = instaload(l)\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tr_ = 1\n\t\t\t\t\twith open(\"private.txt\", \"a\") as p_file:\n\t\t\t\t\t\tp_file.write(l+\"\\n\")\n\t\t\t\telse:\n\t\t\t\t\tr_ = instaload(l)\n\t\t\t\tstatus = counter/count\n\t\t\t\t# progress bar: full at 100%, otherwise the highest 5% step the status exceeds\n\t\t\t\tif status == 1:\n\t\t\t\t\tstatus_bar = percent[20]\n\t\t\t\telse:\n\t\t\t\t\tstatus_bar = percent[0]\n\t\t\t\t\tfor idx in range(19, 0, -1):\n\t\t\t\t\t\tif status > idx / 20:\n\t\t\t\t\t\t\tstatus_bar = percent[idx]\n\t\t\t\t\t\t\tbreak\n\t\t\t\tstatus_msg = \"Downloaded \" + str(line) + \"\\nDownload at \" + str(status*100) + \"%\\n\" + status_bar + \"\\n\"\n\t\t\t\tcounter = counter + 1\n\t\t\t\tprint(status_msg)\n\t\t\t\tif r_ == 1:\n\t\t\t\t\tr = 1\n\t\t\tif r == 0:\n\t\t\t\tprint(\"Downloaded all Posts successfully!\")\n\t\t\telse:\n\t\t\t\tprint(\"Unknown Error encountered: Downloads of one or more Posts may have failed!\")\n\t\telse:\n\t\t\tif is_private(sys.argv[1]):\n\t\t\t\tprint(\"It appears you entered a link to a private post, script will try to download anyway!\")\n\t\t\t\ttry:\n\t\t\t\t\tr = instaload(sys.argv[1])\n\t\t\t\texcept Exception:\n\t\t\t\t\tr = 1\n\t\t\t\t\tprint(\"Failed to download post! Please try manually!\")\n\t\t\telse:\n\t\t\t\tr = instaload(sys.argv[1])\n\t\t\tif r == 0:\n\t\t\t\tprint(\"Downloaded successfully!\")\n\t\t\telse:\n\t\t\t\tprint(\"Unknown Error encountered: Download may have failed!\")\n\telse:\n\t\tprint(\"Wrong usage!
Try running without parameters or read documentation!\")\n","sub_path":"gui/instaload.py","file_name":"instaload.py","file_ext":"py","file_size_in_byte":9903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"476335066","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# AUTHOR: Savithru M Lokanath\n# PROJECT: HW_3 (Draw Initial)\n\nimport turtle\n\ndef draw_initial_M():\n\t\n\tinitial.pu()\n\tinitial.setpos(-50,0)\n\tinitial.pd()\n\tinitial.seth(90)\n\tinitial.forward(100)\n\tinitial.right(140)\n\tinitial.forward(60)\n\tinitial.left(100)\n\tinitial.forward(60)\n\tinitial.seth(270)\n\tinitial.forward(100)\n\t\ndef draw_initial_L():\n\t\n\tinitial.pu()\n\tinitial.setpos(50,100)\n\tinitial.pd()\n\tinitial.seth(-90)\n\tinitial.forward(100)\n\tinitial.left(90)\n\tinitial.forward(80)\n\t\n\nif __name__ == '__main__':\n\t\n\twindow = turtle.Screen()\n\twindow.bgcolor(\"red\")\n\t\n\tinitial = turtle.Turtle()\n\tinitial.shape(\"arrow\")\n\tinitial.color(\"yellow\")\n\tinitial.speed(10)\n\n\tdraw_initial_M()\n\tdraw_initial_L()\n\t\n\twindow.exitonclick()\n\n","sub_path":"scripts/initial_draw.py","file_name":"initial_draw.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"538878934","text":"from __future__ import division\nfrom visual import *\n \n#initiate global variables\nscene.lights = 1\nframeRate = 100\nfieldX=50\nfieldY=50\nfieldZ=50\nvScale=.175\nwallSpeed=25\n\n#initiate objects\nball = sphere(pos=vector(0,0,0),radius=1,color=(1,1,1))\nwallLeft = box(pos=vector(-fieldX/2,0,0),length=.001,width=fieldY/3,height=fieldZ/3,color=color.red)\nwallRight = box(pos=vector(fieldX/2,0,0),length=.001,width=fieldY/3,height=fieldZ/3,color=color.blue)\nwallTop = box(pos=vector(0,-fieldZ/2,0),length=fieldX,width=fieldY,height=.001,color=color.green)\nwallBottom = box(pos=vector(0,fieldZ/2,0),length=fieldX,width=fieldY,height=.001,color=color.orange)\nwallBack = box(pos=vector(0,0,-fieldY/2),length=fieldX,width=.001,height=fieldZ,color=color.white)\nvarr = arrow(pos=ball.pos,axis=vector(0,0,0),color=color.yellow)\n\n#initiate local variables\nball.vel = vector(25,5,15)\nball.trail = curve(color=varr.color)\nscene.autoscale=0\n\n#Main Loop\nwhile (1):\n rate(frameRate)\n\n #collision\n if (ball.pos.x+ball.vel.x/frameRate>fieldX/2-.5 or ball.pos.x+ball.vel.x/frameRate<-fieldX/2+.5):\n ball.vel.x=-ball.vel.x\n if (ball.pos.y+ball.vel.y/frameRate>fieldY/2-.5 or ball.pos.y+ball.vel.y/frameRate<-fieldY/2+.5):\n ball.vel.y=-ball.vel.y\n if (ball.pos.z+ball.vel.z/frameRate>fieldZ/2-.5 or ball.pos.z+ball.vel.z/frameRate<-fieldZ/2+.5):\n ball.vel.z=-ball.vel.z\n \n #update variables\n ball.pos += ball.vel/frameRate\n varr.pos=ball.pos\n varr.axis=ball.vel*vScale\n ball.trail.append(pos=ball.pos)\n if (ball.vel.x<0):\n wallLeft.pos.z -= (wallLeft.pos.z-ball.pos.z)/wallSpeed\n wallLeft.pos.y -= (wallLeft.pos.y-ball.pos.y)/wallSpeed\n ball.color=color.blue\n if (ball.vel.x>0):\n wallRight.pos.z -= (wallRight.pos.z-ball.pos.z)/wallSpeed\n wallRight.pos.y -= (wallRight.pos.y-ball.pos.y)/wallSpeed\n ball.color=color.red\n wallLeft.pos.y=max(-2*fieldY/6,min(2*fieldY/6,wallLeft.pos.y))\n wallLeft.pos.z=max(-2*fieldZ/6,min(2*fieldZ/6,wallLeft.pos.z))\n wallRight.pos.y=max(-2*fieldY/6,min(2*fieldY/6,wallRight.pos.y))\n 
wallRight.pos.z=max(-2*fieldZ/6,min(2*fieldZ/6,wallRight.pos.z))\n","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"255291460","text":"#!/usr/bin/env python\n\"\"\"\nScript Header\n\n$Id: cmPROV24616_3pcc_Remote_Factory_Reset\n\nCopyright (c) 2017-2018 Cisco Systems, Inc.\n\nReferences:\n Tph10111380c\n Tph10111381c\n Tph10111382c\n Tph10111383c\n Tph10111386c\n Tph10111388c\n\nTest Cases:\n test0101_factory_reset\n test0102_factory_reset_noconfirm\n test0103_factory_reset_busy\n test0104_factory_reset_confirm_busy\n test0105_factory_reset_with_param\n test0106_factory_reset_user_level\n\nTopology:\n 2 3PCC Phones\n 2 User IDs\n\nNotes:\n\nKnown Bugs:\n\n\"\"\"\n\nimport tng\nimport logging\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended import(\n wait_for_ccapi_call_states,\n register_lines,\n wait_for_registration_states)\nfrom cmPROV_3pcc_web_direct_url_base import WebDirectUrlBaseClass\nfrom tng_sl.plugins.synergylite_3pcc_ui import SynergyLite3pccUiHelper\nfrom tng_sl.contrib.mpp.phone_config_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\n\nlog = logging.getLogger('RemoteReset')\n\n\nclass RemoteResetTestCase(WebDirectUrlBaseClass, SetupHelpersTestCase):\n\n helpers = (PhoneConfigHelper, PhoneLineRegHelper)\n helper_num_devices = 2\n\n def setUp(self):\n # register the phone again because it is factory reset in every case\n register_lines(self.phone_data, [self.oPhone1], [1])\n wait_for_registration_states([self.oPhone1], ['REGISTERED'])\n\n def make_a_call(self, caller, callee, user_id):\n caller.ccapi.dial('null', user_id, '', 1, 0, 1)\n wait_for_ccapi_call_states(\n [caller, callee], (\"PROCEEDING\", \"RINGING\"))\n callee.ccapi.accept('0000')\n wait_for_ccapi_call_states(\n [caller, callee], (\"CONNECTED\", \"CONNECTED\"))\n\n def test0101_factory_reset(self):\n log.info(\"Start of factory reset\")\n\n # configure some params\n self.set_gpp_values()\n\n # Factory reset\n self.remote_reset(self.reset_status_ok)\n\n # check if params are reset\n self.verify_gpp_values()\n\n log.info(\"End of factory reset\")\n\n def test0102_factory_reset_noconfirm(self):\n log.info(\"Start of factory reset\")\n\n # configure some params\n self.set_gpp_values()\n\n # reset\n self.remote_reset(self.reset_status_ok, confirm=False)\n\n # check if params are reset\n self.verify_gpp_values(is_reset=False)\n\n log.info(\"End of factory reset\")\n\n def test0103_factory_reset_busy(self):\n log.info(\"Start of factory reset busy\")\n\n # make a call\n self.make_a_call(self.oPhone1, self.oPhone2, self.user_id2)\n\n # can not reset now\n self.remote_reset(self.reset_status_busy, confirm=False)\n\n self.oPhone1.ccapi.hangUp('0000')\n wait_for_ccapi_call_states(self.devices, (\"IDLE\", \"IDLE\"))\n\n log.info(\"End of factory reset busy\")\n\n def test0104_factory_reset_confirm_busy(self):\n log.info(\"Start of factory reset confirm busy\")\n\n # reset\n self.remote_reset(self.reset_status_ok, confirm=False)\n\n # make a call\n self.make_a_call(self.oPhone1, self.oPhone2, self.user_id2)\n\n # can not confirm now\n webcontent = self.oPhone1.http.get('admin/direct-factory-reset')\n self.verify_web_content(webcontent, self.reset_status_busy)\n\n self.oPhone1.ccapi.hangUp('0000')\n # Check two phones are in correct status\n 
wait_for_ccapi_call_states(self.devices, (\"IDLE\", \"IDLE\"))\n\n        log.info(\"End of factory reset confirm busy\")\n\n    def test0105_factory_reset_with_param(self):\n        log.info(\"Start of factory reset with param\")\n\n        # configure some params\n        self.set_gpp_values()\n\n        # reset\n        self.remote_reset(self.reset_status_ok, 1, '? p1 p2')\n\n        # check if params are reset\n        self.verify_gpp_values()\n\n        log.info(\"End of factory reset with param\")\n\n    def test0106_factory_reset_user_level(self):\n        log.info(\"Start of factory reset in user level\")\n\n        # reset\n        webcontent = self.oPhone1.http.get('factory-reset')\n        self.verify_web_content(webcontent, self.reset_status_forbidden)\n\n        log.info(\"End of factory reset in user level\")\n\n\n# this is called by 'tng run'\ndef main():\n    tng.api.runner()\n\n\nif __name__ == '__main__':\n    tng.run(main)\n","sub_path":"common/Provisioning/cmPROV24616_3pcc_Remote_Factory_Reset.py","file_name":"cmPROV24616_3pcc_Remote_Factory_Reset.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"451395210","text":"\"\"\"\nModule that returns a list of image directories containing\nfiles of a specified type.\n\"\"\"\nimport argparse\nimport os\nimport pathlib\n\nDATA_PATH = pathlib.Path.cwd().with_name('data')\nRESULT = []\n\n\ndef parse_cmd_arguments():\n    \"\"\"\n    Parses the command line argument for the path to iterate over.\n    :return: arguments entered at command line\n    \"\"\"\n    parser = argparse.ArgumentParser(description='List directories containing image files.')\n    parser.add_argument('-p', '--path', type=str, required=True,\n                        metavar='', help='Path to iterate over')\n    args = parser.parse_args()\n    return args\n\n\ndef list_jpg_files(path_: str):\n    \"\"\"\n    Builds a list of [directory, filenames] pairs for directories\n    containing files of a specific type\n    :param path_:\n    :return: list of lists\n    \"\"\"\n    final_result = []\n    for i in os.walk(path_):\n        for _f in i[-1]:\n            if 'png' in _f:\n                k = i[0], i[-1]\n                RESULT.append(k)\n\n    for item in RESULT:\n        if list(item) not in final_result:\n            final_result.append(list(item))\n    return final_result\n\n\nif __name__ == '__main__':\n    INARGS = parse_cmd_arguments()\n    print(list_jpg_files(INARGS.path))\n","sub_path":"students/jeff_shabani/lesson09/assignment/src/jpgdiscover.py","file_name":"jpgdiscover.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"561789916","text":"#!/usr/bin/env python3\n#author:Alnk(李成果)\n# producer - consumer, one to many\n# By default messages are dispatched round-robin, one consumer after another.\n'''\nIn terms of business logic, the producer should only delete a message from the queue\nonce the consumer has finished processing it.\nWhen a consumer finishes processing a message, it should send a reply to the producer.\n'''\n# Simulate a consumer losing power halfway through processing.\n# The producer is still the producer from the small example.\n# no_ack=True: send no acknowledgment to the server whether processing finishes or not.\n# Usually omitted; by default RabbitMQ expects a reply to the server.\n# Result: start a few more consumers and you can see that if a consumer does not reply,\n# the producer does not delete the message from the queue and resends it to the next consumer.\n\nimport pika\nimport time\nconnection = pika.BlockingConnection( pika.ConnectionParameters('localhost') )\nchannel = connection.channel()\nchannel.queue_declare(queue='hello')\n\ndef callback(ch,method,properties,body):\n    print('--->',ch,method,properties)\n    time.sleep(30)  # simulate a program that needs 30s to process this message\n    print(\"[x] received %r\" %body)\n    # after processing, tell the server the message is done so it removes it from the queue\n    channel.basic_ack(delivery_tag=method.delivery_tag)\n\nchannel.basic_consume(\n    callback,  # when a message arrives, call the callback function to handle it\n    queue='hello',  # which queue to receive messages from\n    #no_ack=True  # send no ack whether processing finishes or not; usually omitted, RabbitMQ expects a reply by default\n)\nprint('[*] waiting for message.
to exit press ctrl+c')\nchannel.start_consuming()","sub_path":"day11/01笔记/06rabbitmq消息分发轮询.py","file_name":"06rabbitmq消息分发轮询.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"447110862","text":"import json\nimport requests\nfrom google.cloud import bigquery\n\nRECORDED_VOICES_URL = (\n \"https://vikvok-anldg2io3q-ew.a.run.app/recordedvoices/original/{}\"\n)\nUSERS_URL = \"https://vikvok-anldg2io3q-ew.a.run.app/users/{}\"\n\n\ndef get_score():\n client = bigquery.Client(\"speech-similarity\")\n\n query_job = client.query(\n \"\"\"\n select\n Score, RecordedVoiceId\n from \n statistics.recorded_voices \n \"\"\"\n )\n\n users_tried = query_job.result()\n result = {}\n for row in users_tried:\n result[row.RecordedVoiceId] = row.Score\n return result\n\n\ndef original_voice_recorded_voices(request):\n request_json = request.get_json(silent=True)\n request_args = request.args\n if request_json and \"originalVoiceId\" in request_json:\n voice_id = request_json[\"originalVoiceId\"]\n elif request_args and \"originalVoiceId\" in request_args:\n voice_id = request_args[\"originalVoiceId\"]\n else:\n # error\n return \"originalVoiceId not found!\"\n\n voices_json = requests.get(RECORDED_VOICES_URL.format(voice_id)).json()\n print(voices_json[0])\n result = get_score()\n cache = {}\n for i, voice in enumerate(voices_json):\n \n user_id = voice[\"userId\"]\n if user_id in cache:\n user = cache[user_id]\n else:\n user = requests.get(USERS_URL.format(user_id)).json()\n cache[user_id] = user\n voices_json[i][\"user\"] = user\n del voices_json[i][\"userId\"]\n rec_id = str(voices_json[i]['recordedVoiceId'])\n voices_json[i][\"score\"] = 0 if rec_id not in result else result[rec_id]\n\n return json.dumps(voices_json)\n","sub_path":"functions/voice/original/id/recordedvoices/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"512978382","text":"import collections\nimport itertools\nimport logging as lg\nimport re\n\nimport tabulate\nfrom path import Path\nfrom pyTrnsysType import (\n TypeVariable,\n TrnsysModel,\n Component,\n StudioHeader,\n MetaData,\n AnchorPoint,\n ComponentCollection,\n)\nfrom pyTrnsysType.statements import (\n Version,\n NaNCheck,\n OverwriteCheck,\n TimeReport,\n List,\n Simulation,\n Tolerances,\n Limits,\n DFQ,\n NoCheck,\n NoList,\n Map,\n EqSolver,\n End,\n Solver,\n Statement,\n Width,\n)\nfrom pyTrnsysType.utils import print_my_latex, TypeVariableSymbol, get_rgb_from_int\nfrom shapely.geometry import LineString, Point\nfrom sympy import Expr, Symbol\n\nfrom .trnsymodel import (\n ParameterCollection,\n InputCollection,\n ExternalFileCollection,\n _studio_to_linestyle,\n)\n\n\nclass Name(object):\n \"\"\"Handles the attribution of user defined names for :class:`TrnsysModel`,\n :class:`EquationCollection` and more.\n \"\"\"\n\n existing = [] # a list to store the created names\n\n def __init__(self, name=None):\n \"\"\"Pick a name. Will increment the name if already used\n\n Args:\n name:\n \"\"\"\n self.name = self.create_unique(name)\n\n def create_unique(self, name):\n \"\"\"Check if name has already been used. 
If so, append an incrementing suffix until the\n        name is unused.\n\n        Args:\n            name:\n        \"\"\"\n        if not name:\n            return None\n        i = 0\n        key = name\n        while key in self.existing:\n            i += 1\n            key = key.split(\"_\")\n            key = key[0] + \"_{}\".format(i)\n        the_name = key\n        self.existing.append(the_name)\n        return the_name\n\n    def __repr__(self):\n        return str(self.name)\n\n\nclass UnitType(object):\n    def __init__(self, n=None, m=None, comment=None):\n        \"\"\"\n        Args:\n            n (int): the UNIT number of the component. Allowable UNIT numbers\n                are integers between 1 and n, where n is set in\n                TrnsysConstants.f90 (default = 999).\n            m (int): the TYPE number of the component. Allowable TYPE numbers\n                are integers between 1 and 999.\n            comment (str): Comment is an optional comment. The comment is\n                reproduced on the output but is otherwise disregarded. Its\n                function is primarily to help the user associate the UNIT and\n                TYPE numbers with a particular component in the system.\n        \"\"\"\n        self.Comment = comment\n        self.n = n\n        self.m = m\n\n    def __repr__(self):\n        \"\"\"Overload __repr__() and str() to implement self.to_deck()\"\"\"\n        return self.to_deck()\n\n    def to_deck(self):\n        \"\"\"Returns the string representation for the Input File (.dck)\"\"\"\n        return \"UNIT {n} TYPE {m} {Comment}\\n\".format(\n            n=self.n, m=self.m, Comment=self.Comment\n        )\n\n\nclass Parameters(object):\n    def __init__(self, param_collection, n=None):\n        \"\"\"\n        Args:\n            param_collection (ParameterCollection): tuple of parameters\n            n (int, optional): the number of PARAMETERS to follow on the next\n                line(s). Typically this is the number of parameters required by\n                the component, but may be less if more than one PARAMETERS\n                statement is used for a given component.\n        \"\"\"\n        self.v = param_collection\n        if not n:\n            self.n = self.v.size\n        else:\n            self.n = n\n\n    def __repr__(self):\n        \"\"\"Overload __repr__() and str() to implement self.to_deck()\"\"\"\n        return self.to_deck()\n\n    def to_deck(self):\n        \"\"\"Returns the string representation for the Input File (.dck)\"\"\"\n        head = \"PARAMETERS {}\\n\".format(self.n)\n        # loop through parameters and print the (value, name) tuples.\n        v_ = (\n            (self.v[param].value.m, \"! {}\".format(self.v.data[param].name))\n            for param in self.v\n        )\n        params_str = tabulate.tabulate(v_, tablefmt=\"plain\", numalign=\"left\")\n        return head + params_str + \"\\n\"\n\n\nclass Inputs(object):\n    def __init__(self, input_collection, n=None):\n        \"\"\"\n        Args:\n            input_collection (InputCollection):\n            n:\n        \"\"\"\n        self.inputs = input_collection\n        if not n:\n            self.n = input_collection.size\n        else:\n            self.n = n\n\n    def __repr__(self):\n        \"\"\"Overload __repr__() and str() to implement self.to_deck()\"\"\"\n        return self.to_deck()\n\n    def to_deck(self):\n        \"\"\"Returns the string representation for the Input File (.dck)\"\"\"\n        head = \"INPUTS {}\\n\".format(self.n)\n        # \"{u_i}, {o_i}\": u_i is an integer number referencing the number of the\n        # UNIT to which the ith INPUT is connected. o_i is an integer number\n        # indicating to which OUTPUT (i.e., the 1st, 2nd, etc.) 
of UNIT\n # number ui the ith INPUT is connected.\n _ins = []\n for input in self.inputs.values():\n if input.is_connected:\n if isinstance(input.connected_to, TypeVariable):\n _ins.append(\n \"{}, {}\".format(\n input.connected_to.model.unit_number,\n input.connected_to.one_based_idx,\n )\n )\n else:\n _ins.append(input.connected_to.name)\n else:\n _ins.append(\"0,0\")\n core = \"\\t\\t\".join(_ins) + \"\\n\"\n return str(head) + str(core)\n\n\nclass ExternalFiles(object):\n\n # todo: Implement DESIGNATE vs ASSIGN. See TRNSYS Manual, section 6.3.17.\n # The DESIGNATE Statement and Logical Unit Numbers.\n\n def __init__(self, external_collection):\n \"\"\"\n Args:\n external_collection (ExternalFileCollection):\n \"\"\"\n self.external_files = external_collection\n\n def __repr__(self):\n \"\"\"Overload __repr__() and str() to implement self._to_deck()\"\"\"\n return self.to_deck()\n\n def to_deck(self):\n \"\"\"Returns the string representation for the external files (.dck)\"\"\"\n if self.external_files:\n head = \"*** External files\\n\"\n v_ = (\n (\"ASSIGN\", ext_file.value.normcase(), ext_file.logical_unit)\n for ext_file in self.external_files.values()\n )\n core = tabulate.tabulate(v_, tablefmt=\"plain\", numalign=\"left\")\n\n return str(head) + str(core)\n else:\n return \"\"\n\n\nclass Derivatives:\n # Todo: Implement Derivatives\n pass\n\n\nclass Trace:\n # Todo: Implement Trace\n pass\n\n\nclass Format:\n # Todo: Implement Format\n pass\n\n\nclass Constant(Statement):\n \"\"\"The CONSTANTS statement is useful when simulating a number of systems\n with identical component configurations but with different parameter values,\n initial input values, or initial values of time dependent variables.\n \"\"\"\n\n _new_id = itertools.count(start=1)\n instances = {}\n\n def __init__(self, name=None, equals_to=None, doc=None):\n \"\"\"\n Args:\n name (str): The left hand side of the equation.\n equals_to (str, TypeVariable): The right hand side of the equation.\n doc (str, optional): A small description optionally printed in the\n deck file.\n \"\"\"\n super().__init__()\n try:\n c_ = Constant.instances[name]\n except:\n self._n = next(self._new_id)\n self.name = name\n self.equals_to = equals_to\n self.doc = doc\n else:\n self._n = c_._n\n self.name = c_.name\n self.equals_to = c_.equals_to\n self.doc = c_.doc\n finally:\n Constant.instances.update({self.name: self})\n\n @classmethod\n def from_expression(cls, expression, doc=None):\n \"\"\"Create a Constant from a string expression. Anything before the equal\n sign (\"=\") will become the Constant's name and anything after will\n become the equality statement.\n\n Hint:\n The simple expressions are processed much as FORTRAN arithmetic\n statements are, with one significant exceptions. Expressions are\n evaluated from left to right with no precedence accorded to any\n operation over another. This rule must constantly be borne in mind\n when writing long expressions.\n\n Args:\n expression (str): A user-defined expression to parse.\n doc (str, optional): A small description optionally printed in the\n deck file.\n \"\"\"\n if \"=\" not in expression:\n raise ValueError(\n \"The from_expression constructor must contain an expression \"\n \"with the equal sign\"\n )\n a, b = expression.split(\"=\")\n return cls(a.strip(), b.strip(), doc=doc)\n\n @property\n def constant_number(self):\n \"\"\"The equation number. 
Unique\"\"\"\n return self._n\n\n def _to_deck(self):\n return self.equals_to\n\n\nclass ConstantCollection(collections.UserDict, Component):\n \"\"\"A class that behaves like a dict and that collects one or more\n :class:`Constants`.\n\n You can pass a dict of Equation or you can pass a list of Equation. In\n the latter, the :attr:`Equation.name` attribute will be used as a key.\n \"\"\"\n\n def __init__(self, mutable=None, name=None):\n \"\"\"Initialize a new ConstantCollection.\n\n Example:\n >>> c_1 = Constant.from_expression(\"A = 1\")\n >>> c_2 = Constant.from_expression(\"B = 2\")\n >>> ConstantCollection([c_1, c_2])\n\n Args:\n mutable (Iterable, optional): An iterable.\n name (str): A user defined name for this collection of constants.\n This name will be used to identify this block of constants in\n the .dck file;\n \"\"\"\n if isinstance(mutable, list):\n _dict = {f.name: f for f in mutable}\n else:\n _dict = mutable\n super().__init__(_dict)\n self.name = Name(name)\n self._unit = next(TrnsysModel.new_id)\n\n def __getitem__(self, key):\n \"\"\"\n Args:\n key:\n \"\"\"\n value = super().__getitem__(key)\n return value\n\n def __repr__(self):\n return self._to_deck()\n\n def __hash__(self):\n return self.unit_number\n\n def update(self, E=None, **F):\n \"\"\"D.update([E, ]**F). Update D from a dict/list/iterable E and F.\n If E is present and has a .keys() method, then does: for k in E: D[\n k] = E[k]\n If E is present and lacks a .keys() method, then does: for cts.name,\n cts in E: D[cts.name] = cts\n In either case, this is followed by: for k in F: D[k] = F[k]\n\n Args:\n E (list, dict or Constant): The constant to add or update in D (\n self).\n F (list, dict or Constant): Other constants to update are passed.\n \"\"\"\n if isinstance(E, Constant):\n E.model = self\n _e = {E.name: E}\n elif isinstance(E, list):\n _e = {cts.name: cts for cts in E}\n else:\n for v in E.values():\n if not isinstance(v, Constant):\n raise TypeError(\n \"Can only update an ConstantCollection with a\"\n \"Constant, not a {}\".format(type(v))\n )\n _e = {v.name: v for v in E.values()}\n k: Constant\n for k in F:\n if isinstance(F[k], dict):\n _f = {v.name: v for k, v in F.items()}\n elif isinstance(F[k], list):\n _f = {cts.name: cts for cts in F[k]}\n else:\n raise TypeError(\n \"Can only update an ConstantCollection with a\"\n \"Constant, not a {}\".format(type(F[k]))\n )\n _e.update(_f)\n super(ConstantCollection, self).update(_e)\n\n @property\n def size(self):\n return len(self)\n\n @property\n def unit_number(self):\n return self._unit\n\n def _to_deck(self):\n \"\"\"To deck representation\n\n Examples::\n\n CONSTANTS n\n NAME1 = ... constant 1 ...\n NAME2 = ... constant 2 ...\n •\n •\n •\n NAMEn = ... constant n ...\n \"\"\"\n header_comment = '* CONSTANTS \"{}\"\\n\\n'.format(self.name)\n head = \"CONSTANTS {}\\n\".format(len(self))\n v_ = ((equa.name, \"=\", str(equa)) for equa in self.values())\n core = tabulate.tabulate(v_, tablefmt=\"plain\", numalign=\"left\")\n return str(header_comment) + str(head) + str(core)\n\n def _get_inputs(self):\n \"\"\"inputs getter. Sorts by order number each time it is called\n \"\"\"\n return self\n\n def _get_outputs(self):\n \"\"\"outputs getter. Since self is already a dict, return self.\n \"\"\"\n return self\n\n\nclass Equation(Statement):\n \"\"\"The EQUATIONS statement allows variables to be defined as algebraic\n functions of constants, previously defined variables, and outputs from\n TRNSYS components. 
These variables can then be used in place of numbers in\n    the TRNSYS input file to represent inputs to components; numerical values of\n    parameters; and initial values of inputs and time-dependent variables. The\n    capabilities of the EQUATIONS statement overlap but greatly exceed those of\n    the CONSTANTS statement described in the previous section.\n\n    Hint:\n        In pyTrnsysType, the Equation class works hand in hand with the\n        :class:`EquationCollection` class. This class behaves a little bit like\n        the equation component in the TRNSYS Studio, meaning that you can list\n        equations in a block, give it a name, etc. See the\n        :class:`EquationCollection` class for more details.\n    \"\"\"\n\n    _new_id = itertools.count(start=1)\n\n    def __init__(self, name=None, equals_to=None, doc=None, model=None):\n        \"\"\"\n        Args:\n            name (str): The left hand side of the equation.\n            equals_to (str, TypeVariable): The right hand side of the equation.\n            doc (str, optional): A small description optionally printed in the\n                deck file.\n        \"\"\"\n        super().__init__()\n        self._n = next(self._new_id)\n        self.name = name\n        self.equals_to = equals_to\n        self.doc = doc\n        self.model = model\n\n    @classmethod\n    def from_expression(cls, expression, doc=None):\n        \"\"\"Create an equation from a string expression. Anything before the\n        equal sign (\"=\") will become the Equation's name and anything after\n        will become the equality statement.\n\n        Example:\n            Create a simple expression like so:\n\n            >>> equa1 = Equation.from_expression(\"TdbAmb = [011,001]\")\n\n        Args:\n            expression (str): A user-defined expression to parse.\n            doc (str, optional): A small description optionally printed in the\n                deck file.\n        \"\"\"\n        if \"=\" not in expression:\n            raise ValueError(\n                \"The from_expression constructor must contain an expression \"\n                \"with the equal sign\"\n            )\n        a, b = expression.split(\"=\")\n        return cls(a.strip(), b.strip(), doc=doc)\n\n    @classmethod\n    def from_symbolic_expression(cls, name, exp, *args, doc=None):\n        \"\"\"Create an equation with a combination of a generic expression (with\n        placeholder variables) and a list of arguments. The underlying engine\n        will use Sympy and symbolic variables. You can use a mixture of\n        :class:`TypeVariable` and :class:`Equation`, :class:`Constant` as\n        well as the python default :class:`str`.\n\n        .. Important::\n\n            If a `str` is passed in place of an expression argument (\n            :attr:`args`), make sure to declare that string as an Equation or\n            a Constant later in the routine.\n\n        Examples:\n            In this example, we define a variable (var_a) and we want it to be\n            equal to the 'Outlet Air Humidity Ratio' divided by 12 + log(\n            Temperature to heat source). In a TRNSYS deck file one would have to\n            manually determine the unit numbers and output numbers and write\n            something like: '[1, 2]/12 + log([1, 1])'. With the\n            :func:`~from_symbolic_expression`, we can do this very simply:\n\n            1. first, define the name of the variable:\n\n            >>> name = \"var_a\"\n\n            2. then, define the expression as a string. Here, the variables `a`\n            and `b` are symbols that represent the two type outputs. Note that\n            their names have been chosen arbitrarily.\n\n            >>> exp = \"log(a) + b / 12\"\n            >>> # would also be equivalent to\n            >>> exp = \"log(x) + y / 12\"\n\n            3. here, we define the actual variables (the type outputs) after\n            loading our model from its proforma:\n\n            >>> from pyTrnsysType import TrnsysModel\n            >>> fan = TrnsysModel.from_xml(\"fan_type.xml\")\n            >>> vars = (fan.outputs[0], fan.outputs[1])\n\n            .. Important::\n\n                The order of the symbolic variables encountered in the string\n                expression (step 2), from left to right, must be the same for\n                the tuple of variables. For instance, `a` is followed by `b`,\n                therefore `fan.outputs[0]` is followed by `fan.outputs[1]`.\n\n            4. finally, we create the Equation. Note that vars is passed with\n            the '*' declaration to unpack the tuple.\n\n            >>> from pyTrnsysType.input_file import Equation\n            >>> eq = Equation.from_symbolic_expression(name, exp, *vars)\n            >>> print(eq)\n            [1, 1]/12 + log([1, 2])\n\n        Args:\n            name (str): The name of the variable (left-hand side) of the\n                equation.\n            exp (str): The expression to evaluate. Use any variable name and\n                mathematical expression.\n            *args (tuple): A tuple of :class:`TypeVariable` that will replace\n                any variable name specified in the above expression.\n            doc (str, optional): A small description optionally printed in the\n                deck file.\n\n        Returns:\n            Equation: The Equation Statement object.\n        \"\"\"\n        from sympy.parsing.sympy_parser import parse_expr\n\n        exp = parse_expr(exp)\n\n        if len(exp.free_symbols) != len(args):\n            raise AttributeError(\n                \"The expression does not have the same number of \"\n                \"variables as arguments passed to the symbolic expression \"\n                \"parser.\"\n            )\n        for i, arg in enumerate(sorted(exp.free_symbols, key=lambda sym: sym.name)):\n            new_symbol = args[i]\n            if isinstance(new_symbol, TypeVariable):\n                exp = exp.subs(arg, TypeVariableSymbol(new_symbol))\n            elif isinstance(new_symbol, (Equation, Constant)):\n                exp = exp.subs(arg, Symbol(new_symbol.name))\n            else:\n                exp = exp.subs(arg, Symbol(new_symbol))\n        return cls(name, exp)\n\n    @property\n    def eq_number(self):\n        \"\"\"The equation number. Unique\"\"\"\n        return self._n\n\n    @property\n    def unit_number(self):\n        return self.model.unit_number\n\n    def __repr__(self):\n        return \" = \".join([self.name, self._to_deck()])\n\n    def __str__(self):\n        return self.__repr__()\n\n    def _to_deck(self):\n        if isinstance(self.equals_to, TypeVariable):\n            return \"[{unit_number}, {output_id}]\".format(\n                unit_number=self.equals_to.model.unit_number,\n                output_id=self.equals_to.one_based_idx,\n            )\n        elif isinstance(self.equals_to, Expr):\n            return print_my_latex(self.equals_to)\n        else:\n            return self.equals_to\n\n\nclass EquationCollection(collections.UserDict, Component):\n    \"\"\"A class that behaves like a dict and that collects one or more\n    :class:`Equations`. This class behaves a little bit like the equation\n    component in the TRNSYS Studio, meaning that you can list equations in a\n    block, give it a name, etc.\n\n    You can pass a dict of Equation or you can pass a list of Equation. In\n    this case, the :attr:`Equation.name` attribute will be used as a key.\n\n    Hint:\n        Creating equations in PyTrnsysType is done through the :class:`Equation`\n        class. Equations are then collected in this EquationCollection. 
See the\n :class:`Equation` class for more details.\n \"\"\"\n\n def __init__(self, mutable=None, name=None):\n \"\"\"Initialize a new EquationCollection.\n\n Example:\n >>> equa1 = Equation.from_expression(\"TdbAmb = [011,001]\")\n >>> equa2 = Equation.from_expression(\"rhAmb = [011,007]\")\n >>> EquationCollection([equa1, equa2])\n\n Args:\n mutable (Iterable, optional): An iterable (dict or list).\n name (str): A user defined name for this collection of equations.\n This name will be used to identify this block of equations in\n the .dck file;\n \"\"\"\n if isinstance(mutable, list):\n _dict = {f.name: f for f in mutable}\n else:\n _dict = mutable\n super().__init__(_dict)\n self.name = Name(name)\n self._unit = next(TrnsysModel.new_id)\n self.studio = StudioHeader.from_trnsysmodel(self)\n\n def __getitem__(self, key):\n \"\"\"\n Args:\n key:\n \"\"\"\n if isinstance(key, int):\n value = list(self.data.values())[key]\n else:\n value = super().__getitem__(key)\n return value\n\n def __hash__(self):\n return self.unit_number\n\n def __repr__(self):\n return self._to_deck()\n\n def __setitem__(self, key, value):\n # optional processing here\n super().__setitem__(key, value)\n\n def update(self, E=None, **F):\n \"\"\"D.update([E, ]**F). Update D from a dict/list/iterable E and F.\n If E is present and has a .keys() method, then does: for k in E: D[\n k] = E[k]\n If E is present and lacks a .keys() method, then does: for eq.name,\n eq in E: D[eq.name] = eq\n In either case, this is followed by: for k in F: D[k] = F[k]\n\n Args:\n E (list, dict or Equation): The equation to add or update in D (\n self).\n F (list, dict or Equation): Other Equations to update are passed.\n\n Returns:\n None\n \"\"\"\n if isinstance(E, Equation):\n E.model = self\n _e = {E.name: E}\n elif isinstance(E, list):\n _e = {eq.name: eq for eq in E}\n else:\n for v in E.values():\n if not isinstance(v, Equation):\n raise TypeError(\n \"Can only update an EquationCollection with an\"\n \"Equation, not a {}\".format(type(v))\n )\n _e = {v.name: v for v in E.values()}\n k: Equation\n for k in F:\n if isinstance(F[k], dict):\n _f = {v.name: v for k, v in F.items()}\n elif isinstance(F[k], list):\n _f = {eq.name: eq for eq in F[k]}\n else:\n raise TypeError(\n \"Can only update an EquationCollection with an\"\n \"Equation, not a {}\".format(type(F[k]))\n )\n _e.update(_f)\n super(EquationCollection, self).update(_e)\n\n def setdefault(self, key, value=None):\n if key not in self:\n self[key] = value\n return self[key]\n\n @property\n def size(self):\n return len(self)\n\n @property\n def unit_number(self):\n return self._unit\n\n @property\n def unit_name(self):\n \"\"\"This type does not have a unit_name. Return component name\"\"\"\n return self.name\n\n @property\n def model(self):\n \"\"\"This model does not have a proforma. Return class name.\"\"\"\n return self.__class__.__name__\n\n def _to_deck(self):\n \"\"\"To deck representation\n\n Examples::\n\n EQUATIONS n\n NAME1 = ... equation 1 ...\n NAME2 = ... equation 2 ...\n •\n •\n •\n NAMEn = ... equation n ...\n \"\"\"\n header_comment = '* EQUATIONS \"{}\"\\n\\n'.format(self.name)\n head = \"EQUATIONS {}\\n\".format(len(self))\n v_ = ((equa.name, \"=\", equa._to_deck()) for equa in self.values())\n core = tabulate.tabulate(v_, tablefmt=\"plain\", numalign=\"left\")\n return str(header_comment) + str(head) + str(core)\n\n def _get_inputs(self):\n \"\"\"inputs getter. 
Sorts by order number each time it is called\n \"\"\"\n return self\n\n def _get_outputs(self):\n \"\"\"outputs getter. Since self is already a dict, return self.\n \"\"\"\n return self\n\n def _get_ordered_filtered_types(self, classe_, store):\n \"\"\"\n Args:\n classe_:\n store:\n \"\"\"\n return collections.OrderedDict(\n (attr, self._meta[store][attr])\n for attr in sorted(\n self._get_filtered_types(classe_, store),\n key=lambda key: self._meta[store][key].order,\n )\n )\n\n def _get_filtered_types(self, classe_, store):\n \"\"\"\n Args:\n classe_:\n store:\n \"\"\"\n return filter(\n lambda kv: isinstance(self._meta[store][kv], classe_), self._meta[store]\n )\n\n\nclass ControlCards(object):\n \"\"\"The :class:`ControlCards` is a container for all the TRNSYS Simulation\n Control Statements and Listing Control Statements. It implements the\n :func:`_to_deck` method which pretty-prints the statements with their\n docstrings.\n \"\"\"\n\n def __init__(\n self,\n version=None,\n simulation=None,\n tolerances=None,\n limits=None,\n nancheck=None,\n overwritecheck=None,\n timereport=None,\n dfq=None,\n width=None,\n nocheck=None,\n eqsolver=None,\n solver=None,\n nolist=None,\n list=None,\n map=None,\n ):\n \"\"\"Each simulation must have SIMULATION and END statements. The other\n simulation control statements are optional. Default values are assumed\n for TOLERANCES, LIMITS, SOLVER, EQSOLVER and DFQ if they are not present\n\n Args:\n width:\n version (Version): The VERSION Statement. labels the deck with the\n TRNSYS version number. See :class:`Version` for more details.\n simulation (Simulation): The SIMULATION Statement.determines the\n starting and stopping times of the simulation as well as the\n time step to be used. See :class:`Simulation` for more details.\n tolerances (Tolerances, optional): Convergence Tolerances (\n TOLERANCES). Specifies the error tolerances to be used during a\n TRNSYS simulation. See :class:`Tolerances` for more details.\n limits (Limits, optional): The LIMITS Statement. Sets limits on the\n number of iterations that will be performed by TRNSYS during a\n time step before it is determined that the differential\n equations and/or algebraic equations are not converging. See\n :class:`Limits` for more details.\n nancheck (NaNCheck, optional): The NAN_CHECK Statement. An optional\n debugging feature in TRNSYS. If the NAN_CHECK statement is\n present, then the TRNSYS kernel checks every output of each\n component at each iteration and generates a clean error if ever\n one of those outputs has been set to the FORTRAN NaN condition.\n See :class:`NaNCheck` for more details.\n overwritecheck (OverwriteCheck, optional): The OVERWRITE_CHECK\n Statement. An optional debugging feature in TRNSYS. Checks to\n make sure that each Type did not write outside its allotted\n space. See :class:`OverwriteCheck` for more details.\n timereport (TimeReport, optional): The TIME_REPORT Statement. Turns\n on or off the internal calculation of the time spent on each\n unit. See :class:`TimeReport` for more details.\n dfq (DFQ, optional): Allows the user to select one of three\n algorithms built into TRNSYS to numerically solve differential\n equations. See :class:`DFQ` for more details.\n nocheck (NoCheck, optional): The Convergence Check Suppression\n Statement. Remove up to 20 inputs for the convergence check. See\n :class:`NoCheck` for more details.\n eqsolver (EqSolver, optional): The Equation Solving Method\n Statement. 
The order in which blocks of EQUATIONS are solved is\n controlled by the EQSOLVER statement. See :class:`EqSolver` for\n more details.\n solver (Solver, optional): The SOLVER Statement. Select the\n computational scheme. See :class:`Solver` for more details.\n nolist (NoList, optional): The NOLIST Statement. See :class:`NoList`\n for more details.\n list (List, optional): The LIST Statement. See :class:`List` for\n more details.\n map (Map, optional): The MAP Statement. See :class:`Map` for more\n details.\n\n Note:\n Some Statements have not been implemented because only TRNSYS \n gods 😇\n use them. Here is a list of Statements that have been ignored:\n\n - The Convergence Promotion Statement (ACCELERATE)\n - The Calling Order Specification Statement (LOOP)\n \"\"\"\n super().__init__()\n self.version = version\n self.simulation = simulation\n\n self.tolerances = tolerances\n self.limits = limits\n self.nancheck = nancheck\n self.overwritecheck = overwritecheck\n self.timereport = timereport\n\n self.dfq = dfq\n self.nocheck = nocheck\n self.eqsolver = eqsolver\n self.solver = solver\n\n # Listing Control Statements\n self.nolist = nolist\n self.list = list\n self.map = map\n\n self.end = End()\n\n @classmethod\n def all(cls):\n \"\"\"Returns a SimulationCard with all available Statements initialized\n with their default values. This class method is not recommended since\n many of the Statements are a time consuming process and should be used\n as a debugging tool.\n \"\"\"\n return cls(\n Version(),\n Simulation(),\n Tolerances(),\n Limits(),\n NaNCheck(),\n OverwriteCheck(),\n TimeReport(),\n DFQ(),\n Width(),\n NoCheck(),\n EqSolver(),\n Solver(),\n NoList(),\n List(),\n Map(),\n )\n\n @classmethod\n def debug_template(cls):\n \"\"\"Returns a SimulationCard with useful debugging Statements.\"\"\"\n return cls(\n Version(),\n Simulation(),\n map=Map(),\n nancheck=NaNCheck(),\n overwritecheck=OverwriteCheck(),\n )\n\n @classmethod\n def basic_template(cls):\n \"\"\"Returns a SimulationCard with only the required Statements\"\"\"\n return cls(Version(), Simulation())\n\n def _to_deck(self):\n \"\"\"Creates a string representation. If the :attr:`doc` where specified,\n a small description is printed in comments\n \"\"\"\n head = \"*** Control Cards\\n\"\n v_ = (\n (str(param), \"! 
{}\".format(param.doc))\n for param in self.__dict__.values()\n if hasattr(param, \"doc\")\n )\n statements = tabulate.tabulate(v_, tablefmt=\"plain\", numalign=\"left\")\n return str(head) + str(statements)\n\n def set_statement(self, statement):\n self.__setattr__(statement.__class__.__name__.lower(), statement)\n\n\n__statements__ = [\"\"]\n\n\nclass Deck(object):\n \"\"\"\"\"\"\n\n def __init__(self, name, control_card):\n self.models = ComponentCollection()\n self.control_card = control_card\n self.name = name\n\n @classmethod\n def _from_deckfile(cls, file, proforma_root=None):\n file = Path(file)\n with open(file) as dcklines:\n dck = cls(name=file.basename, control_card=None)\n cc = ControlCards()\n dck._control_card = cc\n line = dcklines.readline()\n while line:\n # at each line check for a match with a regex\n key, match = dck._parse_line(line)\n\n if key == \"version\":\n version = match.group(\"version\")\n v_ = Version.from_string(version.strip())\n cc.set_statement(v_)\n\n # identify a ConstantCollection\n if key == \"constants\":\n n_cnts = match.group(key)\n cb = ConstantCollection()\n for n in range(int(n_cnts)):\n line = next(dcklines)\n cb.update(Constant.from_expression(line))\n cc.set_statement(cb)\n\n if key == \"simulation\":\n sss = match.group(key)\n s_ = Simulation(*map(Constant, sss.split()))\n repr(s_.start)\n cc.set_statement(s_)\n\n if key == \"tolerances\":\n sss = match.group(key)\n t_ = Tolerances(*(map(float, map(str.strip, sss.split()))))\n cc.set_statement(t_)\n\n if key == \"limits\":\n sss = match.group(key)\n l_ = Limits(*(map(int, map(str.strip, sss.split()))))\n cc.set_statement(l_)\n\n if key == \"dfq\":\n k = match.group(key)\n cc.set_statement(DFQ(k.strip()))\n\n if key == \"width\":\n w = match.group(key)\n # todo: Implement Width\n\n if key == \"list\":\n k = match.group(key)\n cc.set_statement(List(*k.strip().split()))\n\n if key == \"solver\":\n k = match.group(key)\n cc.set_statement(Solver(*k.strip().split()))\n\n if key == \"nancheck\":\n k = match.group(key)\n cc.set_statement(NaNCheck(*k.strip().split()))\n\n if key == \"overwritecheck\":\n k = match.group(key)\n cc.set_statement(OverwriteCheck(*k.strip().split()))\n\n if key == \"timereport\":\n k = match.group(key)\n cc.set_statement(TimeReport(*k.strip().split()))\n\n if key == \"eqsolver\":\n k = match.group(key)\n cc.set_statement(EqSolver(*k.strip().split()))\n\n if key == \"userconstants\":\n line = dcklines.readline()\n key, match = dck._parse_line(line)\n\n # identify an equation block (EquationCollection)\n if key == \"equations\":\n # extract number of line, number of equations\n n_equations = match.group(\"equations\")\n line = dcklines.readline()\n # read each line of the table until a blank line\n ec = EquationCollection()\n for n in range(int(n_equations)):\n # extract number and value\n value = line.strip()\n # create equation\n eq = Equation.from_expression(value)\n ec.update(eq)\n\n line = dcklines.readline() # go to next line\n # append the dictionary to the data list\n dck.append_model(ec)\n\n # read studio markup\n if key == \"unitnumber\":\n unit_number = match.group(key)\n ec._unit = int(unit_number)\n if key == \"unitname\":\n unit_name = match.group(key)\n ec.name = unit_name\n if key == \"layer\":\n layer = match.group(key)\n ec.change_component_layer(layer)\n if key == \"position\":\n pos = match.group(key)\n ec.set_canvas_position(map(float, pos.strip().split()))\n\n # identify a unit (TrnsysModel)\n if key == \"unit\":\n models = []\n # extract unit_number, 
type_number and name\n                    u = match.group(\"unitnumber\").strip()\n                    t = match.group(\"typenumber\").strip()\n                    n = match.group(\"name\").strip()\n\n                    _meta = MetaData(type=t)\n                    model = TrnsysModel(_meta, name=n)\n                    model._unit = int(u)\n                    line = dcklines.readline()\n\n                    # read studio markup\n                    for n in range(4):\n                        key, match = dck._parse_line(line)\n                        if key == \"unitname\":\n                            unit_name = match.group(key)\n                            model.name = unit_name\n                        if key == \"layer\":\n                            layer = match.group(key)\n                            model.change_component_layer(layer)\n                        if key == \"position\":\n                            pos = match.group(key)\n                            model.set_canvas_position(\n                                map(float, pos.strip().split()), True\n                            )\n                        if key == \"model\":\n                            _mod = match.group(\"model\")\n                            xml = Path(_mod.replace(\"\\\\\", \"/\"))\n                            xml_basename = xml.basename()\n                            try:\n                                new_meta = MetaData.from_xml(xml)\n                            except:\n                                # replace extension with \".xml\" and retry\n                                xml_basename = xml_basename.stripext() + \".xml\"\n                                if proforma_root is None:\n                                    proforma_root = Path.getcwd()\n                                else:\n                                    proforma_root = Path(proforma_root)\n                                xmls = proforma_root.glob(\"*.xml\")\n                                xml = next(\n                                    (x for x in xmls if x.basename() == xml_basename),\n                                    None,\n                                )\n                                if not xml:\n                                    msg = (\n                                        'The proforma {} could not be found '\n                                        'at \"{}\"'.format(xml_basename, proforma_root)\n                                    )\n                                    lg.warning(msg)\n                                    break\n                                new_meta = MetaData.from_xml(xml)\n                            model.update_meta(new_meta)\n                            line = dcklines.readline()\n\n                    dck.append_model(model)\n\n                # identify linkstyles\n                if key == \"link\":\n                    # identify u,v unit numbers\n                    u, v = match.group(key).strip().split(\":\")\n\n                    line = dcklines.readline()\n                    key, match = dck._parse_line(line)\n\n                    # identify linkstyle attributes\n                    if key == \"linkstyle\":\n                        _lns = match.groupdict()\n                        path = _lns[\"path\"].strip().split(\":\")\n\n                        mapping = AnchorPoint(\n                            dck.models[int(u)]\n                        ).studio_anchor_reverse_mapping\n\n                        def find_closest(mappinglist, coordinate):\n                            def distance(a, b):\n                                a_ = Point(a)\n                                b_ = Point(b)\n                                return a_.distance(b_)\n\n                            return min(\n                                mappinglist, key=lambda x: distance(x, coordinate)\n                            )\n\n                        u_coords = (int(_lns[\"u1\"]), int(_lns[\"u2\"]))\n                        v_coords = (int(_lns[\"v1\"]), int(_lns[\"v2\"]))\n                        loc = (\n                            mapping[find_closest(mapping.keys(), u_coords)],\n                            mapping[find_closest(mapping.keys(), v_coords)],\n                        )\n                        color = get_rgb_from_int(int(_lns[\"color\"]))\n                        linestyle = _studio_to_linestyle(int(_lns[\"linestyle\"]))\n                        linewidth = int(_lns[\"linewidth\"])\n\n                        path = LineString([list(map(int, p.split(\",\"))) for p in path])\n\n                        try:\n                            dck.models[int(u)].set_link_style(\n                                dck.models[int(v)],\n                                loc,\n                                color,\n                                linestyle,\n                                linewidth,\n                                path,\n                            )\n                        except:\n                            pass\n\n                line = dcklines.readline()\n\n            # assert missing types\n            # todo: list types that could not be parsed\n            return dck\n\n    def _parse_line(self, line):\n        \"\"\"\n        Do a regex search against all defined regexes and\n        return the key and match result of the first matching regex\n        \"\"\"\n\n        for key, rx in self._setup_re().items():\n            match = rx.search(line)\n            if match:\n                return key, match\n        # if there are no matches\n        return None, None\n\n    def _setup_re(self):\n        # set up regular expressions; the value captured for each keyword is\n        # exposed under a named group matching the dict key, as expected by\n        # the match.group(...) calls in _from_deckfile\n        # use https://regexper.com to visualise these if required\n        rx_dict = {\n            \"version\": re.compile(r\"(?i)(?P<key>^version)(?P<version>.*?)(?=(?:!|\\\\n|$))\"),\n            \"constants\": re.compile(r\"(?i)(?P<key>^constants)(?P<constants>.*?)(?=(?:!|\\\\n|$))\"),\n            \"simulation\": re.compile(r\"(?i)(?P<key>^simulation)(?P<simulation>.*?)(?=(?:!|$))\"),\n            \"tolerances\": re.compile(r\"(?i)(?P<key>^tolerances)(?P<tolerances>.*?)(?=(?:!|$))\"),\n            \"limits\": re.compile(r\"(?i)(?P<key>^limits)(?P<limits>.*?)(?=(?:!|$))\"),\n            \"dfq\": re.compile(r\"(?i)(?P<key>^dfq)(?P<dfq>.*?)(?=(?:!|$))\"),\n            \"width\": re.compile(r\"(?i)(?P<key>^width)(?P<width>.*?)(?=(?:!|$))\"),\n            \"list\": re.compile(r\"(?i)(?P<key>^list)(?P<list>.*?)(?=(?:!|$))\"),\n            \"solver\": re.compile(r\"(?i)(?P<key>^solver)(?P<solver>.*?)(?=(?:!|$))\"),\n            \"nancheck\": re.compile(r\"(?i)(?P<key>^nan_check)(?P<nancheck>.*?)(?=(?:!|$))\"),\n            \"overwritecheck\": re.compile(r\"(?i)(?P<key>^overwrite_check)(?P<overwritecheck>.*?)(?=(?:!|$))\"),\n            \"timereport\": re.compile(r\"(?i)(?P<key>^time_report)(?P<timereport>.*?)(?=(?:!|$))\"),\n            \"eqsolver\": re.compile(r\"(?i)(?P<key>^eqsolver)(?P<eqsolver>.*?)(?=(?:!|$))\"),\n            \"equations\": re.compile(r\"(?i)(?P<key>^equations)(?P<equations>.*?)(?=(?:!|$))\"),\n            \"unitnumber\": re.compile(r\"(?i)(?P<key>^\\*\\$unit_number)(?P<unitnumber>.*?)(?=(?:!|$))\"),\n            \"unitname\": re.compile(r\"(?i)(?P<key>^\\*\\$unit_name)(?P<unitname>.*?)(?=(?:!|$))\"),\n            \"layer\": re.compile(r\"(?i)(?P<key>^\\*\\$layer)(?P<layer>.*?)(?=(?:!|$))\"),\n            \"position\": re.compile(r\"(?i)(?P<key>^\\*\\$position)(?P<position>.*?)(?=(?:!|$))\"),\n            \"unit\": re.compile(r\"(?i)(^unit)(?P<unitnumber>.*?)(type)(?P<typenumber>.*\\s)(?P<name>\\s.*?)(?=(?:!|$))\"),\n            \"model\": re.compile(r\"(?i)(?P<key>^\\*\\$model)(?P<model>.*?)(?=(?:!|$))\"),\n            \"link\": re.compile(r\"(?i)(^\\*!link\\s)(?P<link>.*?)(?=(?:!|$))\"),\n            # two of the colon-separated connection_set fields are never read\n            # by the parser and are therefore left as unnamed groups\n            \"linkstyle\": re.compile(\n                r\"(?i)(?:^\\*!connection_set )(?P<u1>.*?):(?P<u2>.*?):(?P<v1>.*?):(?P<v2>.*?)\"\n                r\":(.*?):(.*?):(?P<color>.*?):(?P<linestyle>.*?):(?P<linewidth>.*?):(?P<path>.*?$)\"\n            ),\n            \"userconstants\": re.compile(r\"(?i)(?P<key>^\\*\\$user_constants)(?=(?:!|$))\"),\n        }\n        return rx_dict\n\n    def append_model(self, model):\n        self.models.update(model)\n","sub_path":"pyTrnsysType/input_file.py","file_name":"input_file.py","file_ext":"py","file_size_in_byte":46028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"563722510","text":"from django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom payments.models import Invoice, RazorpayKeys\nfrom payments.razorpay.razorpay_payments import RazorpayPayments\nfrom payments.models import Payment, Order\n\nimport json\n\n@csrf_exempt\ndef webhook(request):\n    if request.method == 'POST':\n        keys = RazorpayKeys.objects.first()\n        payment = RazorpayPayments(keys.api_key, keys.api_secret)\n        data = json.loads(request.body)\n        if 'payload' not in data or 'invoice' not in data['payload']:\n            return JsonResponse({\"message\": \"Invalid Data\"})\n\n        invoice_entity = data['payload']['invoice']['entity']\n        order_entity = data['payload']['order']['entity']\n        payment_entity = data['payload']['payment']['entity']\n\n        invoice = Invoice.objects.get(invoice_id=invoice_entity['id'])\n        invoice.status = invoice_entity['status']\n        invoice.save()\n        payment.save_payment(payment_entity)\n        payment.save_order(order_entity)\n        return JsonResponse({\"message\": \"Success\"})\n\n    return JsonResponse({\"message\": \"Method Not Allowed\"})\n\n\ndef sync(request):\n    keys = RazorpayKeys.objects.first()\n    payment = RazorpayPayments(keys.api_key, keys.api_secret)\n    invoices = Invoice.objects.all()\n    for invoice in invoices:\n        invoice_details = payment.fetch_invoices(invoice.invoice_id)\n        invoice.status = invoice_details['status']\n        invoice.save()\n        if invoice.status == 'paid':\n            orders = Order.objects.filter(order_id=invoice_details['order_id'])\n            if len(orders) == 0:\n                order_details = payment.fetch_orders(\n                    invoice_details['order_id'])\n                payment.save_order(order_details)\n            
if invoice_details['payment_id']:\n                payments = Payment.objects.filter(payment_id=invoice_details['payment_id'])\n                if len(payments) == 0:\n                    payment_details = payment.fetch_payment(invoice_details['payment_id'])\n                    payment.save_payment(payment_details)\n\n    return JsonResponse({\"message\": \"synced\"})","sub_path":"payments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"579888643","text":"from datetime import datetime\n\nfrom django import forms\nfrom django.utils import timezone\nfrom django.core.files.images import get_image_dimensions\n\nfrom eventix.models import Event, Location, Seat\n\n\nclass EventForm(forms.ModelForm):\n    location = forms.ModelChoiceField(queryset=Location.objects.all())\n    event_date = forms.DateField(widget=forms.widgets.DateInput(attrs={'type': 'date'}))\n\n    class Meta:\n        model = Event\n        fields = ('title', 'content', 'event_date', 'location', 'poster')\n\n    def clean_event_date(self):\n        event_date = self.cleaned_data.get('event_date')\n        if event_date < datetime.date(timezone.now()):\n            raise forms.ValidationError(\"You cannot set an event in the past!\")\n        return event_date\n\n    def clean(self):\n        location = self.cleaned_data.get('location')\n        event_date = self.cleaned_data.get('event_date')\n        for event in Event.objects.filter(location=location).exclude(pk=self.instance.pk):\n            if event.event_date == event_date:\n                raise forms.ValidationError(\"This location is already reserved for another event on that date!\")\n        return super().clean()\n\n    def clean_poster(self):\n        poster = self.cleaned_data.get('poster')\n        if poster is None:\n            return poster\n        width, height = get_image_dimensions(poster)\n        if width < 500 or height < 550:\n            raise forms.ValidationError(\"Minimum poster resolution should be 500x550px\")\n        return poster\n\n\nclass SeatForm(forms.ModelForm):\n    class Meta:\n        model = Seat\n        fields = ('position', 'price', 'special_seat')\n\n    def clean(self):\n        position = self.cleaned_data.get('position')\n        location = self.cleaned_data.get('location')\n        for seat in Seat.objects.filter(location=location):\n            if position == seat.position:\n                raise forms.ValidationError(\"There is already a seat with this number!\")\n        return super().clean()\n","sub_path":"proiectul_colectiv/eventix/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"227482725","text":"from twisted.plugin import IPlugin\nfrom heufybot.moduleinterface import BotModule, IBotModule\nfrom zope.interface import implements\n\n\nclass IRCv3ChgHost(BotModule):\n    implements(IPlugin, IBotModule)\n\n    name = \"ChgHost\"\n    capName = \"chghost\"\n    core = True\n\n    def actions(self):\n        return [ (\"listcaps\", 1, self.addToCapList),\n                 (\"pre-handlecommand-CHGHOST\", 1, self.handleChgHost),\n                 (\"caps-acknowledged\", 1, self.finishHandler) ]\n\n    def addToCapList(self, server, caps):\n        caps.append(self.capName)\n\n    def handleChgHost(self, server, nick, ident, host, params):\n        if not self.bot.moduleHandler.runActionUntilTrue(\"has-cap-enabled\", server, self.capName):\n            return False\n\n        if nick not in self.bot.servers[server].users:\n            self.bot.log.warn(\"[{server}] Received CHGHOST message for unknown user {nick}\", server=server, nick=nick)\n            return False\n\n        user = self.bot.servers[server].users[nick]\n        user.ident = params[0]\n        user.host = params[1]\n\n        return False\n\n    def 
finishHandler(self, server, caps):\n if self.capName in caps:\n self.bot.moduleHandler.runGenericAction(\"cap-handler-finished\", server, self.capName)\n\n\nchghost = IRCv3ChgHost()\n","sub_path":"heufybot/modules/ircv3/chghost.py","file_name":"chghost.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"113339159","text":"#this is learned from MOFAN, LINK: https://morvanzhou.github.io/\n##About the basic training of the data, y=ax+b\n\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef add_layer(inputs,in_size,out_size,activation_function=None):\n #Matrix\n Weights=tf.Variable(tf.random_normal([in_size,out_size]))\n #vector\n biases=tf.Variable(tf.zeros([1,out_size])+0.1)\n Wx_plus_b=tf.matmul(inputs,Weights)+biases\n if activation_function is None:\n outputs=Wx_plus_b\n else:\n outputs=activation_function(Wx_plus_b)\n return outputs\n\nx_data=np.linspace(-1,1,300)[:,np.newaxis]\nnoise=np.random.normal(0,0.05,x_data.shape)\ny_data=np.square(1-x_data*x_data)+noise\n\n\nxs=tf.placeholder(tf.float32,[None,1])\nys=tf.placeholder(tf.float32,[None,1])\n\nl1=add_layer(xs,1,10,activation_function=tf.nn.relu)\nl2=add_layer(l1,10,20,activation_function=tf.nn.relu)\nl3=add_layer(l2,20,10,activation_function=tf.nn.relu)\nprediction=add_layer(l3,10,1,activation_function=None)\n\nloss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),\n reduction_indices=[1]))\ntrain_step=tf.train.AdamOptimizer(0.01).minimize(loss)\n\n#initialize the Variable\ninit=tf.global_variables_initializer()\n\nsess=tf.Session()\nsess.run(init)\n\n#create plt\nfig=plt.figure()\nax=fig.add_subplot(1,1,1)\nax.scatter(x_data,y_data)\n#make the plt show not chock\nplt.ion()\nplt.show()\n\nfor i in range(1000):\n sess.run(train_step,feed_dict={xs:x_data,ys:y_data})\n if i%10==1:\n print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))\n prediction_value=sess.run(prediction,feed_dict={xs:x_data})\n try:\n #remove old and can create new\n ax.lines.remove(lines[0])\n except Exception:\n pass\n #red line,width=5\n lines=ax.plot(x_data,prediction_value,'r-',lw=1)\n plt.pause(0.01)\n","sub_path":"mf_samples/tf_mf1_2017_11_1_plt_Visualization4.py","file_name":"tf_mf1_2017_11_1_plt_Visualization4.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"510231949","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fd:\n long_desc = fd.read()\n\n\nsetuptools.setup(\n name=\"dataiku-plugin-tests-utils\",\n version=\"0.0.1\",\n description=\"The common tooling needed for each plugin\",\n author=\"Dataiku\",\n long_description=long_desc,\n long_description_content_type=\"text/markdown\",\n url=\"https://www.dataiku.com\",\n packages=setuptools.find_packages(),\n entry_points={\"pytest11\": [\"pytest_plugin = dku_plugin_test_utils.pytest_plugin.plugin\"]},\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Software Development :: Libraries',\n 'Programming Language :: Python',\n 'Operating System :: OS Independent'\n ],\n python_requires='>=2.7',\n install_requires=[\n \"dataiku-api-client\",\n \"allure-pytest==2.8.22\"\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"558178704","text":"from 
torchvision.datasets import VOCDetection, VOCSegmentation\nimport torch\nimport torchvision.transforms as T\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport random\n\ntrain_data_dir = '/home/jrlees/datasets/VOC2012/'\ntrain_anno_dir = '/home/jrlees/datasets/VOC2012/VOCdevkit/VOC2012/Annotations/'\ntrain_img_dir = '/home/jrlees/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages/'\n\ndatas_train = VOCDetection(root=train_data_dir, image_set='train', download=False,\n transform=T.Compose([ T.RandomHorizontalFlip(0.5), T.Resize((448, 448)), T.ToTensor()]))\nmasks_train = VOCSegmentation(root=train_data_dir, image_set='train', download=False,\n transform=T.Compose([T.Resize((448, 448)), T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n )\ndatas_val = VOCDetection(root=train_data_dir, image_set='val', download=False,\n transform=T.Compose([T.Resize((448, 448)), T.ToTensor()]))\nmasks_val = VOCSegmentation(root=train_data_dir, image_set='val', download=False,\n transform=T.Compose([T.Resize((448, 448)), T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]))\nobject_index = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'aeroplane', 'bicycle', 'boat', 'bus',\n 'motorbike', 'car', 'train', 'bottle', 'chair', 'diningtable', 'pottedplant', 'sofa', 'tvmonitor']\n\n\nclass VOCDataset(Dataset):\n def __init__(self, image_set='train'):\n if image_set == 'train':\n self.data = datas_train\n self.mask = masks_train\n else:\n self.data = datas_val\n self.mask = masks_val\n\n def __getitem__(self, item):\n images, target = self.data[item]\n name = target['annotation']['filename']\n bboxs = []\n object_class = []\n objects = target['annotation']['object']\n import os\n img = Image.open(os.path.join(train_img_dir, name))\n h, w = img.size\n raitox, ratioy = 448 / h, 448 / w\n if not isinstance(objects, list):\n objects = [objects]\n for object in objects:\n bbox = object['bndbox']\n bbox = [eval(bbox['xmin']) * raitox, eval(bbox['ymin']) * ratioy,\n eval(bbox['xmax']) * raitox, eval(bbox['ymax']) * ratioy]\n bboxs.append(bbox)\n object_class.append(object_index.index(object['name']))\n # print(name)\n # name = [name]\n # print(type(images))\n # print(type(object_class))\n # print(len(bboxs))\n return images, np.array(bboxs), np.array(object_class), name, np.array([raitox, ratioy], dtype=np.float32)\n\n def __len__(self):\n return len(self.data)\n\n def collate_fn(self, batch):\n images, bboxes, object_classes, name, ratios = list(zip(*batch))\n images = torch.stack([img for img in images], dim=0)\n names = [na for na in name]\n idxs = []\n bbox = []\n ratioss = []\n for i, batch_box in enumerate(bboxes):\n for _ in batch_box:\n idxs.append(i)\n ratioss.append(ratios[i])\n # print('bbox', batch_box.shape)\n # print('ocls', batch_object_class.shape)\n bbox.append(batch_box)\n # print(object_classes)\n ocl = np.vstack([ocls.reshape(-1, 1) for ocls in object_classes])\n # print(bbox)\n bbox = np.vstack(bbox)\n bbox = np.hstack([bbox, np.array(idxs).reshape(-1, 1)])\n ocls = np.hstack([ocl.reshape(-1, 1), np.array(idxs).reshape(-1, 1)])\n return images, bbox.astype(np.float32), ocls, names, ratioss\n\n\n\n","sub_path":"datasets/voc2012_loader.py","file_name":"voc2012_loader.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"138069513","text":"import os\nimport string, random\nimport datetime\n\nfrom django.http import 
HttpResponse\nfrom django.shortcuts import redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.encoding import iri_to_uri\n\n\nfrom .. import links_left\nfrom ...tools.ms1 import file_manager\nfrom .input_form import NtaInputs\nfrom ...app.ms1.nta_task import run_nta_dask\n\n# hard-coded example file names for testing found in nta_app/input/ms1/\nexample_pos_filename = 'pooled_blood_pos_MPP.csv'\nexample_neg_filename = 'pooled_blood_neg_MPP.csv'\nexample_tracer_filename = 'pooled_blood_tracers.csv'\nexample_run_sequence_pos_filename = 'pooled_blood_run_sequence_pos.csv'\nexample_run_sequence_neg_filename = 'pooled_blood_run_sequence_neg.csv'\n\ndef input_page(request, form_data=None, form_files=None):\n\n model = 'ms1'\n header = \"Run NTA MS1 Tool\"\n page = 'run_model'\n\n # generate a timestamp with the current time and date\n current_datetime = datetime.datetime.now()\n\n # define inputParameters dictionary containing all the parameters and their attributes, labels, and initial values\n inputParameters = {'project_name': ['Project Name', None],\n 'datetime': ['Date & Time', str(current_datetime)],\n 'test_files': ['Run test files only (debugging)', None],\n 'pos_input': ['Positive MPP file (csv)', None],\n 'neg_input': ['Negative MPP file (csv)', None],\n 'mass_accuracy_units': ['Adduct mass accuracy units', None],\n 'mass_accuracy': ['Adduct mass accuracy', None],\n 'rt_accuracy': ['Adduct retention time accuracy (mins)', None],\n 'run_sequence_pos_file': ['Run sequence positive mode file (csv; optional)', None],\n 'run_sequence_neg_file': ['Run sequence negative mode file (csv; optional)', None],\n 'tracer_input': ['Tracer file (csv; optional)', None],\n 'mass_accuracy_units_tr': ['Tracer mass accuracy units', None],\n 'mass_accuracy_tr': ['Tracer mass accuracy', None],\n 'rt_accuracy_tr': ['Tracer retention time accuracy (mins)', None],\n 'min_replicate_hits': ['Min replicate hits', None],\n 'min_replicate_hits_blanks': ['Min replicate hits in blanks', None],\n 'max_replicate_cv': ['Max replicate CV', None],\n 'parent_ion_mass_accuracy': ['Parent ion mass accuracy (ppm)', None],\n 'minimum_rt': ['Discard features below this retention time (mins)', None],\n 'search_dsstox': ['Search DSSTox for possible structures', None],\n 'search_hcd': ['Search Hazard Comparison Dashboard for toxicity data', None],\n 'search_mode': ['Search dashboard by', None],\n 'top_result_only': ['Save top result only?', None],\n 'api_batch_size': ['DSSTox search batch size (debugging)', None]\n }\n print(\"input_page: inputParameters: {} \".format(inputParameters))\n\n if (request.method == \"POST\"):\n print(\"POST: {}\".format(request.POST))\n\n # the form data is sent as a combination of two types of data: POST data and FILES data. The \n # POST data contains the form fields' values, while the FILES data contains any uploaded files.In \n # order to handle both types of data, you need to pass them to the form's constructor. Django \n # provides a convenient way to do this by using the request.POST and request.FILES dictionaries. 
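\n    # A minimal sketch of that pattern (purely illustrative; it mirrors the\n    # actual call a few lines below):\n    #     bound_form = NtaInputs(request.POST, request.FILES)\n    #     if bound_form.is_valid():\n    #         data = bound_form.cleaned_data  # validated, converted values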
\n # By passing these dictionaries as parameters to the form's constructor, Django automatically \n # populates the form fields with the submitted data.\n form = NtaInputs(request.POST, request.FILES)\n\n # if input 'test_files' is 'no', then the user has not selected to run the test files and at \n # least one input file is required\n if request.POST['test_files'] == 'no':\n form.fields['pos_input'].required = True\n form.fields['neg_input'].required = True\n if 'pos_input' in request.FILES.keys():\n # since the 'pos_input' file is present, the 'neg_input' file is not required\n form.fields['neg_input'].required = False\n if 'neg_input' in request.FILES.keys():\n # since the 'neg_input' file is present, the 'pos_input' file is not required\n form.fields['pos_input'].required = False\n\n if (form.is_valid()):\n print(\"form is valid\")\n\n # get parameters from the Request object. Note that the parameters are in the form of a QueryDict.\n parameters = request.POST\n print(\"1. parameters: {}\".format(parameters))\n parameters = parameters.dict()\n print(\"2. parameters: {}\".format(parameters))\n\n # get the uploaded files from the Request object. Note that the files are in the form of a\n # MultiValueDict. The MultiValueDict is a subclass of the standard Python dictionary that\n # provides a multiple values for the same key. This is necessary because some HTML form elements,\n # such as \n \"\"\"\n messageLabel = \"\"\"\n \n \"\"\"\n textArea = ''\n\n submit = ''\n form = ('
' + rotLable + rotInput + '
' +\n messageLabel + textArea + '
' + submit + '
')\n\n header = '

Web Caesar

'\n\n return header + form\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n\n content = build_page(\"\")\n self.response.write(content)\n\n def post(self):\n\n message = self.request.get(\"message\")\n rot = int(self.request.get(\"rot\"))\n encryptedMessage = caesar.encrypt(message, rot)\n escapedMessage = cgi.escape(encryptedMessage)\n content = build_page(escapedMessage)\n self.response.write(content)\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler)\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"645869405","text":"import webapp2\nimport server\nimport json\n\nimport unittest\nfrom google.appengine.ext import testbed\n\nclass AppEngineTest(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n self.INVALID_ID = \"9999999999999\"\n super(AppEngineTest, self).__init__(*args)\n\n def setUp(self):\n super(AppEngineTest, self).setUp()\n self.endpoint = ''\n self.method = 'GET'\n self.response = ''\n self.params = {}\n self.response_data = ''\n\n # First, create an instance of the Testbed class.\n self.testbed = testbed.Testbed()\n # Then activate the testbed, which prepares the service stubs for use.\n self.testbed.activate()\n # Next, declare which service stubs you want to use.\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n\n def tearDown(self):\n self.testbed.deactivate()\n\n # Note, currently doesn't support requests other than POST and GET\n def send(self):\n if self.method == 'POST':\n request = webapp2.Request.blank(self.endpoint, POST=self.params)\n elif self.method == \"GET\":\n endpoint_with_params = self.endpoint + \"?\"\n prefix = \"\"\n for key,value in self.params.items():\n endpoint_with_params += prefix + key + \"=\" + value\n prefix = \"&\"\n request = webapp2.Request.blank(endpoint_with_params)\n #PUT or DELETE requests (handle params like POST requests)\n else:\n request = webapp2.Request.blank(self.endpoint, POST=self.params)\n request.method = self.method\n\n self.response = request.get_response(server.app)\n\n try:\n self.response_data = json.loads(self.response.body)\n except ValueError:\n self.response_data = {}\n\n def expect_resp_code(self, code):\n self.assertEqual(self.response.status_int, code)\n\n def expect_resp_param(self, name, value=None):\n if value is None:\n self.assertIsNotNone(self.response_data[name])\n else:\n self.assertEqual(self.response_data[name], value)\n\n #Checks to see if the response matches the provided json template\n def expect_resp_match_template(self, template):\n for key,value in template.items():\n if value and isinstance(value, basestring):\n self.assertEqual(value, self.response_data[key])\n else:\n self.assertIsNotNone(self.response_data[key])\n\n def expect_resp_conforms(self, contract):\n self.check_contract_conforms(contract, self.response_data)\n\n def check_contract_conforms(self, contract, data):\n for key, value in contract.items():\n # recursively follow subdirectories, contract follows down a level as well\n if isinstance(value, dict):\n try:\n self.check_contract_conforms(value, data[key])\n except KeyError:\n self.check_partial_for_requires(value)\n elif value == \"+\" and isinstance(value, basestring):\n if isinstance(data, list):\n for list_item in data:\n self.check_contract_conforms(contract, list_item)\n else:\n try:\n data_value = data[key]\n except KeyError:\n self.assertTrue(False, \"contract key 
'\" + key + \"' was not found\")\n self.assertIsNotNone(data_value, key + \" is None and it is required\")\n\n elif value == \"*\" and isinstance(value, basestring):\n pass\n elif value == \"!\" and isinstance(value, basestring):\n if isinstance(data, list):\n for list_item in data:\n self.check_contract_conforms(contract, list_item)\n else:\n try:\n val = data[key]\n self.assertTrue(False, \"value was included when contract excluded it\")\n except KeyError:\n continue\n else:\n if isinstance(data, list):\n for list_item in data:\n self.check_contract_conforms(contract, list_item)\n else:\n try:\n data_value = data[key]\n except KeyError:\n self.assertTrue(False, \"contract key '\" + key + \"' was not found\")\n self.assertEqual(data_value, value)\n\n def check_partial_for_requires(self, partial):\n for key, value in partial.items():\n if isinstance(value, dict):\n self.check_partial_for_requires(value)\n elif isinstance(value, list):\n for list_item in value:\n self.check_partial_for_requires(list_item)\n else:\n self.assertEqual(value, \"*\", \"If data doesn't have a key for this nested element, \"\n \"all fields must be wildcard allowed\")\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"140757952","text":"import sys\nimport random\nimport time\nimport copy\nimport numpy as np\nfrom collections import OrderedDict\nfrom agent_zoo.weight_writer import weight_writer\n\nfrom agent_zoo.Eval import Eval\n\nNAME = 'B5'\nHIGH = 1.5\nLOW = .5\nSEED = 12\nMAX_GEN = 100\nMAX_POPULATION = 20\nCLONE_RATE = .1\nCLONES = int(.05 * MAX_POPULATION)\nMAX_FRAME = 200 # how many frames is the robot simulated for\n\n# #B3\n# HIGH = 5\n# LOW = -5\n# SEED = 12\n# MAX_GEN = 1000\n# MAX_POPULATION = 30\n# CLONE_RATE = .05\n# CLONES = int(.5 * MAX_POPULATION)\n# MAX_FRAME = 50 # how many frames is the robot simulated for\n\nclass Individual(object):\n\n def __init__(self, svd_dic, genotype=None):\n self.svd_dic = svd_dic #a reference to all the precomputed SVDs\n if genotype is None:\n self.genotype = [ random.uniform(LOW, HIGH) for i in range(len(svd_dic))]\n else:\n self.genotype = genotype\n self.fitness = None\n\n def get_weights(self):\n weights = {}\n i = 0\n for layer in self.svd_dic:\n if self.svd_dic[layer][0] is True:\n U, s, V = copy.deepcopy(self.svd_dic[layer][1])\n s *= self.genotype[i]\n weights[layer] = np.matmul( U ,np.matmul(np.diag(s), V))\n else: # a bias layer so matrix multiplication is not necessary\n weights[layer] = self.genotype[i] * copy.deepcopy(self.svd_dic[layer][1])\n i += 1\n return weights\n\n def mate(self, partner):\n gt = []\n for i in range(len(self.genotype)):\n if random.randint(0,99) % 2 == 0:\n gt.append(self.genotype[i])\n else:\n gt.append(partner.genotype[i])\n return Individual(self.svd_dic, genotype=gt)\n\n# returns a list individuals that have been selected\ndef select_parents(population, selection_rate):\n total_fitness = 0\n for indiv in population:\n if indiv.fitness is not None:\n total_fitness += indiv.fitness\n selected = []\n\n # create a random list of indices\n order = [i for i in range(len(population))]\n random.shuffle(order)\n how_many = int(selection_rate * len(population))\n index = 0\n while len(selected) < how_many:\n indiv = population[order[index]]\n if (indiv.fitness / total_fitness) > random.random():\n selected.append(indiv)\n index += 1\n if index > (len(population) -1):\n index = 0\n\n 
return selected\n\ndef mutate(individual):\n index = random.randint(0, len(individual.genotype) -1)\n # individual.genotype[index] = random.uniform(LOW, HIGH) * individual.genotype[index]\n individual.genotype[index] = random.uniform(LOW, HIGH)\ndef clone(individuals):\n clones = []\n for indiv in individuals:\n for i in range(CLONES):\n clones.append(Individual(indiv.svd_dic, copy.deepcopy(indiv.genotype)))\n return clones\n\ndef main():\n start_time = time.process_time()\n random.seed(SEED)\n\n weightfile = 'RoboschoolAnt_v1_2017jul.weights'\n original = {}\n exec(open(weightfile).read(), original)\n layerNames = ['weights_dense1_w', 'weights_dense1_b', 'weights_dense2_w', 'weights_dense2_b', 'weights_final_w',\n 'weights_final_b']\n\n svd_dict = OrderedDict()\n for layer in layerNames:\n if len(original[layer].shape) == 2:\n U, s, V = np.linalg.svd( original[layer], full_matrices=False)\n svd_dict[layer] = True, (U, s, V)\n else:\n svd_dict[layer] = False, original[layer]\n\n\n\n\n #generate initial population\n population = [Individual(svd_dict) for i in range(MAX_POPULATION)]\n\n print('Starting evolution')\n # base_indiv_fitness = evaluate_individual(original)\n with open('Experiment{}_results.csv'.format(NAME), 'w') as writer_results:\n with open('logEvalb.csv', 'w') as logger:\n logger.write('time\\n')\n header = 'generation, run_time, avg_fitness, top_fitness'\n print(header)\n writer_results.write(header + '\\n')\n\n\n for generation in range(MAX_GEN):\n start = time.process_time()\n for indiv in population:\n if indiv.fitness is None:\n indiv.fitness = Eval().evaluate_individual(MAX_FRAME, indiv.get_weights(), logger)\n #select individuals for reproduction\n selected = select_parents(population, CLONE_RATE)\n #generate children\n children = clone(selected)\n\n for child in children:\n mutate(child)\n #evaluate children\n for child in children:\n if child.fitness is None:\n child.fitness = Eval().evaluate_individual(MAX_FRAME, child.get_weights(), logger)\n\n population.extend(children)\n\n population = sorted(population, key=lambda x: x.fitness, reverse=True)\n\n survivors_indices = [random.randint(3, len(population) -1) for i in range(MAX_POPULATION -1)]\n survivors = []\n survivors.append(population[0])\n survivors.append(population[1])\n survivors.append(population[2])\n for index in survivors_indices:\n survivors.append(population[index])\n\n population = survivors\n\n total_fitness = 0\n for indiv in population:\n total_fitness += indiv.fitness\n avg_fitness = total_fitness/len(population)\n run_time = time.process_time() - start\n result = '{}, {}, {}, {}'.format(generation, run_time, avg_fitness, population[0].fitness)\n print(result)\n writer_results.write(result +'\\n')\n for indiv in population:\n indiv.fitness = None\n\n with open('Elite_Individual_Experiment{}.weights'.format(NAME), 'w') as wrt:\n weight_writer(wrt, population[0].get_weights())\n\n total_time = time.process_time() - start_time\n print('RunTime: {}'.format(str(total_time)))\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"agent_zoo/ExperimentB.py","file_name":"ExperimentB.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"586903310","text":"import random\nimport particle\n\n\ndef setup():\n global Balls\n Balls = []\n for i in range (1): \n Ball = particle.Particle()\n Balls.append(Ball)\n \n \n size(400,400)\n background(0)\n \n \ndef draw():\n background(0)\n stroke(255) \n global 
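The core trick in the ExperimentB record above is that each genotype entry rescales the singular values of one pretrained weight matrix, so a single float perturbs a whole layer while leaving its singular vectors intact. A standalone numpy sketch of that reconstruction; the layer shape is illustrative, not taken from the real weight file.

import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((44, 64))          # stand-in for a pretrained layer

# Precompute the SVD once, as ExperimentB does per layer.
U, s, V = np.linalg.svd(W, full_matrices=False)

gene = 1.2                                  # one genotype entry for this layer
W_scaled = U @ np.diag(s * gene) @ V        # same as the np.matmul chain above

# gene == 1.0 recovers the original weights (up to float rounding).
W_check = U @ np.diag(s * 1.0) @ V
print(np.allclose(W_check, W))              # True
print(np.linalg.norm(W_scaled - W))         # nonzero perturbation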
Balls\n for i in range(len(Balls)):\n Ball = Balls[i]\n Ball.draw()\n Ball.move()\n","sub_path":"Question_2/Question_2.pyde","file_name":"Question_2.pyde","file_ext":"pyde","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"245502106","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/islatu/cropping.py\n# Compiled at: 2020-04-21 07:21:09\n# Size of source mod 2**32: 2162 bytes\n\"\"\"\nOften the detector is a lot larger than the reflected intensity peak. \nTherefore, we crop the image down, these functions help with this.\n\"\"\"\nimport numpy as np\n\ndef crop_2d(array, x_start, x_end, y_start, y_end):\n \"\"\"\n Crop the data (`array`) with some given start and stop point.\n\n Args:\n array (np.ndarray): The intensity map collected by the 2\n dimensional detector.\n x_start (int): Start point in x-axis.\n x_end (int): End point in x-axis.\n y_start (int): Start point in y-axis.\n y_end (int): End point in y-axis.\n\n Returns:\n (np.ndarray): A cropped intensity map.\n \"\"\"\n cropped_array = array[x_start:x_end, y_start:y_end]\n return cropped_array\n\n\ndef crop_around_peak_2d(array, array_e=None, x_size=20, y_size=20):\n \"\"\"\n Crop the data (`array`) around the most intense peak, creating an array\n of dimensions [x_size, y_size].\n\n Args:\n array (np.ndarray): Intensity map collected by the 2\n dimensional detector.\n array_e (np.ndarray): Uncertainty map collected by the 2-D detector.\n x_size (int, optional): Size of the cropped image in x-axis. Defaults to 20.\n y_size (int, optional): Size of the cropped image in y-axis. Defaults to 20. 
\n\n Returns:\n (np.ndarray): A cropped intensity map.\n \"\"\"\n max_inten = np.unravel_index(np.argmax(array, axis=None), array.shape)\n half_x_size = int(x_size / 2)\n half_y_size = int(y_size / 2)\n cropped_array = crop_2d(array, max_inten[0] - half_x_size, max_inten[0] + half_x_size, max_inten[1] - half_y_size, max_inten[1] + half_y_size)\n if array_e is not None:\n cropped_array_error = crop_2d(array_e, max_inten[0] - half_x_size, max_inten[0] + half_x_size, max_inten[1] - half_y_size, max_inten[1] + half_y_size)\n return (\n cropped_array, cropped_array_error)\n return cropped_array","sub_path":"pycfiles/islatu-0.0.1-py3.7/cropping.cpython-37.py","file_name":"cropping.cpython-37.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"4491865","text":"# -*- coding: utf-8 -*-\n\nfile = open('in_2.txt')\n\nnames = sorted(list(map(lambda x: x.strip(),file.readlines()))[1:])\ntotal_score = 0\n\nfor i,name in enumerate(names):\n pos = i+1\n name_score = 0\n \n for c in name:\n name_score += 1+ ord(c.lower()) - ord('a')\n \n total_score += pos*name_score\n\nprint(total_score,end='')\n\nfile.close()\n\n\n\n","sub_path":"sessao_04/names/names_solver.py","file_name":"names_solver.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"578765488","text":"#!/usr/bin/env python2\n# XXX: Refactor to a comand line tool and remove pylint disable\n\"\"\"Add header row.\"\"\"\nimport argparse\nimport csv\n\nparser = argparse.ArgumentParser(description=\"Add header row\")\nparser.add_argument(\"in_file\", help=\"In-file\")\nparser.add_argument(\"out_file\", help=\"Out-file\")\nargs = parser.parse_args()\n\nwith open(args.out_file, \"wb\") as outcsv:\n writer = csv.writer(outcsv, delimiter=\"\\t\")\n writer.writerow([\"Gene\", \"Expression\"])\n\n with open(args.in_file, \"r+\") as incsv:\n reader = csv.reader(incsv, delimiter=\"\\t\")\n writer.writerows(row for row in reader)\n","sub_path":"resolwe_bio/tools/add_header.py","file_name":"add_header.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"91270440","text":"import numpy as np\nfrom bokeh.plotting import figure\nfrom bokeh.io import show\nfrom bokeh.models import ColumnDataSource, Panel\nfrom bokeh.transform import factor_cmap\n\n\ndef scatterPlot_tab(data_frame_nasa):\n # the data\n x = data_frame_nasa['duringTime']\n y = data_frame_nasa['month']\n index_cmap = factor_cmap('class_type_simple', palette=['red', 'blue', 'green', 'yellow'],\n factors=sorted(data_frame_nasa['class_type_simple'].unique()))\n # determine best fit line\n p = figure(plot_width=1200, plot_height=900, title=\"During Time for every month\", toolbar_location=None,\n tools=\"hover\")\n p.scatter('duringTime', 'month', source=data_frame_nasa, fill_color=index_cmap, fill_alpha=0.6, size=10,\n legend_field='class_type_simple')\n p.xaxis.axis_label = 'Minutes'\n p.yaxis.axis_label = 'Month'\n p.legend.location = \"top_left\"\n\n tab = Panel(child=p, title='scatter Plot')\n return tab\n","sub_path":"scripts/scatterPlot.py","file_name":"scatterPlot.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"931452","text":"# encoding: utf-8\n\n\"\"\"\n# @Time : 2019-09-25 09:45\n# @Author : Function\n# @FileName : 
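crop_around_peak_2d in the islatu record above boils down to argmax plus slicing; a minimal numpy sketch. Note that, as in the original, a peak closer than half the window to an edge yields a negative slice start and therefore a wrapped or shrunken crop; neither version guards against that.

import numpy as np

image = np.zeros((64, 64))
image[40, 17] = 9.0                       # synthetic intensity peak

peak = np.unravel_index(np.argmax(image), image.shape)
half = 10                                 # half of a 20x20 crop window
crop = image[peak[0] - half:peak[0] + half,
             peak[1] - half:peak[1] + half]

print(peak)         # (40, 17)
print(crop.shape)   # (20, 20)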
adminx.py\n# @Software: PyCharm\n\"\"\"\nimport xadmin\nfrom .models import City,CourseOrg,Teacher\n\n\nclass CityAdmin:\n    list_display = ['name', 'desc', 'add_time'] # columns shown by default\n    search_fields = ['name', 'desc'] # searchable fields\n    list_filter = ['name', 'desc', 'add_time'] # list filters\n\nclass CourseOrgAdmin:\n    list_display =['name','desc','click_num','fav_num','image','address','city','add_time']\n    search_fields = ['name','desc','click_num','fav_num','image','address','city']\n    list_filter = ['name','desc','click_num','fav_num','image','address','city']\n    relfield_style = 'fk-ajax'\n\n\nclass TeacherAdmin:\n\n    list_display =['org','name','work_years','work_company','work_position','points','click_num','fav_num','add_time']\n    search_fields= ['org','name','work_years','work_company','work_position','points','click_num','fav_num']\n    list_filter =['org','name','work_years','work_company','work_position','points','click_num','fav_num','add_time']\n\n\nxadmin.site.register(City,CityAdmin)\nxadmin.site.register(CourseOrg,CourseOrgAdmin)\nxadmin.site.register(Teacher,TeacherAdmin)\n\n","sub_path":"apps/organization/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"632757708","text":"from pandas import DataFrame\n\n\nclass Report:\n    \"\"\"\n    :class:`Report` is a string representation of the performance matrix retrieved by :class:`Evaluation` methods.\n\n    :param performance_matrix: `numpy.array` of shape `(n_instances, n_metrics)`. Performance matrix containing\n            performance values for each set instance row-wise and each set performance metric column-wise.\n    :param instances: `list` of instances being evaluated which is aligned with the performance_matrix on axis `0`.\n    :param metrics: `list` of metric functions which is aligned with the performance_matrix on axis `1`.\n    \"\"\"\n    def __init__(self, performance_matrix, instances, metrics):\n        if performance_matrix.shape[0] != len(instances):\n            raise TypeError(\"`performance_matrix` (axis 0) is not aligned with the number of instances.\")\n        if performance_matrix.shape[1] != len(metrics):\n            raise TypeError(\"`performance_matrix` (axis 1) is not aligned with the number of metrics.\")\n\n        self.records = {}\n        self.instance_names = self.__generate_instance_names(instances)\n        self.metric_names = [metric if type(metric) is str else metric.__name__ for metric in metrics]\n\n        for i, pv in enumerate(performance_matrix):\n            self.records[self.instance_names[i]] = pv\n\n    @staticmethod\n    def __generate_instance_names(instances):\n        instance_names = []\n        for instance in instances:\n            if isinstance(instance, str):\n                instance_names.append(instance)\n            else:\n                instance_names.append(type(instance).__name__)\n        instance_names_ = list(instance_names)\n        for i, instance_name in enumerate(instance_names):\n            if instance_names_.count(instance_name) > 1:\n                instance_names[i] = \"{:<35}\".format(instance_name + \" [\" + str(i) + \"]\")\n            else:\n                instance_names[i] = \"{:<35}\".format(instance_name)\n        return instance_names\n\n    def __str__(self):\n        return str(DataFrame.from_dict(data=self.records, orient='index', columns=self.metric_names))\n","sub_path":"pusion/model/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"505160171","text":"\"\"\"Set up and run dustybox calculations.\n\nCheck accuracy by changing C_force.\n\nNeed to set the following 
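The Report class above is quickest to understand from a small example: metric callables contribute only their __name__, and string instances are used directly as row labels. All values below are made up, and Report is assumed to be in scope from the record above.

import numpy as np
# Report is the class defined above (pusion/model/report.py).

def accuracy(y_true, y_pred):   # placeholder metric callables; Report
    ...                         # only reads their __name__

def f1(y_true, y_pred):
    ...

pm = np.array([[0.91, 0.88],
               [0.87, 0.90]])   # 2 instances x 2 metrics

report = Report(pm, instances=['DecisionTree', 'kNN'],
                metrics=[accuracy, f1])
print(report)   # pandas DataFrame: rows are instances, columns are metric names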
variables:\n SIMULATION\n The simulation type; here it is 'dustybox'.\n PARAMETERS\n The parameters dictionary of dictionaries for each run.\n RUN_DIRECTORY\n The path to the directory to store the runs.\n PATCH_FILE\n An optional Phantom patch file.\n\nThe PARAMETERS variable is a dictionary of parameter dictionaries.\n\nThe dictionary is as follows:\n {\n 'name_of_run1': parameters1,\n 'name_of_run2': parameters2,\n ...\n }\n\nThe 'parameters' dictionary has keys with the name of the run, which\nwill be the name of its directory, and the values are the parameters\ndictionaries for that run.\n\nEach dictionary for each run needs the following keys:\n\n 'prefix'\n 'length_unit'\n 'mass_unit'\n 'time_unit'\n 'sound_speed'\n 'box_width'\n 'lattice'\n 'number_of_particles_in_x_gas'\n 'number_of_particles_in_x_dust'\n 'density_gas'\n 'dust_to_gas_ratio'\n 'drag_method'\n 'grain_size'\n 'grain_density'\n 'velocity_delta'\n 'maximum_time'\n 'number_of_dumps'\n\nOptional parameters:\n\n 'C_force'\n\nAll float or ndarray variables can have units.\n\nThe length of 'dust_to_gas_ratio', 'grain_size', and 'velocity_delta'\nshould be the same, i.e. the number of dust species.\n\"\"\"\n\nimport copy\nimport sys\nimport pathlib\n\npath = pathlib.Path(__file__).parent / '..' / 'modules'\nsys.path.insert(0, str(path))\n\nfrom multigrain import run_script\nfrom multigrain.config import UNITS\n\n# Variables to set\nSIMULATION = None\nPARAMETERS = None\nRUN_DIRECTORY = None\nPATCH_FILE = None\n\n# ------------------------------------------------------------------------------------ #\n# MAKE CHANGES BELOW AS REQUIRED\n\nSIMULATION = 'dustybox'\nRUN_DIRECTORY = '~/runs/multigrain/dustybox/accuracy'\nPATCH_FILE = (\n pathlib.Path(__file__).resolve().parent.parent\n / 'patches'\n / 'phantom-666da9e8-dustybox.patch'\n)\n\n# Dictionary of parameters common to all runs.\n_parameters = {\n 'prefix': 'dustybox',\n 'length_unit': 1.0 * UNITS['au'],\n 'mass_unit': 1.99e33 * UNITS['g'],\n 'time_unit': 1.0 * UNITS['year'],\n 'sound_speed': 0.2e5 * UNITS['cm/s'],\n 'box_width': 1.0 * UNITS['au'],\n 'lattice': 'close packed',\n 'number_of_particles_in_x_gas': 32,\n 'number_of_particles_in_x_dust': 32,\n 'density_gas': 1.0e-13 * UNITS['g / cm^3'],\n 'drag_method': 'Epstein/Stokes',\n 'grain_density': 1.0 * UNITS['g / cm^3'],\n 'maximum_time': 3.0e7 * UNITS['s'],\n 'number_of_dumps': 3,\n}\n\n# Parameter in Phantom patch: \"comparing\" dt_force and dt_drag\nDTFORCE_TO_DTDRAG = 1.0\n\n# Grain sizes\ngrain_size = [0.01, 0.03]\nn_dust = len(grain_size)\n_parameters['grain_size'] = grain_size * UNITS['cm']\n_parameters['velocity_delta'] = [1.0 for _ in range(n_dust)] * UNITS['cm / s']\n\n# Generate one simulation per element of the Cartesian product of lists below\nlargest = 0.20\nnum = 5\ndtdrag_fac = [largest / 2 ** n for n in range(num)]\ndust_to_gas = [0.01, 0.5]\nhfacts = [1.0, 2.5]\n\n# Iterate over dtdrag_fac and dust_to_gas to generate simulations\nPARAMETERS = dict()\nfor hfact in hfacts:\n for eps in dust_to_gas:\n for dtdrag in dtdrag_fac:\n C_force = dtdrag / DTFORCE_TO_DTDRAG\n label = f'hfact_{hfact:.1f}-eps_{eps:.2f}-C_force_{C_force:.4f}'\n PARAMETERS[label] = copy.copy(_parameters)\n PARAMETERS[label]['C_force'] = C_force\n PARAMETERS[label]['dust_to_gas_ratio'] = [\n eps / n_dust for _ in range(n_dust)\n ]\n\n# ------------------------------------------------------------------------------------ #\n# DO NOT CHANGE BELOW\n\nrun_script(\n simulation_to_setup=SIMULATION,\n parameters_dict=PARAMETERS,\n 
run_directory=RUN_DIRECTORY,\n    phantom_patch_file=PATCH_FILE,\n)\n","sub_path":"code/scripts/dustybox_setup_and_run_accuracy.py","file_name":"dustybox_setup_and_run_accuracy.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"131796447","text":"import sys, random, math, itertools, pickle\nimport time\nimport psycopg2 as pg\nimport monetdb.sql as mdb\nfrom numpy import *\n\n\ndef getDatabaseType(databaseType):\n\tif databaseType == \"none\":\n\t\tprint(\"no database\")\n\t\texit()\n\telif databaseType == \"monetdb\":\n\t\tconn = mdb.connect(username=\"monetdb\", password=\"monetdb\", database=\"test\")\n\telif databaseType == \"postgres\":\n\t\tconn = pg.connect(dbname=\"postgres\")\n\telse:\n\t\tprint(\"wrong database type\")\n\t\texit()\n\n\treturn conn\n\ndef checkLevel1(x):\n\twhile (((x % 2) == 0) and x > 1): #While x is even and > 1\n\t\tx >>= 1\n\treturn (x == 1)\n\ndef checkLevel2(x):\n\treturn bin(x).count('1') == 2\n\ndef findPercent(nodeCount, sizeDC):\n\treturn 100*(nodeCount/sizeDC)\n\ndef createTable(cur, name, numCol, b=0, l=0):\n\n\tif(b == 1):\n\t\tif(l == 1):\n\t\t\tcols = \"(col0 bigint PRIMARY KEY,\"\n\t\telse:\n\t\t\tcols = \"(col0 int PRIMARY KEY,\"\n\t\t\n\t\tfor x in range(1, numCol):\n\t\t\tcols += \"col\" + str(x) + \" double precision,\"\n\telse:\n\t\tcols = \"(\"\n\t\tfor x in range(numCol):\n\t\t\tcols += \"col\" + str(x) + \" int,\"\n\t\n\tcols = cols[:-1]\n\n\tcols += \")\"\n\n\tcur.execute(\"CREATE TABLE \" + name + \" \" + cols)\n\ndef idChunkCombine(idn, chunk, numChunks):\n\treturn ((idn << math.ceil(math.log(numChunks, 2))) | chunk)\n\ndef createDCTableSetup(conn, table, levels, numChunks, numCols, numRows):\n\tcur = conn.cursor()\n\n\tif(numCols + math.ceil(math.log(numChunks, 2)) >= 32):\n\t\tcreateTable(cur, 'dc_' + table, 6, 1, 1)\n\telse:\n\t\tcreateTable(cur, 'dc_' + table, 6, 1)\n\n\tconn.commit()\n\ndef createDCTableLevel1(conn, table, levels, numChunks, numCols, numRows):\n\tcur = conn.cursor()\n\n\tcur.execute(\"SELECT * FROM \" + table)\n\tcolList = [x[0] for x in cur.description]\n\n\tsizeDC = numChunks * (2**numCols - 1)\n\tnodeCount = 0\n\tprevPercent = 0\n\tsizeChunk = math.ceil(numRows/numChunks)\n\n\tID = 1\n\tfor c in range(numChunks):\n\t\tfor i in range(numCols):\n\n\t\t\tcur.execute(\"SELECT AVG(\" + colList[i] + \"), STDDEV_SAMP(\" + colList[i] + \"), VAR_SAMP(\" + colList[i] + \") FROM (SELECT \" + colList[i] + \", ROW_NUMBER() OVER() as rnum FROM \" \n\t\t\t\t+ table + \") as foo WHERE rnum > \" + str(c*sizeChunk) + \" AND rnum < \" + str(sizeChunk + c*sizeChunk))\n\t\t\t\n\t\t\tavg, std, var = cur.fetchone()\n\t\t\tprint((avg, std, var))\n\t\t\tmed = 0 #median\n\n\t\t\t#cur.execute(\"SELECT TOP 1 COUNT(*) val, freq FROM \" + table + \" GROUP BY \" + colList[j] + \" ORDER BY COUNT(*) DESC\")\n\t\t\t#mod = int(cur.fetchone()[0])\n\t\t\tmod = 0\n\n\t\t\tID = 1<<i\n\n\t\t\tcur.execute(\"INSERT INTO dc_\" + table + \" (col0, col1, col2, col3, col4, col5) VALUES (%s, %s, %s, %s, %s, %s)\", \n\t\t\t\t[idChunkCombine(ID, c, numChunks), avg, std, var, med, mod])\n\n\t\t\tnodeCount+=1\n\n\t\t\tp = findPercent(nodeCount, sizeDC)\n\t\t\tif(p - prevPercent >= 5):\n\t\t\t\tprint(str(random.randint(23,28123)) + \"|\" + str(p) + \"&\", sep=\"\")\n\t\t\t\tprevPercent = p\n\t\t\t\tsys.stdout.flush()\n\n\tconn.commit()\n\treturn nodeCount\n\ndef createDCTableLevel2(conn, table, levels, numChunks, numCols, numRows, nodeCount):\n\tcur = conn.cursor()\n\n\tcur.execute(\"SELECT * FROM \" + table)\n\tcolList = [x[0] for x in cur.description]\n\n\tsizeDC = numChunks * (2**numCols - 1)\n\tprevPercent = findPercent(nodeCount, sizeDC)\n\tsizeChunk = math.ceil(numRows/numChunks)\n\n\tfor c in range(numChunks):\n\t\tfor i in range(numCols - 
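The triple nested loop that fills PARAMETERS in the dustybox script above is equivalent to one pass over itertools.product; a sketch with the same sweep values (the base dictionary is abbreviated to a single key so the example runs standalone).

import copy
import itertools

hfacts = [1.0, 2.5]
dust_to_gas = [0.01, 0.5]
dtdrag_fac = [0.20 / 2 ** n for n in range(5)]
n_dust = 2
base = {'prefix': 'dustybox'}            # stands in for _parameters

PARAMETERS = {}
for hfact, eps, dtdrag in itertools.product(hfacts, dust_to_gas, dtdrag_fac):
    C_force = dtdrag / 1.0               # DTFORCE_TO_DTDRAG = 1.0
    label = f'hfact_{hfact:.1f}-eps_{eps:.2f}-C_force_{C_force:.4f}'
    PARAMETERS[label] = copy.copy(base)
    PARAMETERS[label]['C_force'] = C_force
    PARAMETERS[label]['dust_to_gas_ratio'] = [eps / n_dust] * n_dust

print(len(PARAMETERS))                   # 2 * 2 * 5 = 20 runs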
1):\n\t\t\tfor j in range(i+1, numCols):\n\n\t\t\t\tcur.execute(\"SELECT CORR(cl1, cl2) FROM (SELECT \" + colList[i] + \" as cl1,\" + colList[j] + \" as cl2, ROW_NUMBER() OVER() as rnum FROM \" \n\t\t\t\t\t+ table + \") as foo WHERE rnum > \" + str(c*sizeChunk) + \" AND rnum < \" + str(sizeChunk + c*sizeChunk))\n\n\t\t\t\tcur.execute(\"INSERT INTO dc_\" + table + \" (col0, col1) VALUES (%s, %s)\", \n\t\t\t\t\t[idChunkCombine(2**i + 2**j, c, numChunks),float(cur.fetchone()[0])])\n\n\t\t\t\tnodeCount+=1\n\n\t\t\t\tp = findPercent(nodeCount, sizeDC)\n\t\t\t\tif(p - prevPercent >= 5):\n\t\t\t\t\tprint(str(random.randint(23,28123)) + \"|\" + str(p) + \"&\", sep=\"\")\n\t\t\t\t\tprevPercent = p\n\t\t\t\t\tsys.stdout.flush()\n\n\tconn.commit()\n\treturn nodeCount\n\ndef createDCTableLeveln(conn, table, levels, numChunks, numCols, numRows, nodeCount):\n\tcur = conn.cursor()\n\n\tsizeDC = numChunks * (2**numCols - 1)\n\tprevPercent = findPercent(nodeCount, sizeDC)\n\n\tfor c in range(numChunks):\n\t\tfor i in range(1, 2**numCols):\n\t\t\tif(checkLevel1(i) == 1 or checkLevel2(i) == 1):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tvals = []\n\t\t\tkids = []\n\t\t\tfor x in range(numCols):\n\t\t\t\tif((i >> x) & 1 == 1):\n\t\t\t\t\tfor y in range(x+1, numCols):\n\t\t\t\t\t\tif((i >> y) & 1 == 1):\n\t\t\t\t\t\t\tcur.execute(\"SELECT col1 FROM dc_\" + table + \" WHERE col0 = \" \n\t\t\t\t\t\t\t\t+ str(idChunkCombine(2**x + 2**y, c, numChunks)))\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tvals.append(cur.fetchone()[0])\t\n\t\t\t\t\tkids.append(x)\n\n\t\t\tcorrelation = sum(vals)\n\n\t\t\tcur.execute(\"INSERT INTO dc_\" + table + \" (col0, col1) VALUES (%s, %s)\", \n\t\t\t\t[idChunkCombine(i, c, numChunks), correlation])\n\n\t\t\tnodeCount+=1\n\n\t\t\tp = findPercent(nodeCount, sizeDC)\n\t\t\tif(p - prevPercent >= 5):\n\t\t\t\tprint(str(random.randint(23,28123)) + \"|\" + str(p) + \"&\", sep=\"\")\n\t\t\t\tprevPercent = p\n\t\t\t\tsys.stdout.flush()\n\n\tconn.commit()\n\ndef insertData(cur, table, dataSet, length):\n\n\tcur.execute(\"SELECT * FROM \" + table)\n\tcolList = [x[0] for x in cur.description]\n\n\tfor x in range(int(length)):\n\t\texe = \"INSERT INTO \" + table + \" (\"\n\n\t\tfor i in colList:\n\t\t\texe += i + \",\"\n\n\t\texe = exe[:-1]\n\t\texe += \") values (\"\n\n\t\tfor i in range(len(colList)):\n\t\t\texe += \"%s, \"\n\n\t\texe = exe[:-2]\n\t\texe += \")\"\n\n\t\tcur.execute(exe, [dataSet[x][i] for i in range(len(colList))])\n\ndef demo():\n\tdataSetType = sys.argv[1]\n\tfileName = dataSetType + \".pkl\"\n\tdataFile = open(fileName, \"rb\")\n\tdataSet = pickle.load(dataFile)\n\tdataFile.close()\n\n\tconn = getDatabaseType(sys.argv[2])\n\tcur = conn.cursor()\n\tnumRows, numCols = dataSet.shape\n\tnumChunks = int(sys.argv[3])\n\n\tname = \"demop\" + str(random.randint(0, 12412099999999))\n\n\tcreateTable(cur, name, numCols)\n\tinsertData(cur, name, dataSet, numRows)\n\tconn.commit()\n\n\tcreateDCTableSetup(conn, name, numCols, numChunks, numCols, numRows)\n\t#print(\"setup done\")\n\tnodeCount = createDCTableLevel1(conn, name, numCols, numChunks, numCols, numRows)\n\t#print(\"level 1 made\")\n\tnodeCount = createDCTableLevel2(conn, name, numCols, numChunks, numCols, numRows, nodeCount)\n\t#print(\"level 2 made\")\n\tcreateDCTableLeveln(conn, name, numCols, numChunks, numCols, numRows, nodeCount)\n\t#print(\"done\")\n\n\tconn.commit()\n\n\t#drop table here?\n\n\tprint(\"done\")\n\t#print(time.time() - startTime)\n\n\tcur.execute(\"DROP TABLE \" + name)\n\tcur.execute(\"DROP TABLE dc_\" + 
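idChunkCombine above packs a column-set ID and a chunk index into one integer key by shifting the ID past ceil(log2(numChunks)) bits. The decode helper below is not in the original; it is added to show the round trip (chunk indices 0..numChunks-1 always fit in those bits).

import math

def id_chunk_combine(idn, chunk, num_chunks):
    # same packing as idChunkCombine in base.py
    return (idn << math.ceil(math.log(num_chunks, 2))) | chunk

def id_chunk_split(key, num_chunks):
    # hypothetical inverse, for illustration only
    bits = math.ceil(math.log(num_chunks, 2))
    return key >> bits, key & ((1 << bits) - 1)

num_chunks = 8                              # 3 chunk bits
key = id_chunk_combine(0b101, 6, num_chunks)
print(bin(key))                             # 0b101110
print(id_chunk_split(key, num_chunks))      # (5, 6)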
name)\n\tconn.commit()\n\n\ndef exp(databaseType):\n\tconn = getDatabaseType(databaseType)\n\n\tif(sys.argv[1] == \"setup\"):\n\t\tcreateDCTableSetup(conn, sys.argv[2], int(sys.argv[3]),int( sys.argv[4]), int(sys.argv[5]), int(sys.argv[6]))\n\telif(sys.argv[1] == \"level1\"):\n\t\tcreateDCTableLevel1(conn, sys.argv[2], int(sys.argv[3]),int( sys.argv[4]), int(sys.argv[5]), int(sys.argv[6]))\n\telif(sys.argv[1] == \"level2\"):\n\t\tcreateDCTableLevel2(conn, sys.argv[2], int(sys.argv[3]),int( sys.argv[4]), int(sys.argv[5]), int(sys.argv[6]))\n\telif(sys.argv[1] == \"leveln\"):\n\t\tcreateDCTableLeveln(conn, sys.argv[2], int(sys.argv[3]),int( sys.argv[4]), int(sys.argv[5]), int(sys.argv[6]))\n\n\n\n#if __name__==\"__main__\": startTime = time.time(); exp(\"none\")\nif __name__==\"__main__\": startTime = time.time(); demo()","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"481554914","text":"import sys\nimport os\nimport arrow\n\ndef new_post(post_name):\n utc = arrow.utcnow()\n local = utc.to('local')\n date = local.format('YYYY-MM-DD')\n time_full = local.format()\n file_name = date+'-'+post_name+'.markdown'\n\n path = './_posts'\n\n full_file_name = os.path.join(path, file_name)\n file_post = open(full_file_name, 'w+')\n\n head = \"---\\nlayout:\\tpost\\ntitle:\\t{0}\\ndate:\\t{1}\\ncategories:\\n---\".format(\n post_name, time_full)\n\n print(head, file=file_post)\n file_post.close()\n \n\n\nif __name__==\"__main__\":\n assert(len(sys.argv)>1), \"No post name specified.\"\n new_post(sys.argv[1])","sub_path":"newpost.py","file_name":"newpost.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"466155926","text":"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-class-docstring, missing-function-docstring\n\n\"\"\"Test credentials.\"\"\"\n\nfrom unittest import mock\n\nfrom qiskit.providers.honeywell import credentials\n\nfrom qiskit.test import QiskitTestCase\n\n\nclass TestCredentials(QiskitTestCase):\n\n @mock.patch.object(credentials, 'read_creds_from_qiskitrc',\n return_value=None)\n @mock.patch.object(credentials, 'read_creds_from_environ',\n return_value=None)\n def test_discover_credentials_no_creds(self, environ_mock, qiskitrc_mock):\n self.assertEqual(None,\n credentials.discover_credentials())\n qiskitrc_mock.assert_called_once_with(\n filename=credentials.DEFAULT_QISKITRC_FILE)\n environ_mock.assert_called_once_with()\n","sub_path":"test/test_honeywell_credentials.py","file_name":"test_honeywell_credentials.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"470334420","text":"################################################################################\n# #\n# ____ _ #\n# | _ \\ ___ __| |_ __ _ _ _ __ ___ #\n# | |_) / _ \\ / _` | '__| | | | '_ ` _ \\ #\n# | __/ (_) | (_| | | | |_| | | | | | | #\n# |_| \\___/ \\__,_|_| \\__,_|_| |_| |_| #\n# #\n# Copyright 2021 Podrum Studios #\n# #\n# Permission is hereby granted, free of charge, to any person #\n# obtaining a copy of this software and associated documentation #\n# files (the \"Software\"), to deal in the Software without restriction, #\n# including without limitation the rights to use, copy, modify, merge, #\n# publish, distribute, sublicense, and/or sell copies of the Software, #\n# and to permit persons to whom the Software is furnished to do so, #\n# subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included #\n# in all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #\n# IN THE SOFTWARE. 
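The Honeywell test above stacks two mock.patch.object decorators; unittest.mock applies them bottom-up, which is why environ_mock arrives as the first test argument and qiskitrc_mock as the second. A self-contained illustration of that ordering rule with a toy namespace rather than the real qiskit provider:

from unittest import mock
import types

creds = types.SimpleNamespace(
    read_from_qiskitrc=lambda: 'rc',
    read_from_environ=lambda: 'env',
)

@mock.patch.object(creds, 'read_from_qiskitrc', return_value=None)
@mock.patch.object(creds, 'read_from_environ', return_value=None)
def check(environ_mock, qiskitrc_mock):
    # bottom decorator -> first parameter, top decorator -> second
    creds.read_from_environ()
    environ_mock.assert_called_once_with()
    qiskitrc_mock.assert_not_called()

check()
print('ordering holds')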
#\n# #\n################################################################################\n\nimport gzip\nfrom block.block_map import block_map\nfrom nbt_utils.tag_ids import tag_ids\nfrom nbt_utils.tag.byte_tag import byte_tag\nfrom nbt_utils.tag.byte_array_tag import byte_array_tag\nfrom nbt_utils.tag.compound_tag import compound_tag\nfrom nbt_utils.tag.int_tag import int_tag\nfrom nbt_utils.tag.int_array_tag import int_array_tag\nfrom nbt_utils.tag.list_tag import list_tag\nfrom nbt_utils.tag.long_tag import long_tag\nfrom nbt_utils.tag.string_tag import string_tag\nfrom nbt_utils.utils.nbt_be_binary_stream import nbt_be_binary_stream\nimport os\nimport random\nimport sys\nimport time\nfrom world.chunk.block_storage import block_storage\nfrom world.chunk.chunk import chunk\nfrom world.chunk.sub_chunk import sub_chunk\nfrom world.chunk_utils import chunk_utils\nfrom world.provider.anvil.region import region\n\nclass anvil:\n provider_name: str = \"anvil\"\n region_file_extension: str = \"mca\"\n \n def __init__(self, world_dir: str) -> None:\n self.world_dir: str = os.path.abspath(world_dir)\n if not os.path.isdir(self.world_dir):\n os.mkdir(self.world_dir)\n if not os.path.isfile(os.path.join(self.world_dir, \"level.dat\")):\n self.create_options_file()\n region_dir: str = os.path.join(self.world_dir, \"region\")\n if not os.path.isdir(region_dir):\n os.mkdir(region_dir)\n \n @staticmethod\n def get_index(x: int, y: int, z: int) -> int:\n return (x << 8) | (z << 4) | y\n \n @staticmethod\n def deserialize_sub_chunk(blocks: list, metas: list, reorder: object = True) -> object:\n if reorder:\n blocks: list = chunk_utils.reorder_byte_array(blocks)\n metas: list = chunk_utils.reorder_nibble_array(metas)\n i_sub_chunk: object = sub_chunk()\n for i in range(0, 4096):\n try:\n runtime_id: int = block_map.get_runtime_id(blocks[i] & 0xff, chunk_utils.get_nibble_4(metas, i) & 0xff)\n except KeyError:\n runtime_id: int = block_map.get_runtime_id(blocks[i] & 0xff, 0)\n storage: object = i_sub_chunk.get_block_storage(0)\n if runtime_id not in storage.palette:\n storage.palette.append(runtime_id)\n storage.blocks[i]: int = storage.palette.index(runtime_id)\n return i_sub_chunk\n \n @staticmethod\n def deserialize_chunk(data: bytes) -> object:\n stream = nbt_be_binary_stream(data)\n root_tag: object = stream.read_root_tag()\n if not isinstance(root_tag, compound_tag):\n raise Exception(\"Invalid NBT data!\")\n if not root_tag.has_tag(\"Level\"):\n raise Exception(\"Level tag isnt present!\")\n level_tag: object = root_tag.get_tag(\"Level\")\n sub_chunks: dict = {}\n sections_tag: object = level_tag.get_tag(\"Sections\")\n for section_tag in sections_tag.value:\n sub_chunks[section_tag.get_tag(\"Y\").value]: object = anvil.deserialize_sub_chunk(\n section_tag.get_tag(\"Blocks\").value,\n section_tag.get_tag(\"Data\").value\n )\n if level_tag.has_tag(\"BiomeColors\"):\n biomes: list = chunk_utils.convert_biome_colors(level_tag.get_tag(\"BiomeColors\").value)\n elif level_tag.has_tag(\"Biomes\"):\n biomes: list = level_tag.get_tag(\"Biomes\").value\n else:\n biomes: list = []\n i_chunk: object = chunk(\n level_tag.get_tag(\"xPos\").value,\n level_tag.get_tag(\"zPos\").value,\n sub_chunks,\n biomes\n )\n i_chunk.has_changed: bool = level_tag.get_tag(\"TerrainPopulated\").value > 0\n return i_chunk\n \n @staticmethod\n def cr_index(x: int, z: int) -> tuple:\n return x >> 5, z >> 5\n \n @staticmethod\n def rc_index(x: int, z: int) -> tuple:\n return x - ((x >> 5) << 5), z - ((z >> 5) << 5)\n \n def 
get_chunk(self, x: int, z: int) -> object:\n region_index: tuple = anvil.cr_index(x, z)\n chunk_index: tuple = anvil.rc_index(x, z)\n region_path: str = os.path.join(os.path.join(self.world_dir, \"region\"), f\"r.{region_index[0]}.{region_index[1]}.{self.region_file_extension}\")\n reg: object = region(region_path)\n chunk_data: bytes = reg.get_chunk_data(chunk_index[0], chunk_index[1])\n return anvil.deserialize_chunk(chunk_data)\n \n @staticmethod\n def sub_chunk_to_section(sub_chunk: object) -> object:\n return compound_tag(\"\", [\n byte_array_tag(\"Blocks\", chunk_utils.reorder_byte_array(sub_chunk.ids)),\n byte_array_tag(\"Data\", chunk_utils.reorder_byte_array(sub_chunk.data)),\n byte_array_tag(\"SkyLight\", chunk_utils.reorder_byte_array(sub_chunk.sky_light)),\n byte_array_tag(\"BlockLight\", chunk_utils.reorder_byte_array(sub_chunk.block_light))\n ])\n \n def set_chunk(self, x: int, z: int, chunk_in: object) -> None:\n region_index: tuple = anvil.cr_index(x, z)\n chunk_index: tuple = anvil.rc_index(x, z)\n region_path: str = os.path.join(os.path.join(self.world_dir, \"region\"), f\"r.{region_index[0]}.{region_index[1]}.{self.region_file_extension}\")\n reg: object = region(region_path)\n chunk_data: bytes = reg.get_chunk_data(chunk_index[0], chunk_index[1])\n stream: object = nbt_be_binary_stream(chunk_data)\n sections_tag = list_tag(\"Sections\", [], tag_ids.compound_tag)\n for y, sub_chunk in chunk_in.sub_chunks.items():\n section_tag = anvil.sub_chunk_to_section(sub_chunk)\n section_tag.set_tag(byte_tag(\"Y\", y))\n sections_tag.value.append(section_tag)\n tag: object = compound_tag(\"\", [\n compound_tag(\"Level\", [\n byte_array_tag(\"Biomes\", chunk_in.biomes),\n list_tag(\"TileEntities\", chunk_in.tiles, tag_ids.compound_tag),\n int_tag(\"xPos\", chunk_in.x),\n int_tag(\"zPos\", chunk_in.z),\n int_array_tag(\"HeightMap\", chunk_in.height_map),\n byte_tag(\"V\", 1),\n long_tag(\"LastUpdate\", 0),\n long_tag(\"InhabitedTime\", 0),\n byte_tag(\"LightPopulated\", chunk_in.is_light_populated),\n byte_tag(\"TerrainPopulated\", chunk_in.is_terrain_populated),\n list_tag(\"Entities\", chunk_in.entities, tag_ids.compound_tag),\n sections_tag\n ]),\n int_tag(\"DataVersion\", 1343)\n ])\n stream.write_root_tag(tag)\n reg.put_chunk_data(chunk_index[0], chunk_index[1], stream.data)\n \n def get_option(self, name: str) -> object:\n stream: object = nbt_be_binary_stream(gzip.decompress(open(os.path.join(self.world_dir, \"level.dat\"), \"rb\").read()))\n tag: object = stream.read_root_tag()\n return tag.get_tag(\"Data\").get_tag(name).value\n \n def set_option(self, name: str, value: object) -> None:\n stream: object = nbt_be_binary_stream(gzip.decompress(open(os.path.join(self.world_dir, \"level.dat\"), \"rb\").read()))\n tag: object = stream.read_root_tag()\n data_tag: bytes = tag.get_tag(\"Data\")\n if data_tag.has_tag(name):\n option_tag: object = data_tag.get_tag(name)\n option_tag.value = value\n data_tag.set_tag(option_tag)\n tag.set_tag(data_tag)\n stream.buffer: bytes = b\"\"\n stream.pos: int = 0\n stream.write_root_tag(tag)\n file: object = open(os.path.join(self.world_dir, \"level.dat\"), \"wb\")\n file.write(gzip.compress(stream.data))\n \n def create_options_file(self) -> None:\n stream: object = nbt_be_binary_stream()\n tag: object = compound_tag(\"\", [\n compound_tag(\"Data\", [\n byte_tag(\"hardcore\", 0),\n byte_tag(\"MapFeatures\", 0),\n byte_tag(\"raining\", 0),\n byte_tag(\"Difficulty\", 0),\n byte_tag(\"initialized\", 1),\n byte_tag(\"thundering\", 0),\n 
int_tag(\"GameType\", 0),\n int_tag(\"generatorVersion\", 1),\n int_tag(\"rainTime\", 0),\n int_tag(\"SpawnX\", 256),\n int_tag(\"SpawnY\", 70),\n int_tag(\"SpawnZ\", 256),\n int_tag(\"thunderTime\", 0),\n int_tag(\"version\", 19133),\n long_tag(\"LastPlayed\", int(time.time() * 1000)),\n long_tag(\"RandomSeed\", random.randint(0, sys.maxsize)),\n long_tag(\"SizeOnDisk\", 0),\n long_tag(\"Time\", 0),\n compound_tag(\"GameRules\", []),\n string_tag(\"generatorName\", \"flat\"),\n string_tag(\"LevelName\", \"world\")\n ])\n ])\n stream.write_root_tag(tag)\n file: object = open(os.path.join(self.world_dir, \"level.dat\"), \"wb\")\n file.write(gzip.compress(stream.data))\n","sub_path":"podrum/world/provider/anvil/anvil.py","file_name":"anvil.py","file_ext":"py","file_size_in_byte":11448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"627990649","text":"import datetime\nimport multiprocessing\nimport os\n\nfrom .commit_analyzer import IsBugCommitAnalyzer\nfrom git import Repo\n# from jira_extractor import JiraExtractor\n# from sourceforge_extractor import SourceforgeExtractor\nfrom termcolor import colored\nfrom .utils import get_from_cache\n\nCACHE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cache')\n\n\nclass Extractor(object):\n\tEARLIEST_BUG = 0\n\n\tdef __init__(self, repo_dir, inspected_branch):\n\t\tself.repo = Repo(repo_dir)\n\t\tself.inspected_branch = inspected_branch\n\t\tself.manager = multiprocessing.Manager()\n\t\tself.cache_dir = os.path.join(CACHE_DIR, os.path.basename(self.repo.working_dir))\n\t\tif not os.path.isdir(self.cache_dir):\n\t\t\tos.makedirs(self.cache_dir)\n\n\tdef extract_possible_bugs_wrapper(self, use_cache, check_trace=False):\n\t\tif use_cache:\n\t\t\treturn filter(lambda x: self.bugs_filter(x),\n\t\t\t get_from_cache(os.path.join(self.cache_dir, 'possible_bugs.pkl'),\n\t\t\t lambda: self.extract_possible_bugs()))\n\t\telse:\n\t\t\treturn self.extract_possible_bugs(check_trace=check_trace)\n\n\t# Returns tupls of (issue,commit,tests) that may contain bugs\n\tdef extract_possible_bugs(self, **kwargs):\n\t\tans = []\n\t\tfor hey in self.get_all_commits():\n\n\t\t\tprint(colored('### START HANDLING ###', 'red') + hey.hexsha + ' ' + str(datetime.datetime.now().time()))\n\t\t\tif self.is_bug_fix_commit(hey):\n\t\t\t\tans.append(hey)\n\t\t\t\tprint(colored('### APPENDED !###', 'blue'))\n\t\t\tprint(colored('### END HANDLING ###', 'green') + hey.hexsha + ' ' + str(datetime.datetime.now().time()))\n\n\t\tx = 1\n\n\t# Returns the commits relevant to bug_issue\n\tdef get_issue_commits(self, issue):\n\t\tans = []\n\t\tfor commit in all_commits:\n\t\t\tif self.is_associated_to_commit(issue, commit):\n\t\t\t\tans.append(commit)\n\t\treturn ans\n\n\t# Returns true if the commit message contains the issue key exclusively\n\tdef is_associated_to_commit(self, issue, commit):\n\t\tif issue.key in commit.message:\n\t\t\tindex_of_char_after_issue_key = commit.message.find(issue.key) + len(issue.key)\n\t\t\tif index_of_char_after_issue_key == len(commit.message):\n\t\t\t\treturn True\n\t\t\tchar_after_issue_key = commit.message[commit.message.find(issue.key) + len(issue.key)]\n\t\t\treturn not char_after_issue_key.isdigit()\n\t\telse:\n\t\t\treturn False\n\n\tdef get_all_commits(self):\n\t\treturn list(self.repo.iter_commits(self.inspected_branch))\n\n\tdef get_java_commits(self):\n\t\tdata = self.repo.git.log('--pretty=format:\"sha: %H\"', '--name-only').split(\"sha: \")\n\t\tcomms = dict(map(lambda d: 
(d[0], list(filter(lambda x: x.endswith(\".java\"), d[1:-1]))), list(map(lambda d: d.replace('\"', '').replace('\\n\\n', '\\n').split('\\n'), data))))\n\t\treturn dict(map(lambda x: (self.repo.commit(x), comms[x]), list(filter(lambda x: comms[x], comms))))\n\n\t# Returns boolean. Filter the bugs to inspect\n\tdef bugs_filter(self, possible_bug):\n\t\tif Extractor.EARLIEST_BUG > 0:\n\t\t\tkey = possible_bug[0]\n\t\t\tnumber = int(key.split('-')[1])\n\t\t\treturn number >= EARLIEST_BUG\n\t\treturn True\n\n\tdef has_parent(self, commit):\n\t\treturn self.get_parent(commit) is not None\n\n\tdef get_tests_paths_from_commit(self, commit):\n\t\tif not self.has_parent(commit): return []\n\t\treturn IsBugCommitAnalyzer(commit=commit, parent=self.get_parent(commit),\n\t\t repo=self.repo).analyze().get_test_paths()\n\n\tdef get_changed_components(self, commit):\n\t\tif not self.has_parent(commit): return []\n\t\treturn IsBugCommitAnalyzer(commit=commit, parent=self.get_parent(commit),\n\t\t repo=self.repo).analyze().get_diffed_components()\n\n\tdef get_parent(self, commit):\n\t\tans = None\n\t\tfor curr_parent in commit.parents:\n\t\t\tfor branch in curr_parent.repo.refs:\n\t\t\t\tif branch.name == self.inspected_branch:\n\t\t\t\t\tans = curr_parent\n\t\t\t\t\tbreak\n\t\treturn ans\n","sub_path":"PossibleBugMiner/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"529190","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nimport django.contrib.auth.models\nimport django.utils.timezone\nfrom django.conf import settings\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),\n ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),\n ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),\n ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),\n ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\n ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. 
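get_java_commits above packs the parsing of git log --pretty=format:"sha: %H" --name-only into nested map/filter calls; the expanded loop below shows the same idea on a canned log string, so no repository is needed (simplified: the original additionally strips quote characters and drops a trailing element per block).

sample = (
    'sha: aaa111\n'
    'src/Main.java\n'
    'README.md\n'
    '\n'
    'sha: bbb222\n'
    'docs/notes.txt\n'
)

commits = {}
for block in sample.split('sha: '):
    if not block.strip():
        continue                        # skip the empty leading split
    lines = block.replace('\n\n', '\n').strip().split('\n')
    sha, files = lines[0], lines[1:]
    java_files = [f for f in files if f.endswith('.java')]
    if java_files:                      # keep only commits touching .java files
        commits[sha] = java_files

print(commits)                          # {'aaa111': ['src/Main.java']}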
Unselect this instead of deleting accounts.', verbose_name='active')),\n ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\n ('website', models.CharField(max_length=250)),\n ('company', models.CharField(max_length=200)),\n ('activation_key', models.CharField(max_length=40, blank=True)),\n ('key_expires', models.DateTimeField(default=datetime.date(2015, 9, 15))),\n ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),\n ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),\n ],\n options={\n 'db_table': 'auth_user',\n },\n managers=[\n ('objects', django.contrib.auth.models.UserManager()),\n ],\n ),\n migrations.CreateModel(\n name='AccountContact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=250)),\n ('last_name', models.CharField(max_length=250)),\n ('email', models.EmailField(max_length=254)),\n ('phone', models.CharField(max_length=250)),\n ('company', models.CharField(max_length=250)),\n ('account_holder', models.ForeignKey(related_name='account_contact', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='SecretKey',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('label', models.CharField(max_length=250)),\n ('auth_id', models.CharField(max_length=250)),\n ('auth_token', models.CharField(max_length=250)),\n ('owner', models.ForeignKey(related_name='secret_key', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('plan', models.IntegerField(blank=True, null=True, choices=[(1, b'500/Monat (20 Euro)'), (2, b'1000/Monat (30 Euro)'), (3, b'5000/Monat (50 Euro)'), (4, b'10000/Monat (80 Euro)'), (5, b'25000/Monat (150 Euro)'), (6, b'50000/Monat (280 Euro)'), (7, b'100000/Monat (450 Euro)'), (8, b'unlimited (900)')])),\n ('status', models.IntegerField(blank=True, null=True, choices=[(1, b'abgelaufen'), (2, b'aktiv')])),\n ('usage', models.IntegerField()),\n ('start', models.DateField()),\n ('end', models.DateField()),\n ('owner', models.ForeignKey(related_name='subscription', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='WebsiteKey',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('auth_id', models.CharField(max_length=250)),\n ('host', models.CharField(max_length=250)),\n ('owner', models.ForeignKey(related_name='website_key', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"226942083","text":"import torch\n\n\ndef warp(self, x, flo):\n \"\"\"\n warp an image/tensor (im2) back to im1, according to the optical flow\n x: [B, C, H, W] (im2)\n flo: [B, 2, H, W] flow\n \"\"\"\n B, C, H, W = x.size()\n # mesh grid \n xx = torch.arange(0, 
W).view(1,-1).repeat(H,1)\n yy = torch.arange(0, H).view(-1,1).repeat(1,W)\n xx = xx.view(1,1,H,W).repeat(B,1,1,1)\n yy = yy.view(1,1,H,W).repeat(B,1,1,1)\n # 将横向和纵向的mesh拼起来,变成上下两层,也就是grid\n grid = torch.cat((xx,yy),1).float()\n\n if x.is_cuda:\n grid = grid.cuda()\n # vgrid是普通mesh加上flow\n vgrid = Variable(grid) + flo\n\n # scale grid to [-1,1] \n vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:]/max(W-1,1)-1.0\n vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:]/max(H-1,1)-1.0\n\n # 从[B, 2, H, W] 变成 [B, H, W, 2]\n vgrid = vgrid.permute(0, 2, 3, 1) \n output = nn.functional.grid_sample(x, vgrid)\n mask = torch.autograd.Variable(torch.ones(x.size())).cuda()\n mask = nn.functional.grid_sample(mask, vgrid)\n \n mask[mask<0.999] = 0\n mask[mask>0] = 1\n \n return output*mask\n\nwidth = 8\n\nheight = 6\nbatch = 3\nxx = torch.arange(0, width).view(1,-1).repeat(height,1)\nyy = torch.arange(0, height).view(-1,1).repeat(1,width)\n# print(yy)\nxx = xx.view(1,1,height,width).repeat(batch,1,1,1)\n# print(xx)\nyy = yy.view(1,1,height,width).repeat(batch,1,1,1)\ngrid = torch.cat((xx,yy),1).float()\nprint(xx.size())\n","sub_path":"pwc_warp.py","file_name":"pwc_warp.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"113166358","text":"# coding: utf-8\nfrom django.shortcuts import render_to_response,render,redirect,HttpResponseRedirect\nfrom django.http import HttpResponse,Http404\nfrom myApp.forms import *\nfrom myApp.models import *\nfrom django.template import RequestContext\nfrom django.contrib.auth import login,logout,authenticate\nfrom django.conf import settings\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger\nfrom django.db.models import F\nfrom myApp.kuaidi import *\nimport time\nfrom django.contrib.admin.views.decorators import staff_member_required #装饰器\n\ndef global_settings(request):\n #站点信息\n MEDIA_URL = settings.MEDIA_URL\n category_list = Category.objects.all()\n #品牌信息\n brand_list = Brand.objects.all()\n #热销榜信息\n hot_list = Furniture.objects.all().order_by('-sales')[:4]\n #标签\n tag_list = Tag.objects.all()\n #购物车\n cart = request.session.get(request.user.id,None)\n ad_list = Ad.objects.all()\n fur_list = Furniture.objects.all()\n fur_list = getPage(request, fur_list)\n return locals()\n\n\n#给家具分页\ndef getPage(request,fur_list):\n paginator = Paginator(fur_list,8) #一页大小\n try:\n page = int(request.GET.get('page',1))\n fur_list = paginator.page(page)\n except (EmptyPage,InvalidPage,PageNotAnInteger):\n fur_list = paginator.page(1)\n return fur_list\n\n#给评价分页\ndef commPage(request,comments):\n paginator = Paginator(comments,4)\n try:\n page = int(request.GET.get('page',1))\n comments = paginator.page(page)\n except (EmptyPage,InvalidPage,PageNotAnInteger):\n comments = paginator.page(1)\n return comments\n\n#主页\ndef index(request):\n return render(request,\"index.html\",locals())\n\n\n\n#产品分类\ndef products(request):\n try:\n cid = request.GET.get('cid',None) #获取分类的id号\n try:\n category = Category.objects.get(pk=cid)#查找对应id的分类\n except Category.DoesNotExist:#分类不存在\n return render(request,'error.html',{\"reason\":\"分类不存在\"})\n fur_list = Furniture.objects.filter(category=category) #查找对应分类的家具\n fur_list = getPage(request,fur_list) #获取一页的家具列表\n except Exception as e:\n pass\n return render(request,'products.html',locals())\n\n#登录\ndef do_login(request):\n try:\n if request.method == 'POST': #方式是提交\n login_form = LoginForm(request.POST) 
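The warp above maps pixel coordinates into grid_sample's [-1, 1] range with 2*x/(W-1) - 1, which matches grid_sample's align_corners=True convention (the implicit default on older PyTorch versions). A minimal identity check: zero flow should return the input unchanged.

import torch
import torch.nn.functional as F

B, C, H, W = 1, 1, 4, 5
x = torch.arange(B * C * H * W, dtype=torch.float32).view(B, C, H, W)

# Base pixel grid, same construction as in the record above.
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
grid = torch.stack((xx, yy), dim=0).unsqueeze(0).float()   # [1, 2, H, W]

flow = torch.zeros_like(grid)           # zero flow -> identity warp
vgrid = grid + flow
vgrid[:, 0] = 2.0 * vgrid[:, 0] / max(W - 1, 1) - 1.0      # x to [-1, 1]
vgrid[:, 1] = 2.0 * vgrid[:, 1] / max(H - 1, 1) - 1.0      # y to [-1, 1]

out = F.grid_sample(x, vgrid.permute(0, 2, 3, 1), align_corners=True)
print(torch.allclose(out, x))           # True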
#forms实例化,request.POST是表单中的数据,刚开始为空\n if login_form.is_valid(): #校验表单\n username = login_form.cleaned_data[\"username\"]\n password = login_form.cleaned_data[\"password\"]\n user = authenticate(username=username,password=password) #认证给出的用户名和密码,合法返回一个User对象,密码不合法返回None\n if user is not None:\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n #通过附加 user.backend 属性来记录验证是被哪个配置的 backend 通过的。通常是 django.contrib.auth.backends.ModelBackend.\n login(request,user)\n return render(request,'index.html')\n else:\n return render(request,'error.html',{'reason':'登录验证失败'})\n #return redirect(request.POST.get('source_url')) #request。POST.get()获取login.html表单中的值:source_url\n else:\n return render(request,'error.html',{'reason':login_form.errors})\n else: #GET方法,如第一次进入登陆页面\n login_form = LoginForm()\n except Exception as e:\n #logger.error(e)\n pass\n return render(request,'login.html',locals())\n\n#注册\ndef do_reg(request):\n try:\n if request.method == 'POST': #POST方法\n reg_form = RegForm(request.POST) #提取post的数据\n if reg_form.is_valid(): #校验表单\n user = User.objects.create(username=reg_form.cleaned_data[\"username\"], #创建一个对象,并保存在user中\n email=reg_form.cleaned_data[\"email\"],\n password = make_password(reg_form.cleaned_data[\"password\"]),\n address=reg_form.cleaned_data[\"address\"],\n phone=reg_form.cleaned_data[\"phone\"],)\n #注册后直接登录\n user = authenticate(username=reg_form.cleaned_data[\"username\"],password=reg_form.cleaned_data[\"password\"])\n if user is not None:\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request,user)\n #return redirect(request.POST.get('source_url'))\n return render(request,'index.html',locals())\n else:\n return render(request,'error.html','登陆失败')\n else:\n return render(request,'error.html',{'reason':reg_form.errors})\n else:\n reg_form=RegForm()\n except Exception as e:\n #logger.error\n pass\n return render(request,'register.html',locals())\n\n#退出\ndef do_logout(request):\n try:\n logout(request)\n except Exception as e:\n pass\n return render(request, 'index.html', locals())\n\n\n#查看购物车\ndef view_cart(request):\n if request.user.is_authenticated():\n cart = request.session.get(request.user.id,None) #获取session中存放的cart\n return render(request,'checkout.html',locals())\n else:\n login_form = LoginForm() #若用户还未登录,跳转到登录页面\n return render(request,'login.html',locals())\n\n#商品详情页\ndef detail(request):\n try:\n did =request.GET.get('did',None)\n try:\n fur = Furniture.objects.get(pk=did)\n comments = Comment.objects.filter(fur_id=fur.id) #首先在评论表中筛选出所有当前商品的评论的对象\n comments = commPage(request, comments) #一页的评论列表\n users = [] #评论过当前商品的所有用户\n for c in comments:\n user = User.objects.get(id=c.user_id) #选出给出评价的对应的用户\n users.append(user)\n except Furniture.DoesNotExist:\n return render(request,'error.html',{'reason':'商品不存在'})\n except Exception as e:\n #logger.error(e)\n pass\n return render(request,'single.html',locals())\n\n#清空购物车\ndef cleanCart(request):\n if request.user.is_authenticated():#判断是否登陆\n cart = Cart() #若已登录,初始化一个空的购物车\n request.session[request.user.id] = cart #将空的购物车放入session\n return render(request,'index.html',locals()) #转到结算页\n else:\n login_form=LoginForm() #没有登录,初始化一个form表单 让用户登录\n return render(request,'login.html',locals()) #转到登陆页面\n\n#添加购物车\ndef add_cart(request):\n if request.user.is_authenticated():\n try:\n chid = request.GET.get('chid',None) #获取传值,商品id\n try:\n furniture = Furniture.objects.get(pk=chid) #获取id对应的家具\n except Furniture.DoesNotExist:\n return render(request, 'error.html', {'reason':'��品不存在'})\n cart = 
request.session.get(request.user.id,None) #获取session中的购物车\n if not cart: #session中没有购物车\n cart = Cart() #实例化一个空的购物车\n cart.add(furniture) #调用cart对象的add方法,不重复的\n request.session[request.user.id] = cart #把购物车重新放入session\n else:\n cart.add(furniture) #session中已有购物车,直接加入家具\n request.session[request.user.id] = cart #保存购物车\n except Exception as e:\n #logger.error(e)\n pass\n return render(request, 'checkout.html', locals())\n else: #还未登陆,重定向到登录页面\n login_form = LoginForm()\n return render(request, 'login.html', locals())\n\n#品牌列表页\ndef brands(request):\n try:\n bid = request.GET.get('bid',None) #获取某一品牌的品牌id\n try:\n brand = Brand.objects.get(pk=bid) #查询该id的品牌\n except Brand.DoesNotExist:\n return render(request,'error.html',{'reason':'品牌不存在'})\n fur_list = Furniture.objects.filter(brand=brand) #查询该品牌对应的家具们的列表\n fur_list = getPage(request,fur_list) #将家具列表分页\n except Exception as e:\n pass\n return render(request,'products.html',locals())\n\n#标签列表页\ndef tags(request):\n try:\n tid = request.GET.get('tid',None) #获取pro_right页面的传参,tag.id\n try:\n tag = Tag.objects.get(pk=tid)\n except Tag.DoesNotExist:\n return render(request,'error.html',{'reason':'标签不存在'})\n fur_list = Furniture.objects.filter(tag=tag)\n fur_list = getPage(request,fur_list)\n except Exception as e:\n pass\n return render(request,'products.html',locals())\n\n\n# 快递查询\ndef getExpress(request):\n try:\n postid = request.POST.get('postid','请输入你要查询的快递单号')\n content, postid = get_content(postid)\n express_form = ExpressForm()\n return render(request,'express.html',locals())\n except:\n express_form = ExpressForm()\n return render(request,'express.html',locals())\n\n\n#生成当前订单\ndef final_order(request):\n #获取购物车上的信息加入数据库\n if request.user.is_authenticated():\n cart = request.session.get(request.user.id,None) #获取session中的购物车\n user = User.objects.get(username=request.user.username) #获取当前登录的用户\n order = Order.objects.create(user=user,price=cart.total_price,order_state='未发货',staff=cart.items[0].furniture.name) #创建订单\n order.save()\n for item in cart.items:\n #创建订单条目\n order_list = Order_list.objects.create(furniture=item.furniture.name,quantity=item.quantity,order=order,user_id=user.id)\n order_list.save()\n #修改销量\n furniture = Furniture.objects.get(name = item.furniture.name)\n furniture.sales+=item.quantity\n furniture.save()\n cart = Cart()#清空购物车\n request.session[request.user.id] = cart\n return HttpResponseRedirect(\"http://localhost:8000/show_order/\")\n #return render(request,'Personal.html',locals())\n else:\n return render(request,'login.html',locals())\n\n\ndef fur_statistic(request):\n temp_brand = get_sales_by_fur()\n fur = temp_brand[0] #上面函数返回的第一个参数,家具名\n fur_number = temp_brand[1] #上面函数返回的第二个参数,销量\n brand_name = temp_brand[2]\n return render(request,'chart.html',locals())\n\n\ndef show_order(request):\n user_name = request.user.username #获取用户名\n user = User.objects.get(username=request.user.username) # 获取当前登录的用户\n orders_list = Order.objects.filter(user=user) #过滤出该用户的所有订单\n reco = recommend_fur(request)\n return render(request,'Personal.html',locals())\n\ndef remind(request):\n oid = request.GET.get('oid', None) #获取订单编号\n order = Order.objects.get(id=oid)\n order.order_state = '已发货'\n order.save()\n return HttpResponseRedirect(\"http://localhost:8000/show_order/\")\n\ndef confirm(request):\n oid = request.GET.get('oid', None) # 获取订单编号\n order = Order.objects.get(id=oid)\n order.order_state = '已收货'\n order.save()\n return HttpResponseRedirect(\"http://localhost:8000/show_order/\")\n\n#显示某一订单下待评论的商品列表\ndef comment(request):\n oid 
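The views above call cart.add(furniture) and later read cart.items, item.furniture.name, item.quantity and cart.total_price, but the Cart class itself is imported from myApp.models and not shown in this section. A hedged sketch of the minimal interface these views assume; the real class may differ, furniture.price and furniture.id are assumed attributes, and the object must be picklable to live in the session.

class CartItem(object):
    def __init__(self, furniture, quantity=1):
        self.furniture = furniture
        self.quantity = quantity

class Cart(object):
    def __init__(self):
        self.items = []          # iterated by final_order()
        self.total_price = 0

    def add(self, furniture):
        # bump quantity for a product already in the cart, don't duplicate it
        for item in self.items:
            if item.furniture.id == furniture.id:
                item.quantity += 1
                break
        else:
            self.items.append(CartItem(furniture))
        self.total_price += furniture.price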
oid = request.GET.get('oid',None)\n    order_list = Order_list.objects.filter(order_id=oid) #fetch every line item with order_id=oid from the order_list table\n    order_list = order_list.filter(remark=0) #keep only the line items not yet reviewed\n    fur_list = [] #list of unreviewed products\n    for order in order_list:\n        fur_name = order.furniture #furniture name recorded on each line item\n        furniture = Furniture.objects.get(name=fur_name) #look the furniture up by its name\n        fur_list.append(furniture)\n    return render(request,'comment.html',locals())\n\n#show the review page for the single product about to be reviewed\ndef show_single_comm(request):\n    fur_id=request.GET.get('fid',None) #product id\n    fur= Furniture.objects.get(id=fur_id)#fetch the product object\n    oid = request.GET.get('oid',None) #the order it belongs to\n    comm_form = CommentForm()\n    return render(request,'desc_comm.html',locals())\n\n#submit a review\ndef do_comment(request):\n    try:\n        if request.method == 'POST':\n            comm_form = CommentForm(request.POST) #build a form object from the review form the user filled in\n            if comm_form.is_valid():\n                user_name = request.user.username # get the username\n                user = User.objects.get(username=request.user.username) # get the currently logged-in user\n                u_id = user.id\n                f_id = request.POST.get('fur_id','None') #product ID\n                order_id = request.POST.get('order_id', 'None') #order ID\n                comment = Comment.objects.create(comm=comm_form.cleaned_data[\"comm\"],fur_id=f_id,user_id=u_id,order_id=order_id) #create the review entry object\n                comment.save()\n                #update the order_list table\n                order_list = Order_list.objects.filter(order_id=order_id)\n                fur = Furniture.objects.get(id=f_id)\n                furniture = fur.name\n                order_list = order_list.filter(furniture=furniture) #order_list now holds this order's line items for the product being reviewed\n                for order in order_list:\n                    order.remark = 1 #mark the product just reviewed as reviewed\n                    order.save()\n                #return render(request,'comment.html',locals())\n                return HttpResponseRedirect(\"http://localhost:8000/comment/?oid=\"+order_id)\n            else:\n                return render(request,'error.html',{'reason':comm_form.errors})\n        else:\n            comm_form = CommentForm()\n    except Exception as e:\n        pass\n    return render(request,'comment.html',locals())\n\n#all line items of a given order have been reviewed\ndef finish_comm(request):\n    oid = request.GET.get('oid',None) #get the current order id\n    order_list = Order_list.objects.filter(order_id=oid) #fetch every line item of this order\n    order_list = order_list.filter(remark=0) #fetch the line items still unreviewed\n    if not order_list: #the order_list queryset is empty\n        order = Order.objects.get(id=oid)\n        order.order_state='评价完成' #update the order state\n        order.save()\n    return HttpResponseRedirect(\"http://localhost:8000/show_order/\")\n\n#view reviews\ndef check_remark(request):\n    oid = request.GET.get('oid', None) #order id\n    order_list = Order_list.objects.filter(order_id=oid) # fetch every line item with order_id=oid from the order_list table\n    fur_list = [] # products belonging to the order\n    comms = []\n    for order in order_list:\n        fur_name = order.furniture # furniture name recorded on each line item\n        furniture = Furniture.objects.get(name=fur_name) # look the furniture up by its name\n        comm = Comment.objects.filter(fur_id=furniture.id)\n        comm = comm.filter(order_id=oid)\n        fur_list.append(furniture)\n        comms.append(comm)\n    return render(request, 'check_remark.html', locals())\n\ndef recommend_fur(request):\n    try:\n        db = pymysql.connect(host='localhost', user='root', password='123456', db='test', charset='utf8')\n        cursor = db.cursor()\n        user_name = request.user.username\n        user = User.objects.get(username=request.user.username)\n        cursor.execute(\"SELECT furniture FROM myapp_order_list WHERE user_id=%s ORDER BY list_date DESC\",(user.id)) #furniture this user has already bought, newest first\n        results = cursor.fetchall()\n        furs = [] #furniture, as objects\n        fur_category = [] #category names of every purchased item, as strings\n        #fur_brand = [] #brand names of every purchased item, as strings\n        fur_tag = [] #tag.id\n        for row in results:\n            fur_name = row[0]\n            furniture = Furniture.objects.get(name=fur_name)\n            furs.append(furniture)\n            fur_category.append(furniture.category.name)\n            #fur_brand.append(furniture.brand.name)\n            
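# NOTE (editor): the raw SQL below assumes Django's default m2m join-table naming (myapp_furniture_tag); furniture.tag.all() via the ORM would be the safer equivalent\n            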
cursor.execute(\"SELECT tag_id from myapp_furniture_tag WHERE furniture_id=%s\", (furniture.id))\n            result = cursor.fetchall()\n            tag_id = result[0][0]\n            tag = Tag.objects.get(pk=tag_id)\n            fur_tag.append(tag.id)\n        fur = furs[:5] #first 5 purchased furniture objects\n        #fur_brand = fur_brand[:5]\n        fur_tag = fur_tag[:3]\n        fur_category = fur_category[:3]\n        reco_fur = [] #furniture recommended by category, as objects\n        reco_fur_id = [] #furniture ids matching reco_fur one to one\n        recommend_furniture = []#final recommendation objects\n        for c in fur_category:\n            cat = Category.objects.get(name=c)\n            cursor.execute(\"SELECT * FROM myapp_furniture WHERE category_id=%s\", (cat.id))\n            result1 = cursor.fetchall()\n            for row in result1:\n                fur_id = row[0]\n                fur = Furniture.objects.get(pk=fur_id)\n                reco_fur_id.append(fur_id)\n                reco_fur.append(fur) #pick products by category\n        for i in fur_tag:\n            cursor.execute(\"SELECT * FROM myapp_furniture_tag WHERE tag_id=%s\", (i))\n            result2 = cursor.fetchall()\n            for row in result2:\n                f_id = row[1] #furniture id carrying this style tag\n                for f in reco_fur_id:\n                    if f == f_id:\n                        furniture_c_t = Furniture.objects.get(pk=f_id)\n                        recommend_furniture.append(furniture_c_t)\n\n        for rf in recommend_furniture:\n            for f in furs:\n                if rf == f:\n                    recommend_furniture.remove(rf)\n        #drop duplicates\n        #recommend_furniture = list(set(recommend_furniture)\n        recommend_furnitures = list(set(recommend_furniture))\n        #take 4 of them\n        give_furs = []\n        give_furs =recommend_furnitures[:4]\n    except Exception as e:\n        pass\n    return give_furs","sub_path":"myTest2/myApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"284230922","text":"# http://adventofcode.com/2017/day/7\n\nfrom collections import defaultdict, namedtuple\nimport re\n\n\nProgram = namedtuple(\"Program\", [\"name\", \"weight\", \"subprogram_names\"])\n\nprograms = {}\n\ncompiled = re.compile(r\"(?P<name>[a-z]+) \\((?P<weight>[0-9]+)\\)( -> (?P<subprogram_names>[a-z]+(, [a-z]+)*))?\")\n\n\nwith open(\"07_01.in\") as f:\n    for line in f:\n        extracted = compiled.match(line).groupdict()\n\n        name = extracted[\"name\"]\n        weight = int(extracted[\"weight\"])\n\n        subprogram_names = (\n            extracted[\"subprogram_names\"].split(\", \")\n            if extracted[\"subprogram_names\"]\n            else []\n        )\n\n        programs[name] = Program(name, weight, subprogram_names)\n        \n\nfixed_weight = 0\n\n# your code goes in here\n\nprint(fixed_weight)\n","sub_path":"lvl007/07_02.py","file_name":"07_02.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"96782831","text":"# -*- coding: utf-8 -*-\nimport unittest\nimport locators as lc\nfrom time import sleep\nfrom testconfig import config\nfrom selenium import webdriver\n# Decrease logging level in trace_back\nimport logging\nfrom selenium.webdriver.remote.remote_connection import LOGGER\nLOGGER.setLevel(logging.WARNING)\nLOC = lc.Main()\n\nmain_url = 'https://docdoc.ru'\ndoc_url = main_url + '/doctor'\n\n\nclass MainTest(unittest.TestCase):\n    driver = None\n\n    @classmethod\n    def setUpClass(cls):\n        cls.osb = config.get('osb')\n        cls.browser = config.get('browser')\n        if cls.browser in ['firefox', 'Firefox', 'mozilla', 'Mozilla']:\n            moz_options = webdriver.FirefoxOptions()\n            moz_options.add_argument(\"--no-sandbox\")\n            if cls.osb in ['macos', 'MacOS']:\n                cls.driver = webdriver.Firefox(firefox_options=moz_options, executable_path='./geckodriver_macos')\n            elif cls.osb in ['windows', 'Windows']:\n                cls.driver = webdriver.Firefox(firefox_options=moz_options, executable_path='./geckodriver64.exe')\n            else:\n                cls.driver 
= webdriver.Firefox(firefox_options=moz_options, executable_path='./geckodriver_unix64')\n else:\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--no-sandbox\")\n if cls.osb in ['macos', 'MacOS']:\n cls.driver = webdriver.Chrome(chrome_options=chrome_options, executable_path='./chromedriver_macos')\n elif cls.osb in ['windows', 'Windows']:\n cls.driver = webdriver.Chrome(chrome_options=chrome_options, executable_path='./chromedriver32.exe')\n else:\n cls.driver = webdriver.Chrome(chrome_options=chrome_options, executable_path='./chromedriver_unix64')\n\n cls.driver.implicitly_wait(3)\n\n def test_main(cls):\n d = cls.driver\n d.get(doc_url)\n\n # Check default filter value\n dropdown_default, expected_text = LOC.dropdown_button_default(d)\n d.execute_script('return arguments[0].scrollIntoView();', dropdown_default)\n assert dropdown_default.text == expected_text, \\\n 'Некорректный текст заголовка в фильтре дропдауна по дефолту.\\n' \\\n 'Получено: {got}\\nОжидаемо: {exp}'.format(got=dropdown_default.text.encode('utf-8'), exp=expected_text)\n\n # Trying to open filter popup. Not stable. Kostily added\n dropdown_default.click()\n dropdown_opened = LOC.dropdown_opened(d)\n if dropdown_opened is False:\n dropdown_default.click()\n dropdown_opened = LOC.dropdown_opened(d)\n if dropdown_opened is False:\n dropdown_default.click()\n # Asserting popup filter was opened\n dropdown_opened = LOC.dropdown_opened(d)\n assert dropdown_opened is True, \\\n 'Дропдаун выбора фильтров не раскрылся'\n\n # Checking amount of default active filters\n dropdown_filters_active, expected_text = LOC.dropdown_value_default_active(d)\n assert len(dropdown_filters_active) == 1, \\\n 'Некорректное число активных фильтров в дропдауне по дефолту.\\n' \\\n 'Получено: {got}\\nОжидаемо: 1'.format(got=len(dropdown_filters_active))\n\n # Checking value of a default active filter\n assert dropdown_filters_active[0].text == expected_text, \\\n 'Некорректный текст фильтра в дропдауне.\\n' \\\n 'Получено: {got}\\nОжидаемо: {exp}'.format(got=dropdown_filters_active[0].text.encode('utf-8'), exp=expected_text)\n\n # Checking default active filter has a selecting mark\n default_mark = LOC.dropdown_value_marker_displayed(dropdown_filters_active[0])\n assert default_mark is True, \\\n 'Маркер выбранного фильтра не найден для дефолтного фильтра.\\n'\n dropdown_value_tomorrow, expected_text = LOC.dropdown_value_tomorrow(d)\n\n # Checking Tomorrow text value in a dropdown.\n try:\n tomorrow_text = dropdown_value_tomorrow.text.split(', ')[0]\n assert tomorrow_text == expected_text, \\\n 'Некорректный текст фильтра в дропдауне.\\n' \\\n 'Получено: {got}\\nОжидаемо: {exp}'.format(got=tomorrow_text.encode('utf-8'), exp=expected_text)\n except Exception as err:\n cls.fail('Ошибка при получении значения фильтра \"Завтра\", {}'.format(err))\n\n # Checking Tomorrow text applied to a filter header value\n dropdown_value_tomorrow.click()\n dropdown_button_tomorrow, expected_text = LOC.dropdown_button_tomorrow(d)\n assert dropdown_button_tomorrow.text == expected_text, \\\n 'Некорректный текст заголовка в фильтре после выбора \"Завтра\".\\n' \\\n 'Получено: {got}\\nОжидаемо: {exp}'.format(got=dropdown_button_tomorrow.text.encode('utf-8'), exp=expected_text)\n sleep(3)\n\n # Checking elements on page after a page reload\n # Focus on the page results element\n resss = d.find_element_by_class_name('the-doctor-list-items')\n d.execute_script('return arguments[0].scrollIntoView();', resss)\n\n # Checking a Tomorrow value has 
a selected mark. The same kostyls here.\n dropdown_button_tomorrow.click()\n dropdown_opened = LOC.dropdown_opened(d)\n if dropdown_opened is False:\n dropdown_button_tomorrow.click()\n dropdown_opened = LOC.dropdown_opened(d)\n if dropdown_opened is False:\n dropdown_button_tomorrow.click()\n dropdown_value_tomorrow, expected_text = LOC.dropdown_value_tomorrow(d)\n tomorrow_mark = LOC.dropdown_value_marker_displayed(dropdown_value_tomorrow)\n assert tomorrow_mark is True, \\\n 'Маркер выбранного фильтра не найден после смены фильтра.\\n'\n # Focus on the page results element\n resss = d.find_element_by_class_name('the-doctor-list-items')\n d.execute_script('return arguments[0].scrollIntoView();', resss)\n\n # Checking a grid with results has a correct default length\n results, expected_lenght = LOC.results(d)\n assert len(results) == expected_lenght, \\\n 'Некорректное число результатов выдачи после смены фильтра.\\n' \\\n 'Получено: {got}\\nОжидаемо: 10'.format(got=len(dropdown_filters_active))\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"40538006","text":"from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient\nfrom msrest.authentication import CognitiveServicesCredentials\nimport json\n\ndef get_data(jason_file):\n with open(jason_file) as json_file:\n output_data = []\n input_data = json.load(json_file)\n count = 1\n for input_datum in input_data:\n output_datum = {}\n output_datum['id'] = count\n count += 1\n output_datum['text'] = input_datum['DisplayText']\n output_datum['time'] = input_datum['Offset']\n # time:\n output_data.append(output_datum)\n return output_data\n\ndef get_senti(documents):\n subscription_key = \"f56de4b340b6472f951a0b5b7cfc8f8c\"\n credentials = CognitiveServicesCredentials(subscription_key)\n\n text_analytics_url = \"https://westus2.api.cognitive.microsoft.com/\"\n text_analytics = TextAnalyticsClient(endpoint=text_analytics_url, credentials=credentials)\n\n response = text_analytics.sentiment(documents=documents)\n res = []\n for pos, document in enumerate(response.documents):\n dic = {}\n dic[\"Document Id\"] = int(document.id)\n dic[\"Sentence\"] = documents[pos]['text']\n dic[\"Sentiment Score\"] = float(\"{:.2f}\".format(document.score))\n dic[\"Time\"] = documents[pos]['time']\n res.append(dic)\n return res\n","sub_path":"sentiment_functions.py","file_name":"sentiment_functions.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"343400613","text":"# -*- coding: utf-8 -*-\n# vim:tabstop=4:expandtab:sw=4:softtabstop=4\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib import admin\nfrom decorators import AutoManaged\nfrom django.core.urlresolvers import reverse\nimport utils\n\n(\n PAY_TO_CREATOR,\n PAY_TO_RECIPIENT,\n) = range(2)\n\nPAY_TO = (\n (PAY_TO_CREATOR,'Creator',),\n (PAY_TO_RECIPIENT,'Recipient',)\n)\n\nclass Vaqueiro(models.Manager):\n def _get_or_create_user(self,name,email):\n try:\n user = User.objects.get(email=email)\n except User.DoesNotExist:\n user = User.objects.create_user(email[:30],email)\n user.first_name = name\n 
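# NOTE (editor): email is sliced to 30 chars above because older Django's default auth.User.username max_length is 30\n            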
user.save()\n return user\n\n def create_vaquinha(self,from_name,from_email,to_name,to_email,title,description,amount,pay_to,close_on,contributors=None):\n from_user = self._get_or_create_user(from_name,from_email)\n if to_email is not None:\n to_user = self._get_or_create_user(to_name,to_email)\n else:\n to_user = None\n\n v = self.create(created_by=from_user,title=title,description=description,ammount=amount,recipient=to_user,pay_to=pay_to,close_on=close_on)\n \n if contributors is not None:\n for (name,email,) in contributors:\n if email:\n Contributor.objects.create(vaquinha=v,user=self._get_or_create_user(name,email))\n return v\n\n\n@AutoManaged\nclass Vaquinha(models.Model):\n objects = Vaqueiro()\n created_by = models.ForeignKey(User,related_name='vaquinhas')\n title = models.CharField(max_length=200)\n description = models.TextField()\n ammount = models.FloatField(default=0.0)\n recipient = models.ForeignKey(User,related_name='gifts')\n pay_to = models.IntegerField(choices=PAY_TO)\n close_on = models.DateTimeField(null=True)\n \n def __unicode__(self):\n return u'Vaquinha for %0.2f Eur' % self.ammount\n\n@AutoManaged\nclass Contributor(models.Model):\n user = models.ForeignKey(User)\n ammount = models.FloatField(default=0.0)\n vaquinha = models.ForeignKey(Vaquinha,null=True)\n\n def __unicode__(self):\n return unicode(self.user)\n\n@receiver(post_save,sender=Vaquinha)\ndef on_vaquinha_created(sender,created=False,instance=None,**kwargs):\n if created:\n ctx = dict()\n ctx['name'] = instance.created_by.first_name\n ctx['vaquinha_url'] = reverse('v-detail',kwargs={'id':instance.id})\n utils.send_email_template('new_vaquinha','A sua vaquinha está aqui',instance.created_by.email,ctx)\n\n@receiver(post_save,sender=Contributor)\ndef on_contribution_saved(sender,created=False,instance=None,**kwargs):\n if created:\n ctx = dict()\n ctx['name'] = instance.user.first_name\n ctx['requester'] = instance.vaquinha.created_by.first_name\n ctx['requester_email'] = instance.vaquinha.created_by.email\n ctx['recipient'] = instance.vaquinha.recipient.first_name\n ctx['title'] = instance.vaquinha.title\n ctx['description'] = instance.vaquinha.description\n ctx['amount'] = '%0.2f Eur' % instance.vaquinha.ammount\n ctx['date'] = instance.vaquinha.close_on\n ctx['pay_link'] = reverse('v-pay',kwargs={'vaquinhaid':instance.vaquinha.id,'userid':instance.user.id})\n utils.send_email_template('vaquinha_request','Vaquinha: %s' % (instance.vaquinha.title),instance.user.email,ctx)\n elif instance is not None:\n if instance.ammount > 0:\n ctx = dict()\n ctx['name'] = instance.vaquinha.created_by.first_name\n ctx['payer_name'] = instance.user.first_name\n ctx['payer_email'] = instance.user.email\n ctx['amount'] = '%0.2f Eur' % instance.ammount\n ctx['title'] = instance.vaquinha.title\n ctx['vaquinha_url'] = reverse('v-detail',kwargs={'id':instance.id})\n utils.send_email_template('vaquinha_paid','Vaquinha: %s' % (instance.vaquinha.title),instance.vaquinha.created_by.email,ctx)\n\n","sub_path":"vaquinha/web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"447266280","text":"import argparse\nimport textwrap\nfrom importlib import import_module\n\n\nclass CLIError(Exception):\n \"\"\"Error for CLI commands.\n\n A subcommand may raise this. 
The message will be forwarded to\n the error() method of the argument args.\"\"\"\n\n\n# Important: Following any change to command-line parameters, use\n# python3 -m ase.cli.completion to update autocompletion.\ncommands_ = [\n\n ('findpath', 'mgetool.cli.findpath'),\n ('makebatch', 'mgetool.cli.makebatch'),\n ('cpbatch', 'mgetool.cli.cpbatch'),\n # ('run', 'ase.cli.run'),\n\n]\n\n\ndef main(prog='mgetool', description='mgetool command line tool.', args=None):\n commands = commands_\n parser = argparse.ArgumentParser(prog=prog,\n description=description,\n formatter_class=Formatter)\n parser.add_argument('-T', '--traceback', action='store_true')\n subparsers = parser.add_subparsers(title='Sub-commands',\n dest='command')\n\n subparser = subparsers.add_parser('help',\n description='Help',\n help='Help for sub-command.')\n subparser.add_argument('helpcommand',\n nargs='?',\n metavar='sub-command',\n help='Provide help for sub-command.')\n\n functions = {}\n parsers = {}\n for command, module_name in commands:\n cmd = import_module(module_name).CLICommand\n docstring = cmd.__doc__\n if docstring is None:\n short = cmd.short_description\n long = getattr(cmd, 'description', short)\n else:\n parts = docstring.split('\\n', 1)\n if len(parts) == 1:\n short = docstring\n long = docstring\n else:\n short, body = parts\n long = short + '\\n' + textwrap.dedent(body)\n subparser = subparsers.add_parser(\n command,\n formatter_class=Formatter,\n help=short,\n description=long)\n cmd.add_arguments(subparser)\n functions[command] = cmd.run\n parsers[command] = subparser\n\n args = parser.parse_args(args)\n\n if args.command == 'help':\n if args.helpcommand is None:\n parser.print_help()\n else:\n parsers[args.helpcommand].print_help()\n elif args.command is None:\n parser.print_usage()\n else:\n f = functions[args.command]\n try:\n if f.__code__.co_argcount == 1:\n f(args)\n else:\n f(args, parsers[args.command])\n except KeyboardInterrupt:\n pass\n except CLIError as x:\n parser.error(x)\n except Exception as x:\n if args.traceback:\n raise\n else:\n l1 = '{}: {}\\n'.format(x.__class__.__name__, x)\n l2 = ('To get a full traceback, use: {} -T {} ...'\n .format(prog, args.command))\n parser.error(l1 + l2)\n\n\nclass Formatter(argparse.HelpFormatter):\n \"\"\"Improved help formatter.\"\"\"\n\n def _fill_text(self, text, width, indent):\n assert indent == ''\n out = ''\n blocks = text.split('\\n\\n')\n for block in blocks:\n if block[0] == '*':\n # List items:\n for item in block[2:].split('\\n* '):\n out += textwrap.fill(item,\n width=width - 2,\n initial_indent='* ',\n subsequent_indent=' ') + '\\n'\n elif block[0] == ' ':\n # Indented literal block:\n out += block + '\\n'\n else:\n # Block of text:\n out += textwrap.fill(block, width=width) + '\\n'\n out += '\\n'\n return out[:-1]\n","sub_path":"instance/important_scripts_old/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"517085623","text":"import datetime\nimport socket\nimport os\nimport pwd\nimport argparse\nimport json\nimport sys\n\np = argparse.ArgumentParser()\np.add_argument(\"--description\", \"-d\", required=True, help=\"Set the \\\"description\\\": part of the control file\")\np.add_argument(\"--version\", \"-v\", required=True, help=\"Set the \\\"version\\\": part of the control file\")\np.add_argument(\"--name\", \"-n\", required=True, help=\"Set the \\\"name\\\": part of the control 
file\")\np.add_argument(\"--summary\", \"-s\", required=True, help=\"Set the \\\"summary\\\": part of the control file\")\nargs = p.parse_args()\nuser = pwd.getpwuid(os.getuid())\nuname = os.uname()\ncontrol_dict = {\n \"name\": args.name,\n \"version\": args.version,\n \"platform\": \"all\",\n \"summary\": args.summary,\n \"maintainer\": user.pw_gecos,\n \"description\": args.description,\n \"x-build-date\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"x-build-platform\": uname.machine,\n \"x-build-host\": socket.getfqdn(),\n \"x-build-user\": user.pw_name,\n \"x-build-uname\": f\"('{uname.sysname}', '{uname.nodename}', '{uname.release}', '{uname.version}', '{uname.machine}')\"\n}\n\n\njson.dump(control_dict, sys.stdout, indent=4)\n","sub_path":"scripts/make_ssm_control_file.py","file_name":"make_ssm_control_file.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"133774681","text":"#!/usr/bin/env python\n\"\"\":mod:`serial_graph.gui` -- Graphical interface\n\"\"\"\n\nfrom __future__ import division\n\nimport sys\nfrom cStringIO import StringIO\n\nfrom PySide import QtCore, QtGui, QtSvg\n\nfrom serial_graph.graph import generate_serializability_graph, ParseError\n\nimport networkx\n\n\nclass AspectRatioSvgWidget(QtSvg.QSvgWidget):\n def paintEvent(self, paint_event):\n painter = QtGui.QPainter(self)\n default_width, default_height = self.renderer().defaultSize().toTuple()\n widget_width, widget_height = self.size().toTuple()\n ratio_x = widget_width / default_width\n ratio_y = widget_height / default_height\n if ratio_x < ratio_y:\n new_width = widget_width\n new_height = widget_width * default_height / default_width\n new_left = 0\n new_top = (widget_height - new_height) / 2\n else:\n new_width = widget_height * default_width / default_height\n new_height = widget_height\n new_left = (widget_width - new_width) / 2\n new_top = 0\n self.renderer().render(\n painter,\n QtCore.QRectF(new_left, new_top, new_width, new_height))\n\n\nclass MainWindow(QtGui.QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n\n # Create menu.\n self.menu_bar = QtGui.QMenuBar()\n self.file_menu = self.menu_bar.addMenu('&File')\n # self.file_action = self.file_menu.addAction('&Open...')\n # self.file_action.setShortcut(QtGui.QKeySequence.Open)\n # self.file_action.triggered.connect(self._load_file)\n self.submit_action = self.file_menu.addAction('Submit')\n # self.submit_action.setShortcut(QtGui.QKeySequence(\n # QtCore.Qt.ControlModifier | QtCore.Qt.Key_Return))\n self.submit_action.triggered.connect(self._submit)\n self.quit_action = self.file_menu.addAction('&Quit')\n self.quit_action.setShortcut(QtGui.QKeySequence.Quit)\n self.quit_action.triggered.connect(self.close)\n self.setMenuBar(self.menu_bar)\n\n # Create central widget.\n self.central_widget = QtGui.QWidget()\n self.central_layout = QtGui.QHBoxLayout(self.central_widget)\n\n # Left side.\n self.form_layout = QtGui.QVBoxLayout()\n self.input_area = QtGui.QPlainTextEdit()\n font = self.input_area.font()\n font.setPointSize(50)\n self.input_area.setFont(font)\n self.form_layout.addWidget(self.input_area)\n self.submit_button = QtGui.QPushButton('Submit')\n self.submit_button.setShortcut(QtGui.QKeySequence(\n QtCore.Qt.ControlModifier | QtCore.Qt.Key_Return))\n self.submit_button.clicked.connect(self._submit)\n self.form_layout.addWidget(self.submit_button)\n 
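# NOTE (editor): the stretch factor of 1 below gives the form pane an equal share of the window width with the output pane\n        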
self.central_layout.addLayout(self.form_layout, 1)\n\n        # Right side.\n        self.output_layout = QtGui.QVBoxLayout()\n        self.output_area = AspectRatioSvgWidget()\n        self.output_layout.addWidget(self.output_area, 1)\n        self.conflict_serializable_label = QtGui.QLabel()\n        self.output_layout.addWidget(\n            self.conflict_serializable_label, 0, QtCore.Qt.AlignHCenter)\n        self.central_layout.addLayout(self.output_layout, 1)\n\n        self.setCentralWidget(self.central_widget)\n\n        # Load up default data.\n        self._load_default_data()\n        # self._submit()\n\n    def _load_default_data(self):\n        self.input_area.setPlainText('''r1(X)\nr2(Z)\nr3(X)\nr1(Z)\nr2(Y)\nr3(Y)\nw1(X)\nw2(Z)\nw3(Y)\nw2(Y)''')\n\n    def _submit(self):\n        schedule_file = StringIO(self.input_area.toPlainText())\n        try:\n            graph = generate_serializability_graph(schedule_file)\n        except ParseError as error:\n            QtGui.QMessageBox(QtGui.QMessageBox.Warning, 'Parse error',\n                              str(error)).exec_()\n            return\n        finally:\n            schedule_file.close()\n\n        is_conflict_serializable = (\n            networkx.algorithms.simple_cycles(\n                networkx.from_pydot(graph)) == [])\n        self.conflict_serializable_label.setText(\n            'This schedule is{0} conflict serializable.'.format(\n                '' if is_conflict_serializable else ' not'))\n\n        self.output_area.load(QtCore.QByteArray(\n            graph.create(prog='dot', # default\n                         format='svg')))\n\n\ndef main(argv):\n    app = QtGui.QApplication(argv)\n\n    win = MainWindow()\n    win.showMaximized()\n    win.raise_()\n\n    app.exec_()\n    return 0\n\nif __name__ == '__main__':\n    raise SystemExit(main(sys.argv))\n","sub_path":"serial_graph/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"128627440","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom time import sleep\n\ndef main():\n    driver = webdriver.Chrome(\"/home/indou/bin/chromedriver\")\n    driver.get(\"https://id.heroku.com/login\")\n\n    elem = driver.find_element_by_id(\"email\")\n    elem.send_keys(\"tatsuo-i@mtb.biglobe.ne.jp\")\n\n    elem = driver.find_element_by_id(\"password\")\n    elem.send_keys(\"intatsu1645\")\n\n    elem = driver.find_element_by_name(\"commit\")\n    elem.click()\n\n    return 0\n\nif __name__ == '__main__':\n    import sys\n    import traceback\n    try:\n        rc = main()\n        sys.exit(rc)\n    except Exception as e:\n        print(traceback.format_exc()) \n        sys.exit(8)\n","sub_path":"py/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"63915283","text":"\"\"\"Final Project_Group16_201621494_오다빈 / reference: 2016 Kenichiro Tanaka\"\"\"\nimport sys\nfrom math import sqrt\nfrom random import randint\nimport pygame\nfrom pygame.locals import QUIT, KEYDOWN, K_LEFT, K_RIGHT, K_DOWN, K_SPACE\n\n#all 7 block types, with every rotation state spelled out\nBLOCK_DATA = (\n    (\n        (0,0,1, 1,1,1, 0,0,0),\n        (0,1,0, 0,1,0, 0,1,1),\n        (0,0,0, 1,1,1, 1,0,0),\n        (1,1,0, 0,1,0, 0,1,0)\n    ),\n    (\n        (2,0,0, 2,2,2, 0,0,0),\n        (0,2,2, 0,2,0, 0,2,0),\n        (0,0,0, 2,2,2, 0,0,2),\n        (0,2,0, 0,2,0, 2,2,0)\n    ),\n    (\n        (0,3,0, 3,3,3, 0,0,0),\n        (0,3,0, 0,3,3, 0,3,0),\n        (0,0,0, 3,3,3, 0,3,0),\n        (0,3,0, 3,3,0, 0,3,0)\n    ),\n    (\n        (4,4,0, 0,4,4, 0,0,0),\n        (0,0,4, 0,4,4, 0,4,0),\n        (0,0,0, 4,4,0, 0,4,4),\n        (0,4,0, 4,4,0, 4,0,0)\n    ),\n    (\n        (0,5,5, 5,5,0, 0,0,0),\n        (0,5,0, 0,5,5, 0,0,5),\n        (0,0,0, 0,5,5, 5,5,0),\n        (5,0,0, 5,5,0, 0,5,0)\n    ),\n    (\n        (6,6,6,6),\n        (6,6,6,6),\n        (6,6,6,6),\n        (6,6,6,6)\n    ),\n    (\n        (0,7,0,0, 0,7,0,0, 0,7,0,0, 0,7,0,0),\n        
(0,0,0,0, 7,7,7,7, 0,0,0,0, 0,0,0,0),\n        (0,0,7,0, 0,0,7,0, 0,0,7,0, 0,0,7,0),\n        (0,0,0,0, 0,0,0,0, 7,7,7,7, 0,0,0,0)\n    )\n)\n\nclass Block:\n    def __init__(self, count): #a falling block object\n        self.turn = randint(0, 3) #pick the spawn rotation state; randint returns a random int within the given range\n        self.type = BLOCK_DATA[randint(0, 6)] #pick which kind of block to spawn\n        self.data = self.type[self.turn] #block shape with the rotation applied, as a flat 1-D tuple\n        self.size = int(sqrt(len(self.data))) #side length is the square root of len(self.data) (length 9 -> 3*3 block)\n        self.xpos = randint(2, 8 - self.size) #x axis (2 ~ 8-size) -> spawns slightly to the left\n        self.ypos = 1 - self.size #y axis (1-size)\n        self.fire = count + INTERVAL #the time at which the block starts to fall\n\n    def update(self, count): #refresh the block state (returns the number of cleared rows)\n        erased = 0 # x_offset and y_offset are used to index positions inside BLOCK_DATA\n        if is_overlapped(self.xpos, self.ypos + 1, self.turn):\n            for y_offset in range(BLOCK.size): #double loop visits every cell of the block\n                for x_offset in range(BLOCK.size):\n                    if 0 <= self.xpos+x_offset < WIDTH and 0 <= self.ypos+y_offset < HEIGHT:\n                        val = BLOCK.data[y_offset*BLOCK.size + x_offset] #flat index of the current cell\n                        if val != 0: #the block hit bottom, so copy its shape into place\n                            FIELD[self.ypos+y_offset]\\\n                                [self.xpos+x_offset] = val\n\n            erased = erase_line() #clears any rows that are completely filled\n            go_next_block(count) #switch to the next block\n            #end of the stacking logic\n        if self.fire < count: #if self.fire is below count, the block is due to drop\n            self.fire = count + INTERVAL #not time for the next drop yet, so reschedule self.fire to count + INTERVAL\n            self.ypos += 1 #move down one cell\n        return erased #hand back the erased count from above\n\n    def draw(self): #draw the block\n        for index in range(len(self.data)):\n            xpos = index % self.size\n            ypos = index // self.size\n            val = self.data[index]\n            if 0 <= ypos + self.ypos < HEIGHT and 0 <= xpos + self.xpos < WIDTH and val != 0:\n                x_pos = 25 + (xpos + self.xpos) * 25 #25 px to the left wall border (25 px per cell) + cells*25\n                y_pos = 25 + (ypos + self.ypos) * 25\n                pygame.draw.rect(SURFACE, COLORS[val], #draw.rect takes (surface, color, rect\n                                 (x_pos, y_pos, 24, 24)) #fill only 24x24 so background-colored seams stay visible between blocks\n\ndef erase_line(): #erase completely filled rows\n    erased = 0\n    ypos = 20 #start ypos at 20, i.e. HEIGHT-2 (0-based, minus one wall row)\n    while ypos >= 0: #scan from the bottom row up, checking whether each row is full (loop until ypos reaches 0)\n        if all(FIELD[ypos]): #all() returns True when every element is truthy\n            erased += 1 #the row is full, so bump the cleared-row counter erased by 1\n            del FIELD[ypos] #delete row ypos\n            FIELD.insert(0, [8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8]) #insert a fresh row at the top for each deleted one (8 is the left/right wall)\n        else: #when all() is False\n            ypos -= 1\n    return erased\n\ndef is_game_over(): #check whether the game is over\n    filled = 0\n    for cell in FIELD[0]: #scan row 0, the top row of FIELD\n        if cell != 0:\n            filled += 1\n    return filled > 2 #the two walls already occupy 2 cells, so anything above 2 means TRUE\n\ndef go_next_block(count): #promote the falling block to the next queued block\n    global BLOCK, NEXT_BLOCK #global because these module-level names are reassigned\n    BLOCK = NEXT_BLOCK if NEXT_BLOCK != None else Block(count) #create a block instance of the Block class\n    NEXT_BLOCK = Block(count) #queue up a fresh block\n\ndef is_overlapped(xpos, ypos, turn): #check whether the block collides with another block\n    data = BLOCK.type[turn] #flat list for this block's type at the given rotation\n    for y_offset in range(BLOCK.size): #double loop so the loop variables walk every cell of the block\n        for x_offset in range(BLOCK.size):\n            if 0 <= xpos+x_offset < WIDTH and 0 <= ypos+y_offset < HEIGHT:\n                #while the cell lies inside the valid field range\n                if data[y_offset*BLOCK.size + x_offset] != 0 and FIELD[ypos+y_offset][xpos+x_offset] != 0:\n                    #if the block cell is nonzero (occupied) and the field cell is nonzero too = a collision\n                    return True #collided -> return True\n    return False\n\n# global variables\npygame.init()\npygame.key.set_repeat(30, 30)\nSURFACE = pygame.display.set_mode([600, 600]) #drawing surface\nFPSCLOCK = pygame.time.Clock() #frame-rate control\nWIDTH = 12 #width of FIELD\nHEIGHT = 22 #height of FIELD\nINTERVAL = 40 #frame interval between automatic block drops\n
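# FIELD holds the settled cells: 0 is empty, 1-7 are block colors, 8 marks the walls and floor\n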
FIELD = [[0 for _ in range(WIDTH)] for _ in range(HEIGHT)] #the playing field\nCOLORS = ((0, 0, 0), (255, 165, 0), (0, 0, 255), (0, 255, 128), (0, 255, 0), (128, 0, 255), (255, 128, 0), (255, 0, 0), (128, 128, 128))\nBLOCK = None\nNEXT_BLOCK = None\nrunning = True\n\ndef main(): #main function\n    global INTERVAL #global because INTERVAL is reassigned below\n    count = 0 #initialize count, score and game_over\n    score = 0\n    game_over = False\n    smallfont = pygame.font.SysFont(None, 40) #small font for the score\n    largefont = pygame.font.SysFont(None, 72) #large font for the game-over message\n    message_over = largefont.render(\"GAME OVER!!\", #prepare the game-over message surface\n                                    True, (0, 255, 225))\n    message_rect = message_over.get_rect()\n    message_rect.center = (300, 300)\n\n    go_next_block(INTERVAL)\n\n    for ypos in range(HEIGHT):\n        for xpos in range(WIDTH):\n            FIELD[ypos][xpos] = 8 if xpos == 0 or \\\n                xpos == WIDTH - 1 else 0\n    for index in range(WIDTH):\n        FIELD[HEIGHT - 1][index] = 8\n\n    while True:\n        key = None\n        for event in pygame.event.get(): #pull events one at a time\n            if event.type == QUIT: #when a quit event is detected\n                pygame.quit() #tear pygame down\n                sys.exit() #and exit the program\n            elif event.type == KEYDOWN: #if a key was pressed\n                key = event.key #remember which key it was\n\n        game_over = is_game_over() #evaluate is_game_over()\n        if not game_over: #runs only while the game is not over\n            count += 5 #advance the counter by 5\n            if count % 1000 == 0: #every time count reaches a multiple of 1000\n                INTERVAL = max(1, INTERVAL - 2) #shrink INTERVAL by 2 (blocks fall faster)\n            erased = BLOCK.update(count) #run the Block class's update method; the block state refreshes every frame,\n            #erased receives the number of rows that were cleared\n            if erased > 0: #if any rows were cleared\n                score += (2 ** erased) * 100 #raise the score (clearing more rows at once pays much more)\n\n            # handle key input\n            next_x, next_y, next_t = BLOCK.xpos, BLOCK.ypos, BLOCK.turn\n            if key == K_SPACE:\n                next_t = (next_t + 1) % 4 #add 1 to next_t, modulo 4 (rotate)\n            elif key == K_RIGHT: #move right one cell\n                next_x += 1\n            elif key == K_LEFT: #move left one cell\n                next_x -= 1\n            elif key == K_DOWN: #move down one cell\n                next_y += 1\n\n            if not is_overlapped(next_x, next_y, next_t): #apply next_x, next_y, next_t only when is_overlapped reports no collision\n                BLOCK.xpos = next_x\n                BLOCK.ypos = next_y\n                BLOCK.turn = next_t\n                BLOCK.data = BLOCK.type[BLOCK.turn]\n\n        # draw the background and the falling block\n        SURFACE.fill((0, 0, 0)) #paint the whole background\n        for ypos in range(HEIGHT): #double loop that draws the field\n            for xpos in range(WIDTH):\n                val = FIELD[ypos][xpos]\n                pygame.draw.rect(SURFACE, COLORS[val],\n                                 (xpos * 25 + 25, ypos * 25 + 25, 24, 24))\n        BLOCK.draw() #draw the falling block\n\n        # draw the next block\n        for ypos in range(NEXT_BLOCK.size): #iterate only up to NEXT_BLOCK's size while drawing\n            for xpos in range(NEXT_BLOCK.size):\n                val = NEXT_BLOCK.data[xpos + ypos * NEXT_BLOCK.size]\n                pygame.draw.rect(SURFACE, COLORS[val],\n                                 (xpos * 25 + 460, ypos * 25 + 100, 24, 24)) #preview the next block at 460 from the left, 100 from the top\n\n        # show the score\n        score_str = str(score).zfill(6)\n        score_image = smallfont.render(score_str,\n                                       True, (0, 255, 0))\n        SURFACE.blit(score_image, (480, 30)) #display the score along the top of the screen\n\n        if game_over: #show the message once the game is over\n            SURFACE.blit(message_over, message_rect)\n\n        \n\n        \n        \n\n        pygame.display.update() #flip everything drawn above onto the screen\n        FPSCLOCK.tick(15) #cap the frame rate\n    \nif __name__ == '__main__':\n    main()\n","sub_path":"pygame_project/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"340833253","text":"import os\nN = int(input(\"Please enter N =\"))\ncount = 0\nfile_no = 1\nwith open(\"hightemp.txt\", \"r\") as file:\n    lines = file.read().splitlines()\n    while count < len(lines):\n        with open(\"file{}.txt\".format(file_no), \"w\", encoding=\"utf8\") as output:\n            for x in lines[count:count+N]:\n                
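# NOTE (editor): each chunk of N lines goes to its own numbered file, duplicating by hand what the split -l call at the bottom does\n                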
content = \"{}\\n\".format(x)\n                print(content)\n                output.write(content)\n            print(\"//////\")\n        count += N\n        file_no += 1\nprint(\"確認\")\nos.system(\"split -l {} hightemp.txt\".format(N))\n","sub_path":"bambi/chapter02/knock16.py","file_name":"knock16.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"230601193","text":"class Solution:\n    def isValidSudoku(self, board: List[List[str]]) -> bool:\n        n = len(board)\n        for i in range(n):\n            line = [k for k in board[i] if not k == '.']\n            colu = [k[i] for k in board if not k[i] == '.']\n            box_list = [k[i%3*3:i%3*3+3] for k in board[i//3*3:i//3*3+3]]\n            box = [i for i in box_list[0]+box_list[1]+box_list[2] if not i == '.']\n            if not len(line) == len(set(line)) or not len(colu) == len(set(colu)) or not len(box) == len(set(box)):\n                return False\n        \n        return True","sub_path":"Week_07/G20200343040290/LeetCode_36_0290.py","file_name":"LeetCode_36_0290.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"654127968","text":"#!/usr/bin/python3\n\"\"\"place Module\"\"\"\n\nfrom models.base_model import BaseModel\n\n\nclass Place(BaseModel):\n    \"\"\"\n    Place class\n    \"\"\"\n    city_id = \"\"\n    user_id = \"\"\n    name = \"\"\n    description = \"\"\n    number_rooms = 0\n    number_bathrooms = 0\n    max_guest = 0\n    price_by_night = 0\n    latitude = 0.0\n    longitude = 0.0\n    amenity_ids = []\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        init method\n\n        Args:\n            Recieve keyworded and non keyworded args\n        \"\"\"\n        if kwargs is not None and len(kwargs) > 0:\n            super().__init__(**kwargs)\n\n        elif args is not None and len(args) > 0:\n            print('im using args')\n\n        else:\n            super().__init__()\n\n    def to_dict(self):\n        \"\"\"\n        to_dict method\n\n        Return: dictionary containing all keys/values\n        of __dict__ of an instance\n        \"\"\"\n        dictionary = super().to_dict()\n\n        dictionary[\"city_id\"] = self.city_id\n        dictionary[\"user_id\"] = self.user_id\n        dictionary[\"name\"] = self.name\n        dictionary[\"description\"] = self.description\n        dictionary[\"number_rooms\"] = self.number_rooms\n        dictionary[\"number_bathrooms\"] = self.number_bathrooms\n        dictionary[\"max_guest\"] = self.max_guest\n        dictionary[\"price_by_night\"] = self.price_by_night\n        dictionary[\"latitude\"] = self.latitude\n        dictionary[\"longitude\"] = self.longitude\n        dictionary[\"amenity_ids\"] = self.amenity_ids\n        return dictionary\n","sub_path":"models/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"387101743","text":"import numpy as np\r\nimport string\r\nimport struct\r\nimport bpy\r\nimport os\r\n\r\nV_28 = True\r\nversion = float(bpy.app.version_string[:4])\r\nif version < 2.8: V_28 = False\r\n\r\n\r\ndef readString(file_h):\r\n\taByte = file_h.read(1)\r\n\ts = aByte\r\n\twhile aByte and ord(aByte) != 0:\r\n\t\taByte = file_h.read(1)\r\n\t\ts += aByte\r\n\treturn s[:-1].decode('ascii', errors='ignore')\r\n\r\n\r\n\r\n\r\ndef align(ptr,alignment):\r\n\talignment -= 1\r\n\treturn (ptr + alignment) & ~(alignment)\r\n\r\n\r\n\r\n\r\ndef rd(file_h):\r\n\tval = 0\r\n\tch = struct.unpack(\"B\",file_h.read(1))[0]\r\n\tif ch == 0xCE:\r\n\t\tval = struct.unpack(\"<I\",file_h.read(4))[0]\r\n\telse:\r\n\t\tval = ch\r\n\t\tif ch >> 4 == 9:\r\n\t\t\tval &= 0xF\r\n\treturn val\r\n\r\n\r\n\r\n\r\ndef get_unknownCount(k):\r\n\tval = 0\r\n\tif k == 0x91:\r\n\t\tval = 1\r\n\telif k == 
0x92:\r\n\t\tval = 4\r\n\telif k == 0x93:\r\n\t\tval = 7\r\n\telif k == 0x94:\r\n\t\tval = 10\r\n\treturn val\r\n\r\ndef getByteCount(type):\r\n\tbct = 0\r\n\tif type == 6:\r\n\t\tbct = 2\r\n\telif type == 8:\r\n\t\tbct = 2\r\n\telif type == 12:\r\n\t\tbct = 1\r\n\telif type == 14:\r\n\t\tbct = 1\r\n\telif type == 16:\r\n\t\tbct = 4\r\n\telif type == 26:\r\n\t\tbct = 2\r\n\treturn bct\r\n\r\n\r\n\r\n\r\ndef get_top_count(file_h, f_size):\r\n\tunk0 = rd(file_h)\r\n\tunk1 = rd(file_h)\r\n\tstrlen_n = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\tstop,p = False,0\r\n\twhile stop == False and file_h.tell() < f_size:\r\n\t\tst = readString(file_h)\r\n\t\tif \"asset_uri\" in st:\r\n\t\t\tstop = True\r\n\t\t\tunk0 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\t\tstrlen0 = struct.unpack(\"B\",file_h.read(1))[0] # asset_uri\r\n\t\t\treadString(file_h) # asset_uri\r\n\t\t\tstrlen1 = struct.unpack(\"B\",file_h.read(1))[0] # ref\r\n\t\t\treadString(file_h) # ref\r\n\t\t\tunk1 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\t\tstrlen2 = struct.unpack(\"B\",file_h.read(1))[0] # file\r\n\t\t\treadString(file_h) # file\r\n\t\t\t\r\n\t\t\tp = rd(file_h)\r\n\tfile_h.seek(0,0)\r\n\treturn p\r\n\r\n\r\n\r\n\r\ndef rd_top(file_h, f_size):\r\n\tlt = []\r\n\tt_count = get_top_count(file_h, f_size)\r\n\tunk0 = rd(file_h)\r\n\tunk1 = rd(file_h)\r\n\tfor x in range(t_count):\r\n\t\tstrlen_n = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\t\tn = readString(file_h)\r\n\t\tunk2 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\tstrlen_a = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\tlt.append({n:readString(file_h)})\r\n\t\tif x == t_count - 1:\r\n\t\t\tstrlen_2 = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\t\t\tast = readString(file_h) # asset_uri\r\n\t\t\tunk3 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\t\tstrlen_3 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\t\tlt.append({ast:readString(file_h)})\r\n\t\t\tstrlen_4 = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\t\t\trf = readString(file_h) # ref\r\n\t\t\tunk4 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\t\tstrlen_5 = struct.unpack(\"B\",file_h.read(1))[0]\r\n\t\t\tlt.append({rf:readString(file_h)})\r\n\treturn lt\r\n\r\n\r\n\r\n\r\ndef rd_bones1(file_h):\r\n\tname_count = rd(file_h)\r\n\tfor i in range(name_count):\r\n\t\tfor j in range(12): rd(file_h)\r\n\t\tname_size = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\t\tbn = readString(file_h)\r\n\r\n\r\n\r\n\r\ndef modelHeader(file_h):\r\n\tfile_h.seek(11,1)\r\n\tclusterName = readString(file_h) # Usually Parts_Base\r\n\tcount_maybe = struct.unpack(\"B\",file_h.read(1))[0] # & 0xF\r\n\tname_size = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\tClusterName_name = readString(file_h) # CLUSTER_NAME\r\n\tmeshCount = rd(file_h)\r\n\treturn meshCount\r\n\r\n\r\n\r\n\r\ndef rd_meshBegin(file_h):\r\n\tname_size = struct.unpack(\"B\",file_h.read(1))[0] - 0xA0\r\n\tmesh_name = readString(file_h)\r\n\tfile_h.seek(1,1) # ?\r\n\tcount = rd(file_h)\r\n\tfor j in range(count): # cruft\r\n\t\trd(file_h)\r\n\tfile_h.seek(94,1) # more cruft\r\n\treturn mesh_name\r\n\r\n\r\n\r\n\r\ndef rd_meshEnd(file_h):\r\n\tunk = struct.unpack(\"B\",file_h.read(1))[0]\r\n\tmaybe_count = struct.unpack(\"B\",file_h.read(1))[0] # ?\r\n\t# maybe_count = rd(file_h)\r\n\t\r\n\tfile_h.seek(46,1)\r\n\tlod_0 = rd(file_h)\r\n\tlod_1 = rd(file_h)\r\n\tlod_2 = rd(file_h)\r\n\t\r\n\tpoo = struct.unpack(\"H\",file_h.read(2))[0] # 0xC2 / 0xC3\r\n\t\r\n\twhoCares = rd(file_h)\r\n\tcount_9 = 
struct.unpack(\"B\",file_h.read(1))[0]\r\n\tcount = get_unknownCount(count_9)\r\n\trd(file_h)\r\n\tzero = struct.unpack(\"B\",file_h.read(1))[0] # 0\r\n\tfor t in range(count):\r\n\t\trd(file_h)\r\n\tC2 = struct.unpack(\"B\",file_h.read(1))[0] # 0xC2\r\n\trd(file_h)\r\n\tC3 = struct.unpack(\"B\",file_h.read(1))[0] # 0xC3\r\n\trd(file_h)\r\n\tunk_byte = struct.unpack(\"B\",file_h.read(1))[0]\r\n\treturn lod_0\r\n\r\n\r\n\r\n\r\ndef data_paver(start, end, count, subCount, type, data):\r\n\tif type == 6: # NORMAL FACTORS\r\n\t\tpos = data[:,start:end].ravel().view(dtype = '.+$\\n', line):\n seqs += line.strip()\n else:\n seqs += '\\n'\n \n seqs = seqs.lstrip().split('\\n')\n return(seqs)\n \ndef profileMatrix(seqs):\n ''' Generate a profile matrix from a list of\n DNA sequences. Assumes all the sequences\n are of equal length.\n '''\n length = len(seqs[0])\n matrix =[[0 for x in range(4)] for y in range(length)]\n \n for i in range(length):\n for string in seqs:\n if string[i].upper() == 'A':\n matrix[i][0] += 1\n elif string[i].upper() == 'C':\n matrix[i][1] += 1\n elif string[i].upper() == 'G':\n matrix[i][2] += 1\n elif string[i].upper() == 'T':\n matrix[i][3] += 1\n\n return(matrix)\n\ndef consensusSeq(profile):\n ''' Determine the consensus sequence from a\n given profile matrix.\n '''\n consensus = ''\n\n letter = ['A', 'C', 'G', 'T']\n for i in range(len(profile)):\n nt = profile[i].index(max(profile[i]))\n consensus += letter[nt]\n\n return(consensus)\n\ndef formatProfile(profile):\n ''' A generator that outputs a given profile\n matrix in a readable format.\n '''\n prefix = ['A', 'C', 'G', 'T']\n for i in range(4):\n line = prefix[i] + ': '\n for j in range(len(profile)):\n line += str(profile[j][i]) + ' '\n\n yield(line)\n\ndef main():\n sequences = parseFasta('problem_datasets/rosalind_cons.txt')\n profile = profileMatrix(sequences)\n consensus = consensusSeq(profile)\n\n with open('output/rosalind_cons_out.txt', 'w') as outfile:\n outfile.write(consensus + '\\n')\n for line in formatProfile(profile):\n outfile.write(line + '\\n')\n\nif __name__ == '__main__':\n main()\n","sub_path":"2_bioinformatics_stronghold/rosalind_CONS.py","file_name":"rosalind_CONS.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"36254673","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.selector import Selector\n\nfrom StringIO import StringIO\nfrom zipfile import ZipFile\nfrom urllib import urlopen\nfrom datetime import date, datetime\n\nfrom urlparse import urlparse, parse_qs\n\nclass ParserMixin:\n def parse_deputado_detalhes(self, response):\n self.logger.info('parsing %s', response.url)\n perfil = response.meta.get('deputado', {})\n deputados = response.xpath('//Deputado')\n item = lambda f,d: d.xpath('%s/text()' % f).extract_first('').strip()\n keys = lambda f,d: dict([(k, item(k,d)) for k in f])\n mapKeys = lambda f,d: map(lambda x: keys(f, x), d)\n foto = (\n 'http://www.camara.gov.br/internet/deputado/bandep/{0}.jpg'\n )\n\n for deputado in deputados:\n data = {\n 'matricula': perfil.get('matricula', None),\n 'numLegislatura': item('numLegislatura', deputado),\n 'email': item('email', deputado),\n 'nomeProfissao': item('nomeProfissao', deputado),\n 'dataNascimento': item('dataNascimento', deputado),\n 'dataFalecimento': item('dataFalecimento', deputado),\n 'ufRepresentacaoAtual': item('ufRepresentacaoAtual', deputado),\n 'situacaoNaLegislaturaAtual': item('situacaoNaLegislaturaAtual', 
deputado),\n 'ideCadastro': item('ideCadastro', deputado),\n 'idParlamentarDeprecated': item('idParlamentarDeprecated', deputado),\n 'nomeParlamentarAtual': item('nomeParlamentarAtual', deputado),\n 'nomeCivil': item('nomeCivil', deputado),\n 'sexo': item('sexo', deputado),\n 'urlFoto': foto.format(item('ideCadastro', deputado)),\n 'partidoAtual': keys(['idPartido','sigla','nome'], deputado.xpath('partidoAtual')),\n 'gabinete': keys(['numero','anexo','telefone'], deputado.xpath('gabinete')),\n 'comissoes': mapKeys([\n 'idOrgaoLegislativoCD',\n 'siglaComissao',\n 'nomeComissao',\n 'condicaoMembro',\n 'dataEntrada',\n 'dataSaida'\n ], deputado.xpath('comissoes/comissao')),\n 'cargosComissoes': mapKeys([\n 'idOrgaoLegislativoCD',\n 'siglaComissao',\n 'nomeComissao',\n 'idCargo',\n 'nomeCargo',\n 'dataEntrada',\n 'dataSaida'\n ], deputado.xpath('cargosComissoes/cargoComissoes')),\n 'periodosExercicio': mapKeys([\n 'siglaUFRepresentacao',\n 'situacaoExercicio',\n 'dataInicio',\n 'dataFim',\n 'idCausaFimExercicio',\n 'descricaoCausaFimExercicio',\n 'idCadastroParlamentarAnterior'\n ], deputado.xpath('periodosExercicio/periodoExercicio')),\n 'historicoNomeParlamentar': item('historicoNomeParlamentar', deputado),\n 'filiacoesPartidarias': item('filiacoesPartidarias', deputado),\n 'historicoLider': mapKeys([\n 'idHistoricoLider',\n 'idCargoLideranca',\n 'descricaoCargoLideranca',\n 'numOrdemCargo',\n 'dataDesignacao',\n 'dataTermino',\n 'codigoUnidadeLideranca',\n 'siglaUnidadeLideranca',\n 'idBlocoPartido'\n ], deputado.xpath('historicoLider/itemHistoricoLider'))\n }\n\n yield data\n\n def parse_deputado(self, response):\n item = lambda f: response.xpath('%s/text()' % f).extract_first('').strip()\n\n return {\n 'numLegislatura': item('numLegislatura'),\n 'ideCadastro': item('ideCadastro'),\n 'codOrcamento': item('codOrcamento'),\n 'condicao': item('condicao'),\n 'matricula': item('matricula') or item('Matricula'),\n 'idParlamentar': item('idParlamentar'),\n 'nome': item('nome'),\n 'nomeParlamentar': item('nomeParlamentar'),\n 'urlfoto': item('urlfoto'),\n 'sexo': item('sexo') or item('SEXO'),\n 'uf': item('uf') or item('UFEleito'),\n 'partido': item('partid'),\n 'gabinete': item('gabinete'),\n 'anexo': item('anexo'),\n 'fone': item('fone'),\n 'email': item('email'),\n }\n\n def parse_presenca(self, response):\n self.logger.info('parsing %s', response.url)\n text = lambda f,d: d.xpath('%s/text()' % f).extract_first('').strip()\n campo = lambda f: text(f,response.xpath('/parlamentar'))\n meta = response.meta.get('deputado')\n\n for diaItem in response.xpath('//diasDeSessoes2/dia'):\n dia = lambda f: text(f, diaItem)\n\n for sessaoItem in diaItem.xpath('sessoes/sessao'):\n sessao = lambda f: text(f, sessaoItem)\n yield {\n 'ideCadastro': meta.get('ideCadastro'),\n 'matricula': meta.get('matricula'),\n 'legislatura': campo('legislatura'),\n 'carteiraParlamentar': campo('carteiraParlamentar'),\n 'nomeParlamentar': campo('nomeParlamentar'),\n 'siglaPartido': campo('siglaPartido'),\n 'siglaUF': campo('siglaUF'),\n 'data': dia('data'),\n 'frequencianoDia': dia('frequencianoDia'),\n 'justificativa': dia('justificativa'),\n 'qtdeSessoes': dia('qtdeSessoes'),\n 'descricao': sessao('descricao'),\n 'frequencia': sessao('frequencia')\n }\n\n def parse_votos(self, res):\n self.logger.info('parsing %s', res.url)\n deputado = res.meta.get('deputado')\n\n for row in res.css('.tabela-1 tr:nth-child(n+2)'):\n columns = filter(None, map(unicode.strip, row.css('td *::text').extract()))\n columns = map(lambda x: 
x.replace('---', ''), columns)\n\n if len(columns) == 3:\n proposicao = '%s %s' % (columns[0], columns[1])\n proposicao = map(unicode.strip, proposicao.split('-', 1))\n voto = columns[2]\n elif len(columns) == 2:\n proposicao = columns[0]\n voto = columns[1]\n\n if row.xpath('@class').extract_first() == 'even':\n sessao = columns\n else:\n [codigo, descricao] = dict(zip(['codigo', 'descricao'], proposicao))\n\n yield {\n 'ideCadastro' : deputado.get('ideCadastro'),\n 'nomeParlamentarAtual' : deputado.get('nomeParlamentarAtual'),\n 'partidoAtual' : deputado.get('partidoAtual'),\n 'numLegislatura' : deputado.get('numLegislatura'),\n 'matricula' : deputado.get('matricula'),\n 'data' : datetime.strptime(sessao[0], \"%d/%m/%Y\").isoformat(),\n 'sessao' : sessao[1],\n 'frequencia' : sessao[2],\n 'justificativa' : sessao[3],\n 'proposicao' : codigo,\n 'descricao' : descricao,\n 'voto' : voto\n }\n\nclass BaseSpider(scrapy.Spider):\n allowed_domains = [\"www.camara.leg.br\", \"www2.camara.leg.br\", \"www.camara.gov.br\", \"www2.camara.gov.br\"]\n custom_settings = {\n 'RETRY_TIMES': 5\n }\n\n def ano_legislatura(self, legislatura):\n return 2015 - ((55-int(legislatura or 55)) * 4)\n\n def inicio_legislatura(self, legislatura):\n return '01/02/%s' % self.ano_legislatura(legislatura)\n\n def fim_legislatura(self, legislatura):\n return '31/01/%s' % (self.ano_legislatura(legislatura) + 4)\n\n def unzip(self, filename, body):\n url = urlopen(\"http://www.camara.leg.br/internet/deputado/DeputadosXML_52a55.zip\")\n zipfile = ZipFile(StringIO(body))\n return zipfile.open(filename).read()\n\nclass DeputadosSpider(BaseSpider, ParserMixin):\n name = \"deputado\"\n start_urls = [\n 'http://www.camara.leg.br/SitCamaraWS/Deputados.asmx/ObterDeputados',\n 'http://www.camara.leg.br/internet/deputado/DeputadosXML_52a55.zip'\n ]\n\n def __init__(self, nome=None, *args, **kwargs):\n self.nome = unicode(nome, 'utf-8') if nome else None\n super(DeputadosSpider, self).__init__(*args, **kwargs)\n\n def parse(self, response):\n self.logger.info('parsing %s', response.url)\n if '.zip' in response.url:\n xml = self.unzip('Deputados.xml', response.body)\n response = Selector(text=xml, type='xml')\n\n if self.nome:\n deputados = response.xpath('//*[./nomeParlamentar = \"%s\"]' % self.nome)\n else:\n deputados = response.xpath('//nomeParlamentar/..')\n\n return map(self.fetch_deputado, map(self.parse_deputado, deputados))\n\n def fetch_deputado(self, deputado):\n url = (\n 'http://www.camara.leg.br/SitCamaraWS/'\n 'deputados.asmx/ObterDetalhesDeputado'\n '?ideCadastro={0}'\n '&numLegislatura='\n )\n\n meta = dict(deputado=deputado)\n ideCadastro = deputado.get('ideCadastro')\n link = url.format(ideCadastro)\n return scrapy.Request(link, meta=meta,\n callback=self.parse_deputado_detalhes)\n\n def parse_deputado_detalhes(self, response):\n self.logger.info('parsing %s', response.url)\n deputados = super(DeputadosSpider, self).parse_deputado_detalhes(response)\n return map(self.process_deputado, deputados)\n\n def process_deputado(self, deputado):\n return deputado\n\nclass PresencaSpider(DeputadosSpider):\n name = \"presenca\"\n\n def __init__(self, data=None, dataInicio=None, dataFim=None, *args, **kwargs):\n if data == 'hoje':\n data = date.strftime(date.today(), '%d/%m/%Y')\n\n self.dataInicio = dataInicio or data\n self.dataFim = dataFim or data\n\n if data:\n data = data.split('/')\n data[0] = str(int(data[0]) + 1)\n self.dataFim = '/'.join(data)\n\n super(PresencaSpider, self).__init__(*args, **kwargs)\n\n def 
process_deputado(self, deputado):\n        url = (\n            'http://www.camara.leg.br/'\n            'internet/deputado/RelVotacoes.asp'\n            '?nuLegislatura={0}'\n            '&nuMatricula={1}'\n            '&dtInicio={2}'\n            '&dtFim={3}'\n        )\n\n        matricula= deputado.get('matricula')\n        legislatura = deputado.get('numLegislatura')\n        dataInicio = self.dataInicio or self.inicio_legislatura(legislatura)\n        dataFim = self.dataFim or self.fim_legislatura(legislatura)\n\n        meta = dict(deputado=deputado)\n        link = url.format(legislatura, matricula, dataInicio, dataFim)\n        return scrapy.Request(link, meta=meta, callback=self.parse_votos)\n","sub_path":"crawler/spiders.py","file_name":"spiders.py","file_ext":"py","file_size_in_byte":12427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"195251093","text":"def evenly_divisible(lower, upper, limit):\n    for i in range(upper, limit, upper):\n        divisible = True\n        for j in range(lower, upper):\n            if divisible:\n                if i % j != 0:\n                    divisible = False\n\n        if divisible:\n            return i\n\n    return -1\n\n\nprint(evenly_divisible(1, 20, 10000000000))\n","sub_path":"euler/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"18768247","text":"from django.shortcuts import render, redirect\nfrom django.http import *\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth.models import User\nfrom .import forms\nfrom django.contrib.auth.decorators import login_required\nimport re\nfrom django.contrib import messages\nfrom .models import Concurso, UsuarioCustom, ListaLocutores,AudioLocutor, EmpresaRol\nfrom WebConcursos.forms import UserCreationCustom\nfrom django.core.mail import EmailMessage, send_mail\nfrom django.core.files.storage import FileSystemStorage\nimport boto3\nimport shutil\n\n# Create your views here.\n#register users: method used to create the user in the application\ndef form_registrar_usuario(request):\n\tprint(request.POST.get('username'))\n\tif request.method == 'POST':\n\t\t#formulario_registro = forms.UserRegisterFormCustom(request.POST)\n\t\tformulario_registro = forms.UserCreationCustom(request.POST) ##works with first and last names but without company or role\n\t\t#formulario_registro = UserCreationForm(request.POST)\n\t\tformulario_registro.errors.as_data()\n\t\tif formulario_registro.is_valid():\n\t\t\tpatron_correo = 
re.compile(r\"^[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$\")\n\t\t\tcumple_patron = patron_correo.match(request.POST.get('username'))\n\t\t\tprint(cumple_patron)\n\t\t\tif cumple_patron:\n\t\t\t\tuser = formulario_registro.save(commit=False) #create the object without committing, so fields can still be modified\n\t\t\t\tuser.save()\n\t\t\t\tlogin(request,user)\n\t\t\t\tmessages.success(request, 'Gracias por registrarte!!!!!')\n\t\t\t\tmessages.success(request, 'El username para ingreso a la aplicacion es ')\n\t\t\t\tmessages.success(request, user.username)\n\t\t\t\treturn redirect('WebConcursos:login') #send the user to the sign-in screen for registered users\n\t\t\telse:\n\t\t\t\tmessages.info(request, 'El registro debe realizarse con un correo electronico valido')\n\telse:\n\t\t#formulario_registro = forms.UserRegisterFormCustom()\n\t\tformulario_registro = forms.UserCreationCustom() ##works with first and last names but without company or role\n\t\t#formulario_registro = UserCreationForm()\n\treturn render(request,'nuevo_usuario.html',{'formulario_registro':formulario_registro})\n\n\n#login\ndef formulario_ingresar_usuario(request):\n\tif request.method == 'POST':\n\t\tprint(request.POST)\n\t\tformulario_ingreso = AuthenticationForm(data=request.POST)\n\t\tprint(formulario_ingreso.errors)\n\t\tif formulario_ingreso.is_valid(): #if the username and password are correct\n\t\t\t#login\n\t\t\tuser = formulario_ingreso.get_user()\n\t\t\tlogin(request,user)\n\t\t\treturn redirect('WebConcursos:lista_concursos') #to urls.py, name = lista_eventos\n\telse:\n\t\tformulario_ingreso = AuthenticationForm()\n\treturn render(request,'login.html',{'formulario_ingreso':formulario_ingreso})\n\n#logout\ndef logout_view(request):\n\tlogout(request)\n\treturn redirect('WebConcursos:login') #to urls.py, name = lista_eventos\n\n\ndef formulario_crear_concurso(request):\n\tprint(request.method)\n\tif request.method == 'POST':\n\t\tformulario_crear = forms.FormCrearConcurso(request.POST, request.FILES)\n\t\tprint(formulario_crear.errors)\n\t\tprint(request.FILES.get('ruta_imagen'))\n\t\tif formulario_crear.is_valid():\n\t\t\tprint(\"Request para crear concurso\", request.POST)\n\t\t\tconcurso = formulario_crear.save(commit=False)\n\t\t\tconcurso.id_administrador = request.user\n\t\t\t#concurso.ruta_imagen = 'http://localhost:8000/media/media/' + str(request.POST.get('ruta_imagen'))\n\t\t\tconcurso.save()\n\t\t\tconcurso.url_concurso = '/concursos/locutor/detalle_concurso/'+ str(concurso.id) + '/' + str(concurso.id_administrador.id)\n\t\t\tconcurso.save()\n\t\t\turl_usuario = concurso.url_concurso_custom\n\t\t\tconcurso.url_concurso_custom = str(url_usuario)\n\t\t\tconcurso.save()\n\t\t\tif request.FILES.get('ruta_imagen') == None:\n\t\t\t\tconcurso.ruta_imagen = 'sin-imagen.png'\n\t\t\t\tconcurso.save()\n\t\t\treturn redirect('WebConcursos:lista_concursos' ) #after saving, send the user to the contest list\n\telse:\n\t\tform_crear_concurso = forms.FormCrearConcurso()\n\t\treturn render(request, 'crear_concurso.html', {'form_crear_concurso':form_crear_concurso})\n\n#Orders the contests by their start date\ndef traer_lista_concursos(request):\n\tconcursos = Concurso.objects.filter(id_administrador = request.user).order_by('fecha_inicio')\n\tempresa = EmpresaRol.objects.all().filter(id_usuario = request.user.id)\n\treturn render(request, 'lista_concursos.html', {'concursos':concursos, 'empresa':empresa})\n
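# NOTE (editor): borrar_concurso below deletes purely by id and never checks that the concurso belongs to request.user\n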
'empresa':empresa})\n\n\n\ndef borrar_concurso(request, id_concurso):\n\tid_elegido = id_concurso\n\tconcurso = Concurso.objects.filter(id = id_elegido)\n\tconcurso.delete()\n\tcurrent_user = request.user\n\tconcursos = Concurso.objects.filter(id_administrador = request.user).order_by('fecha_inicio')\n\tempresa = EmpresaRol.objects.all().filter(id_usuario = request.user.id)\n\treturn render(request, 'lista_concursos.html', {'concursos':concursos, 'empresa':empresa})\n\n\n\ndef traer_detalle_concurso(request, id_concurso):\n\tid_elegido = id_concurso\n\tconcurso = Concurso.objects.all().filter(id = id_elegido)\n\treturn render(request, 'detalle_concurso.html', {'concurso':concurso})\n\n\ndef formulario_editar_concurso(request, id_concurso):\n\tprint(\"metodo formulario_editar_concurso \", request.method)\n\tif request.method == 'POST':\n\t\tformulario_edicion = forms.FormEditarConcurso(request.POST, request.FILES)\n\t\tprint(formulario_edicion.errors.as_data)\n\t\tif formulario_edicion.is_valid():\n\t\t\tconcurso = formulario_edicion.save(commit=False)\n\t\t\tconcurso.id = id_concurso\n\t\t\tconcurso.id_administrador = request.user\n\t\t\tconcurso.url_concurso = '/concursos/locutor/detalle_concurso/'+ str(concurso.id) + '/' + str(concurso.id_administrador.id)\n\t\t\tconcurso.save()\n\t\t\tconcurso.url_concurso_custom = str(concurso.url_concurso_custom)\n\t\t\tconcurso.save()\n\t\t\tif request.FILES.get('ruta_imagen') == None:\n\t\t\t\tconcurso.ruta_imagen = 'sin-imagen.png'\n\t\t\t\tconcurso.save()\n\t\t\tprint(\"Edicion terminada\")\n\t\treturn redirect('WebConcursos:lista_concursos' ) #despues de guardarlo, envio al usuario a la lista de eventos\n\telse:\n\t\tconcursos = Concurso.objects.filter(id = id_concurso)\n\t\tprint(id_concurso)\n\t\tformulario_edicion = forms.FormEditarConcurso()\n\t\treturn render(request, 'editar_concurso.html', {'formulario_edicion':formulario_edicion , 'concursos':concursos })\n\n\n\ndef detalle_concurso_locutor(request, id_concurso,id_usuario):\n\tid_elegido = id_concurso\n\tprint(\"id_elegido\", id_elegido,'id_usuario', id_usuario )\n\tconcurso = Concurso.objects.filter(id = id_elegido, id_administrador=id_usuario)\n\treturn render(request, 'detalle_locutor.html', {'concurso':concurso})\n\ndef resolver_url(request, url_usuario):\n\tprint(\"url_usuario\", url_usuario)\n\tif url_usuario != 'None':\n\t\tprint(\"paso este if\")\n\t\turl_oficial = Concurso.objects.all().filter(url_concurso_custom = str(url_usuario))[0].url_concurso\n\t\tprint('url_oficial',url_oficial)\n\t\tif url_oficial != 'None': # si no es vacia la direccion del usuario, pero no encuentra en la consulta la url oficial\n\t\t\tprint(\"Esta es la url oficial: \", url_oficial )\n\t\t\treturn redirect(url_oficial)\n\t\telse:\n\t\t\tprint(\"No Existe la direccion configurada\")\n\t\t\treturn render(request, 'page_not_found.html')\n\telse:\n\t\tprint(\"No se ha configurado esta url por el usuario, por favor intentar la asignada por el sistema\")\n\t\treturn render(request, 'page_not_found.html')\n\ndef cargar(request):\n\tif request.method == 'POST':\n\t\tformulario_ingreso = forms.UploadFileForm(data=request.POST)\n\t\tif formulario_ingreso.is_valid(): #si el usuario y password son correctos\n\t\t\tfoto = formulario_ingreso.save(commit=False)\n\t\t\tfoto.save()\n\telse:\n\t\tformulario_ingreso = forms.UploadFileForm()\n\treturn render(request,'upload.html',{'formulario_ingreso':formulario_ingreso})\n\ndef RegistrarLocutorView(request):\n\tif request.method == 'POST':\n\t\tform_lista_locutor = 
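The delete and detail views above fetch rows with `filter(id=...)`, which silently renders an empty page when the id is stale. A sketch using Django's real `get_object_or_404` shortcut instead, with the model import and URL name taken from the views.py record above:

```python
# Sketch: 404 on a missing row instead of rendering an empty queryset.
from django.shortcuts import get_object_or_404, redirect
from .models import Concurso  # same import the record itself uses

def borrar_concurso(request, id_concurso):
    # Also scopes the delete to the requesting administrator.
    concurso = get_object_or_404(Concurso, id=id_concurso,
                                 id_administrador=request.user)
    concurso.delete()
    return redirect('WebConcursos:lista_concursos')
```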
forms.FormListaLocutor(data=request.POST)\n\t\tif form_lista_locutor.is_valid():\n\t\t\tformulario = form_lista_locutor.save(commit=False)\n\t\t\tformulario.id_administrador = request.user\n\t\t\tformulario.save()\n\t\t\tlocutores = ListaLocutores.objects.filter(id_administrador = request.user)\n\t\t\tform_lista_locutor = forms.FormListaLocutor()\n\t\t\treturn render(request, 'crear_lista_locutores.html', {'form_lista_locutor':form_lista_locutor, 'locutores':locutores})\n\telse:\n\t\tlocutores = ListaLocutores.objects.filter(id_administrador = request.user)\n\t\tform_lista_locutor = forms.FormListaLocutor()\n\treturn render(request,'crear_lista_locutores.html',{'form_lista_locutor':form_lista_locutor, 'locutores':locutores})\n\n\ndef EnviarCorreoListaView(request, id_concurso):\n\tprint(\"Estoy en EnviarCorreoListaView con el metodo\", request.method )\n\tif request.method == 'POST':\n\t\tform_mensaje = forms.FormEnviarCorreo(data=request.POST)\n\t\tprint(\"Formulario correo valido? : \", form_mensaje.is_valid())\n\t\tlocutores = ListaLocutores.objects.all().filter(id_administrador = request.user)\n\t\tprint(locutores.count())\n\t\tfor indice in range(len(locutores)):\n\t\t\tprint('Se enviara el concurso a : ',locutores[indice].email)\n\n\t\tif form_mensaje.is_valid():\n\t\t\t#para = request.POST.get('para')\n\t\t\tasunto = request.POST.get('asunto')\n\t\t\tmensaje = request.POST.get('mensaje')\n\t\t\tfor indice in range(len(locutores)):\n\t\t\t\temail = EmailMessage(\n\t\t\t\t\t\t\t asunto,\n\t\t\t\t\t\t\t mensaje,\n\t\t\t\t\t\t\t to=[locutores[indice].email],\n\t\t\t\t\t\t\t\t)\n\t\t\t\temail.send()\n\t\t\treturn redirect('WebConcursos:lista_concursos')\n\telse:\n\t\tprint('id_concurso', id_concurso)\n\t\tconcurso = Concurso.objects.all().filter(id = id_concurso)\n\t\tform_mensaje = forms.FormEnviarCorreo()\n\treturn render(request,'enviar_mail.html',{'form_mensaje':form_mensaje, 'concurso':concurso})\n\ndef BorrarLocutorView(request, id_locutor):\n\tid_elegido = id_locutor\n\tlocutor = ListaLocutores.objects.filter(id = id_elegido)\n\tlocutor.delete()\n\tcurrent_user = request.user\n\tlocutores = ListaLocutores.objects.filter(id_administrador = request.user)\n\tform_lista_locutor = forms.FormListaLocutor()\n\treturn render(request, 'crear_lista_locutores.html', {'form_lista_locutor':form_lista_locutor, 'locutores':locutores})\n\ndef CrearHomeView(request):\n\t#Crear un div por cada usuario con concursos\n\tusers = User.objects.all().count()\n\tempresas = EmpresaRol.objects.all().count()\n\tnumero_concursos = Concurso.objects.all().count()\n\tprint(empresas)\n\tprint(numero_concursos)\n\tusuarios = User.objects.all()\n\tconcursos = Concurso.objects.all()\n\tempresas = EmpresaRol.objects.all()\n\tfor indice in range(len(empresas)):\n\t\tprint('Se creara div para : ',empresas[indice].Empresa)\n\treturn render(request,'home.html',{'empresas':empresas ,'concursos':concursos, 'numero_concursos':numero_concursos,\n\t\t\t\t\t\t\t\t\t\t'usuarios':usuarios})\n\n# YJC\n\ndef enviar_audio(request,id_concurso):\n\n mensaje = \"Hemos recibido tu voz y la estamos procesando para que sea publicada en la página del concurso y pueda ser posteriormente revisada por nuestro equipo de trabajo. 
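`EnviarCorreoListaView` above calls `EmailMessage(...).send()` once per recipient, which by default opens a fresh SMTP connection each time. A sketch that batches the loop over one connection; `get_connection` and `send_messages` are real Django APIs, the function name is illustrative:

```python
# Sketch: send the whole recipient list over a single SMTP session.
from django.core.mail import EmailMessage, get_connection

def enviar_en_lote(asunto, mensaje, destinatarios):
    connection = get_connection()          # one SMTP session for the batch
    mensajes = [
        EmailMessage(asunto, mensaje, to=[email], connection=connection)
        for email in destinatarios
    ]
    connection.send_messages(mensajes)     # reuses the open connection
    connection.close()
```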
Tan pronto la voz quede publicada en la página del concurso te notificaremos por email.\"\n p_id_concurso = id_concurso\n\n if request.method == 'POST':\n form = forms.FormularioEnvioAudio(request.POST, request.FILES)\n print(form.errors)\n if form.is_valid():\n AudioLocutorNuevo = AudioLocutor(nombre = request.POST['nombre'],\n apellidos = request.POST['apellidos'],\n email = request.POST['email'],\n observaciones = request.POST['observaciones'],\n descripcion_audio = request.POST['descripcion_audio'],\n archivo_original = request.FILES['archivo_original'],\n id_concurso_id = p_id_concurso,)\n AudioLocutorNuevo.save(form)\n print(\"id audio\",AudioLocutorNuevo.id)\n p_id_audio = AudioLocutorNuevo.id\n messages.add_message(request, messages.INFO,mensaje)\n formato_archivo = str(request.FILES['archivo_original']).split('.')[1]\n nombre_archivo = str(request.FILES['archivo_original'])\n if formato_archivo == \"mp3\":\n print(\"Dentro del if archivo convertido\")\n audio = AudioLocutor.objects.get(id = p_id_audio)\n audio.estado = \"Convertido\"\n print(audio.archivo_original)\n audio.archivo_convertido = audio.archivo_original\n audio.save()\n #shutil.copy(nombre_archivo,'/procesados')\n # SQS\n else:\n sqs_registrar_mensaje(str(p_id_audio), nombre_archivo)\n else:\n form = forms.FormularioEnvioAudio()\n\n return render(request, 'upload_audio.html', {'form': form})\n\n\ndef listar_audios(request,id_concurso):\n p_id_concurso = id_concurso\n lista = AudioLocutor.objects.filter(id_concurso = p_id_concurso,estado = 'Convertido').order_by('-fecha_creacion')\n return render(request, 'lista_audios.html', {'lista': lista})\n\ndef listar_audios_admin(request,id_concurso):\n p_id_concurso = id_concurso\n lista = AudioLocutor.objects.filter(id_concurso = p_id_concurso).order_by('-fecha_creacion')\n return render(request, 'lista_audios_admin.html', {'lista': lista})\n\ndef listar_audios_marketing(request,id_concurso):\n p_id_concurso = id_concurso\n lista = AudioLocutor.objects.filter(id_concurso = p_id_concurso,estado = 'Convertido').order_by('-fecha_creacion')\n return render(request, 'lista_audios_marketing.html', {'lista': lista})\n\ndef traer_detalle_concurso_mkt(request, id_concurso):\n\tid_elegido = id_concurso\n\tconcurso = Concurso.objects.all().filter(id = id_elegido)\n\treturn render(request, 'detalle_concurso_marketing.html', {'concurso':concurso})\n\ndef seleccionar_audio (request,id_concurso,id_audio):\n\t#mensaje = \"Audio Seleccionado!\"\n\tp_id_concurso = id_concurso\n\tp_id_audio = id_audio\n\taudio = AudioLocutor.objects.get(id = p_id_audio)\n\taudio.seleccionado = 1\n\taudio.save()\n\tlista = AudioLocutor.objects.filter(id_concurso = p_id_concurso,estado = 'Convertido').order_by('-fecha_creacion')\n\t#messages.add_message(request, messages.INFO,mensaje)\n\treturn render(request, 'lista_audios_marketing.html', {'lista':lista})\n\ndef RegistrarEmpresaView(request):\n\tif request.method == 'POST':\n\t\tform_datos_empresa = forms.UserCreationRolEmpresa(data=request.POST)\n\t\tif form_datos_empresa.is_valid():\n\t\t\tformulario = form_datos_empresa.save(commit=False)\n\t\t\tconfigurado = EmpresaRol.objects.all().filter(id_usuario = request.user.id).count()\n\t\t\tif configurado == 1:\n\t\t\t\tem = EmpresaRol.objects.get(id_usuario = request.user.id)\n\t\t\t\tem.id_usuario = request.user\n\t\t\t\tem.Empresa = request.POST.get('Empresa')\n\t\t\t\tprint(em.Empresa)\n\t\t\t\tem.Rol = request.POST.get('Rol')\n\t\t\t\tprint(em.Rol)\n\t\t\t\tem.save()\n\t\t\telse:\n\t\t\t\tformulario = 
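`enviar_audio` above derives the upload's format with `split('.')[1]`, which misreads names like `take.1.mp3` and raises `IndexError` when there is no dot at all. A sketch using `os.path.splitext`, which keeps only the final suffix:

```python
# Sketch: robust extension parsing for the uploaded audio file name.
import os

def formato_de(nombre_archivo):
    # splitext("take.1.mp3") -> ("take.1", ".mp3")
    _, ext = os.path.splitext(str(nombre_archivo))
    return ext.lstrip('.').lower()  # "" when the name has no extension

assert formato_de("voz.mp3") == "mp3"
assert formato_de("take.1.MP3") == "mp3"
assert formato_de("sin_extension") == ""
```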
form_datos_empresa.save(commit=False)\n\t\t\t\tformulario.id_usuario = request.user\n\t\t\t\tformulario.id = request.POST.get(id)\n\t\t\t\tformulario.save()\n\t\tusuario = User.objects.all().filter(id = request.user.id)\n\t\tempresa = EmpresaRol.objects.all().filter(id_usuario = request.user.id)\n\t\treturn render(request,'empresa_rol.html',{'form_datos_empresa':form_datos_empresa, 'usuario':usuario, 'empresa':empresa})\n\telse:\n\t\tusuario = User.objects.all().filter(id = request.user.id)\n\t\tform_datos_empresa = forms.UserCreationRolEmpresa()\n\t\tempresa = EmpresaRol.objects.all().filter(id_usuario = request.user.id)\n\treturn render(request,'empresa_rol.html',{'form_datos_empresa':form_datos_empresa, 'usuario':usuario, 'empresa':empresa})\n\n\t# Manejo de colas SQS\n\n\n\ndef sqs_registrar_mensaje(id_audio, archivo_original):\n # Create SQS client\n sqs = boto3.resource('sqs')\n queue = sqs.get_queue_by_name(QueueName='sqs_concursos')\n url_queue=queue.url\n # Create a new message\n response = queue.send_message(MessageBody='Registrando Mensaje',\n MessageAttributes={\n 'id_audio': {\n 'StringValue': id_audio,\n 'DataType': 'Number'\n },\n 'archivo_original': {\n 'StringValue': archivo_original,\n 'DataType': 'String'\n }\n })\n print(response.get('MessageId'))\n print(response.get('MD5OfMessageBody'))\n","sub_path":"WebConcursos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"218112775","text":"'''\nprogram to reverse given number \n\n\n'''\n\n\ndef reverse_number(num):\n reverse = 0\n while(num > 0):\n remainder = num % 10\n reverse = (reverse * 10) + remainder\n num = num // 10\n return reverse\n\nfor i in range(1):\n print(f\"running for the {i + 1}th time\")\n n = int(input(\"Enter any number : \")) \n reverse = reverse_number(n)\n print(f\"Reversed number is:{reverse}\")\n ","sub_path":"Assignments/Aug292020/reverse_number.py","file_name":"reverse_number.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"208797087","text":"#!/usr/bin/env python2\n#coding:utf-8\n\nimport rospy\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CameraInfo\nfrom cv_bridge import CvBridge,CvBridgeError\nimport socket\nimport threading\nimport sys \nimport signal\nimport struct#used for net pack \n\nclass Node:\n def __init__(self):\n self.flag1=False\n self.flag2=False\n self.num1=0.000000\n self.intrinsic=None\n self.rgbtopic=rospy.Subscriber('/camera/rgb/image_raw', Image, self.rgbcallback)\n self.depthtopic=rospy.Subscriber('/camera/depth/image_raw',Image,self.depthcallback)\n self.intrintopic=rospy.Subscriber('/camera/rgb/camera_info',CameraInfo,self.intrinsic_callback_once)\n #child thread to run server\n #t1 = threading.Thread(target=self.socket_server)\n #t1.setDaemon(True)#set daemon for ctrl c to quit\n #t1.start()\n def quit(self,signum, frame):\n sys.exit()\n def Int_ToBytes(self,n,length):#big\n return bytes('%%0%dx' % (length << 1) % n).decode('hex')[-length:]\n def socket_server(self):\n signal.signal(signal.SIGINT, self.quit) \n signal.signal(signal.SIGTERM, self.quit)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 创建 socket 对象\n host = socket.gethostname() # 获取本地主机名\n port = 12345 # 设置端口\n s.bind(('localhost',9090)) # 绑定端口\n print('localhost'+\" server open\")\n s.listen(5)\n while True:\n try:\n print('start accept')\n 
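`sqs_registrar_mensaje` above only covers the producer side of the queue. A sketch of the matching consumer, assuming the same queue name; the boto3 calls (`get_queue_by_name`, `receive_messages`, `Message.delete`) are real APIs, the processing stub is a placeholder:

```python
# Sketch of the worker that would drain the 'sqs_concursos' queue.
import boto3

def consumir_mensajes():
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName='sqs_concursos')
    # Long-poll up to 20s and ask SQS to include the custom attributes
    # the producer sets (id_audio, archivo_original).
    for message in queue.receive_messages(MessageAttributeNames=['All'],
                                          MaxNumberOfMessages=10,
                                          WaitTimeSeconds=20):
        attrs = message.message_attributes or {}
        id_audio = attrs.get('id_audio', {}).get('StringValue')
        archivo = attrs.get('archivo_original', {}).get('StringValue')
        print('procesando', id_audio, archivo)  # conversion would go here
        message.delete()  # acknowledge so SQS does not redeliver
```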
c, addr = s.accept() # 建立客户端连接,阻塞等待\n print(\"连接地址:\" + str(addr))\n # 先获取客户端发送过来的请求\n rec_data = c.recv(64)\n #rec_data = str(rec_data, encoding='utf-8')\n rec_data = rec_data.decode()\n while True:\n # 是返回rgbd or intrinsic\n if rec_data == \"rgbd\" and self.flag1 and self.flag2:\n print('trans '+rec_data)\n self.flag1=False\n self.flag2=False\n arrBuf = bytearray(b'\\xff\\xaa\\xff\\xaa')\n rgbBytes = self.rgb_image.tobytes()\n \n depthBytes = self.depth_image.tobytes()\n # 图片大小\n rgbSize = len(rgbBytes)#rgb one pixel 1 byte char\n depthSize = len(depthBytes)#depth one pixel 4 bytes double\n # 数据体长度 = guid大小(固定) + 图片大小\n rgbdatalen = rgbSize\n depthdatalen = depthSize\n # 组合数据包\n arrBuf += struct.pack(' 0:\n ip = q.get()\n\n return ip\n\n def _get_ip_public(self, queue_target, url, json=False, key=None):\n \"\"\"Request the url service and put the result in the queue_target\"\"\"\n try:\n response = urlopen(url, timeout=self.timeout).read().decode('utf-8')\n except Exception as e:\n logger.debug(('IP plugin - Cannot open URL {} ({})').format(url, e))\n queue_target.put(None)\n else:\n try:\n if not json:\n queue_target.put(response)\n else:\n queue_target.put(loads(response)[key])\n except ValueError:\n queue_target.put(None)\n\n return","sub_path":"pycfiles/ocglances-2.8/glances_ip.py","file_name":"glances_ip.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"77298373","text":"# Copyright 2021 Nokia\n# Licensed under the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.end_headers()\n self.wfile.write(b'TPM2_Send!')\n\n\nhttpd = HTTPServer(('localhost', 8531), SimpleHTTPRequestHandler)\nhttpd.serve_forever()\n","sub_path":"t10/A10HTTPTPMSENDSTATIC/tpm2_send_server.py","file_name":"tpm2_send_server.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"171090243","text":"from matplotlib import pyplot as plt\r\nfrom matplotlib.font_manager import FontProperties\r\n\r\n# 准备数据\r\nx = range(11, 31)\r\ny = [1,0,1,1,2,4,3,2,3,4,4,5,6,5,4,3,3,1,1,1]\r\nz = [1,0,3,1,2,2,3,3,2,1,2,1,1,1,1,1,1,1,1,1]\r\n\r\n# 设置图形显示\r\n# 图形大小\r\nplt.figure(figsize=(15, 6), dpi=80)\r\n# 字体\r\nmyfont = FontProperties(fname=r\"C:\\WINDOWS\\FONTS\\MSYH.TTC\", size=16)\r\n# 标题\r\nplt.xlabel(\"年龄\", fontproperties=myfont)\r\nplt.ylabel(\"数量 (个)\", fontproperties=myfont)\r\nplt.title(\"11岁~30岁交往女(男)朋友数量的走势图\", fontproperties=myfont)\r\n\r\n# 设置x, y显示格式\r\n_xticks = [\"{}岁\".format(x) for x in x]\r\nplt.xticks(x, _xticks, fontproperties=myfont)\r\nplt.yticks(range(1, 10))\r\n\r\n# 设置网格 alpha透明度(0-1)\r\nplt.grid(alpha=0.4)\r\n\r\n# plot画图\r\nplt.plot(x, y, label=\"自己\")\r\nplt.plot(x, z, label=\"同桌\")\r\n\r\n# 设置图例 需要在画图后设置,因为在画图前设置,不知道你需要画几条数据\r\n# 第一步: 在画图时,需要设置label参数,即图例名称\r\n# 第二步: 设置图例,prop参数-设置字体 loc参数-设置图例位置(具体查看源码3)\r\nplt.legend(prop=myfont, loc=0)\r\n\r\n# 保存,显示\r\nplt.savefig(\"朋友个数走势图.png\")\r\nplt.show()\r\n\r\n","sub_path":"Analyse/demo/02_demo/01_matp.py","file_name":"01_matp.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"461855878","text":"\"\"\" This file does the functional test of the \"Text similarity processor\nfrom IO 
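The `struct.pack` format string in the socket server above is not recoverable from this copy (the record is cut mid-call). A minimal sketch of the length-prefixed framing pattern it appears to use, assuming a little-endian 4-byte length after the `b'\xff\xaa\xff\xaa'` magic header; the original's exact layout is an assumption:

```python
# Sketch: length-prefixed framing with struct, as in the RGBD server above.
import struct

MAGIC = b'\xff\xaa\xff\xaa'

def frame(payload: bytes) -> bytes:
    # header + 4-byte little-endian length + body
    return MAGIC + struct.pack('<i', len(payload)) + payload

def unframe(buf: bytes) -> bytes:
    assert buf[:4] == MAGIC, 'bad magic header'
    (length,) = struct.unpack('<i', buf[4:8])
    return buf[8:8 + length]

assert unframe(frame(b'rgbd-bytes')) == b'rgbd-bytes'
```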
layer as well as UI later \"\"\"\nimport os\nimport unittest\nfrom tkinter import Tk\nimport time\nimport subprocess\nfrom test.test_resource import TestResource\nimport pandas as pd\nfrom similarity_processor.similarity_io import SimilarityIO\nfrom similarity_processor.similarity_ui import TextSimilarityWindow\n\n\ndef verify_file_path():\n \"\"\"This function checks the required files are being generated\n or not\"\"\"\n if (os.path.exists(TestResource.merged_file_path) and\n os.path.exists(TestResource.recommendation_file_path) and\n os.path.exists(TestResource.duplicate_id_file_path)):\n return True\n return False\n\n\nclass MyFunctionalTestCase(unittest.TestCase):\n \"\"\" This test class verifies the Text similarity index processing to cover\n similarity_io.py and similarity_core.py file with a test resources\n which simulates the user input file with defined formats required / allowed by the tool \"\"\"\n\n @classmethod\n def tearDown(cls):\n \"\"\"\"Deletes the files created: _merged, _recommendation and _duplicate.\"\"\"\n if os.path.exists(TestResource.merged_file_path):\n os.remove(TestResource.merged_file_path)\n if os.path.exists(TestResource.recommendation_file_path):\n os.remove(TestResource.recommendation_file_path)\n if os.path.exists(TestResource.duplicate_id_file_path):\n os.remove(TestResource.duplicate_id_file_path)\n\n def test_below_ui(self):\n \"\"\" Test function which injects the user input data skipping the\n presentation later to the IO layer to check the underlying functionality \"\"\"\n\n cosine = SimilarityIO(TestResource.file_path,\n TestResource.testcase_id, TestResource.teststeps_id, TestResource.var,\n TestResource.get_new_text)\n cosine.orchestrate_similarity()\n time.sleep(10)\n self.verify_functional_test()\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\", \"Skipping this test on Travis CI.\")\n def test_from_ui_new_text(self):\n \"\"\"Test function which injects the user input data at the presentation later\n to check the end to end functionality\"\"\"\n window = Tk()\n win = TextSimilarityWindow(window)\n win.check_is_new_text.invoke()\n win.path_t.insert(0, str(TestResource.file_path))\n win.uniq_id_t.insert(0, 0)\n win.steps_t.insert(0, \"1,2\")\n time.sleep(2)\n win.new_text.insert(0, \"a3 d4\")\n win.submit.invoke()\n time.sleep(10)\n window.quit()\n self.verify_functional_test(True)\n\n def test_from_command_line(self):\n \"\"\"Test function which provides input using command line interface\"\"\"\n script = os.path.abspath(os.path.join(TestResource.par_dir,\n \"similarity_processor\", \"similarity_cmd.py\"))\n cmd = 'python3.7 %s --p \"%s\" --u \"%s\" --c \"%s\"' % (\n script, TestResource.file_path,\n TestResource.command_unique_id, TestResource.command_colint)\n os.system(cmd)\n time.sleep(10)\n self.verify_functional_test()\n\n def test_invalid_file(self):\n \"\"\"Function test the empty file/ incorrect data/ extra sheet in the input file\"\"\"\n text_check = 'Input data is incorrect/ file is invalid/It has more than one sheet'\n flag = False\n cos_io_obj = SimilarityIO(TestResource.empty_file_path,\n TestResource.command_unique_id, TestResource.command_colint, 0)\n cos_io_obj.orchestrate_similarity()\n line = subprocess.check_output(['tail', '-1', TestResource.log_file_path])\n line = line.decode('UTF-8')\n if text_check in line:\n flag = True\n self.assertEqual(True, flag, \"Validating empty input file from log file\")\n\n def verify_functional_test(self, new_text=False):\n \"\"\" This function verifies 
the result populated from the functional test \"\"\"\n if verify_file_path():\n __data_duplicate = pd.read_excel(TestResource.golden_duplicate_id_file_path)\n\n if new_text:\n __data_merged = pd.read_excel(TestResource.golden_new_merged_file_path)\n __data_recomend = pd.read_excel(TestResource.golden_new_recommendation_file_path)\n else:\n __data_merged = pd.read_excel(TestResource.golden_merged_file_path)\n __data_recomend = pd.read_excel(TestResource.golden_recommendation_file_path)\n\n act_df_recomend = pd.read_excel(TestResource.recommendation_file_path)\n act_df_merged = pd.read_excel(TestResource.merged_file_path)\n act_df_duplicated = pd.read_excel(TestResource.duplicate_id_file_path)\n\n self.assertEqual(True, __data_recomend['Similarity Index'].equals(act_df_recomend['Similarity Index']),\n \"Actual and recommended Similarity Index data matches\")\n self.assertEqual(True, __data_recomend['Test Case(ID / Brief Description)'].equals(\n act_df_recomend['Test Case(ID / Brief Description)']),\n \"Actual and recommended ['Test Case(ID / Brief Description)'] data matches\")\n self.assertEqual(True, __data_recomend['Potential Match'].equals(act_df_recomend['Potential Match']),\n \"Actual and recommended ['Potential Match'] data matches\")\n self.assertEqual(True, __data_merged.equals(act_df_merged), \"Actual and merged data matches\")\n self.assertEqual(True, __data_duplicate.equals(act_df_duplicated),\n \"Actual and duplicated data matches\")\n else:\n self.assertEqual(True, verify_file_path, \"output files are not generated\")\n\n\nif __name__ == '__main__':\n\n unittest.main()\n","sub_path":"test/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"361761904","text":"##\n# Project: gespeaker - A GTK frontend for espeak \n# Author: Fabio Castelli \n# Copyright: 2009-2013 Fabio Castelli\n# License: GPL-2+\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or (at your option)\n# any later version.\n# \n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. 
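`verify_functional_test` above reduces each frame comparison to a boolean via `DataFrame.equals`, so a failing run reports only `True != False`. pandas ships an assertion that names the first differing column and row; a sketch (`assert_frame_equal` is a real pandas API, the paths are placeholders):

```python
# Sketch: pinpoint mismatches instead of asserting on a boolean.
import pandas as pd
from pandas.testing import assert_frame_equal

def comparar_excel(esperado_path, actual_path):
    esperado = pd.read_excel(esperado_path)
    actual = pd.read_excel(actual_path)
    # Raises AssertionError describing the exact cell that differs.
    assert_frame_equal(esperado, actual, check_dtype=False)
```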
See the GNU General Public License for\n# more details.\n# \n# On Debian GNU/Linux systems, the full text of the GNU General Public License\n# can be found in the file /usr/share/common-licenses/GPL-2.\n##\n\nPLUGIN_NAME = 'Pidgin'\nPLUGIN_VERSION = '0.1'\nPLUGIN_DESCRIPTION = 'Interface for Pidgin received messages'\nPLUGIN_AUTHOR = 'Fabio Castelli'\nPLUGIN_ICON = '%s/icon.svg' % __path__[0]\nPLUGIN_WEBSITE = ''\n\nimport dbus\nimport dbus.mainloop.glib\nimport re\nfrom plugins import GespeakerPlugin, register_plugin\n\nclass GespeakerPlugin_Pidgin(GespeakerPlugin):\n def __init__(self, name, version, description, author, icon, website):\n \"Module initialization\"\n GespeakerPlugin.__init__(self, name, version, description, author, icon, website)\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n bus = dbus.SessionBus()\n bus.add_signal_receiver(self.message_received, \n dbus_interface='im.pidgin.purple.PurpleInterface',\n signal_name = 'ReceivingImMsg'\n )\n\n def on_uiready(self, ui):\n self.ui = ui\n\n def message_received(self, account, sender, message, conversation, flags):\n \"New message received\"\n if self.active:\n replaces = {\n '<': '<',\n '>': '>',\n '&': '&'\n }\n message = re.compile(r'<.*?>').sub('', message)\n for k, v in replaces.iteritems():\n message = message.replace(k, v)\n self.logger('message_received(%d, %s, %s, %d, %d)' % (\n account, sender, message, conversation, flags))\n self.ui.proxy['text.set'](message, 0)\n self.ui.proxy['espeak.play'](None, None)\n\nplugin = GespeakerPlugin_Pidgin(\n PLUGIN_NAME, PLUGIN_VERSION, PLUGIN_DESCRIPTION, \n PLUGIN_AUTHOR, PLUGIN_ICON, PLUGIN_WEBSITE)\nregister_plugin(PLUGIN_NAME, plugin)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"300532653","text":"import vk_api\r\nfrom vk_api.utils import get_random_id\r\nfrom vk_api.longpoll import VkLongPoll, VkEventType\r\nimport requests\r\nimport random\r\nfrom time import ctime\r\n\r\nclass vkBot():\r\n def __init__(self, token):\r\n self.token = token\r\n self.Type = {\"SET\": 0, \"UQ\": 1}\r\n self.BAD_WORDS = \"BadWords.txt\"\r\n self.LECTURE_ON_PHYSICS = \"Physics.txt\"\r\n self.ANSWERS = \"Answers.txt\"\r\n\r\n def authorization(self):\r\n '''Авторизация'''\r\n session = requests.Session()\r\n vk_session = vk_api.VkApi(token=self.token)\r\n return vk_session\r\n\r\n def antiMat(self, message):\r\n '''Проверяет сообщение на мат'''\r\n bad_words = self.parseText(self.BAD_WORDS, \"SET\")\r\n for word in bad_words:\r\n if word.lower() in message.lower():\r\n return True\r\n return False\r\n\r\n def elseMessages(self, message):\r\n '''Обработка всех остальных сообщений'''\r\n answers = self.parseText(self.ANSWERS, \"UQ\")\r\n\r\n for dialog in answers:\r\n if dialog[0] in message.lower():\r\n return random.choice(dialog[1])\r\n else:\r\n return \"Команда не опознана!\"\r\n\r\n\r\n\r\n def parseText(self, name, type):\r\n '''Обрабатывает файлы ответов на сообщения'''\r\n r = open(name, \"r\", encoding=\"utf-8\")\r\n text = r.read().replace(\"'\", \"\")\r\n if self.Type[type] == 0:\r\n return text.replace(\" \", \"\").split(\",\")\r\n\r\n elif self.Type[type] == 1:\r\n dialogs = text.replace(\"'\", \"\").split(\";\")\r\n arr = []\r\n for i, value in enumerate(dialogs):\r\n arr.append(value.split(\":\", 1))\r\n for i in arr:\r\n if len(i) >= 2:\r\n i[1] = i[1].split(\",\")\r\n\r\n return arr\r\n\r\n\r\n def sendAnswer(self, 
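`message_received` above strips tags with a regex and un-escapes three entities by hand. In Python 3 (the plugin itself is Python 2 era, note the `iteritems` call), `html.unescape` covers every named and numeric entity; a sketch:

```python
# Sketch: tag stripping plus full entity decoding in one helper.
import re
import html

TAG_RE = re.compile(r'<.*?>')

def plain_text(markup):
    return html.unescape(TAG_RE.sub('', markup))

assert plain_text('<b>1 &lt; 2 &amp;&amp; 3 &gt; 2</b>') == '1 < 2 && 3 > 2'
```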
vk_session, event, answer):\r\n '''Отправка ответного сообщения'''\r\n vk = vk_session.get_api()\r\n vk.messages.send(user_id=event.user_id, random_id=get_random_id(), message=answer)\r\n\r\n\r\n def runChatBot(self, vk_session):\r\n '''Запуск Чат Бота'''\r\n print(\"Запуск Чат Бота DeepSci!\")\r\n\r\n longpoll = VkLongPoll(vk_session)\r\n for event in longpoll.listen():\r\n # Обработка ивента - сообщения\r\n if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\r\n print(event.text + \" - \" + ctime())\r\n if self.antiMat(event.text):\r\n answer = \"Давайте притворимся, что я этого не слышала!\"\r\n print(answer+ \" - \" + ctime())\r\n self.sendAnswer(vk_session, event, answer)\r\n elif \"ФИЗИКА\" in event.text:\r\n self.sendAnswer(vk_session, event, \"Если ты хочешь подготовиться к ЕГЭ по физике, то выбери лекцию, которую хочешь посмотреть!\")\r\n # Сделать так, чтобы появились кнопки у пользователя\r\n else:\r\n answer = self.elseMessages(event.text)\r\n print(answer+ \" - \" + ctime())\r\n self.sendAnswer(vk_session, event, answer)\r\n\r\n# Запускаем нашу машинку\r\nif __name__ == \"__main__\":\r\n # Уникальный токен сообщества\r\n token = \"MyToken\"\r\n myVkBot = vkBot(token) # Создаем объект - чат бота\r\n vk_session = myVkBot.authorization() # Проходим авторизацию\r\n myVkBot.runChatBot(vk_session) # Запускаем\r\n","sub_path":"DeepSci v2.0/PyBot.1.0.py","file_name":"PyBot.1.0.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"444038627","text":"\n\nfrom xai.brain.wordbase.verbs._overuse import _OVERUSE\n\n#calss header\nclass _OVERUSES(_OVERUSE, ):\n\tdef __init__(self,): \n\t\t_OVERUSE.__init__(self)\n\t\tself.name = \"OVERUSES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"overuse\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_overuses.py","file_name":"_overuses.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"481674241","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Script to extract Visual Basic for Applications (VBA).\"\"\"\n\nfrom __future__ import print_function\nimport argparse\nimport logging\nimport sys\n\nfrom olecf_kb import vba\n\n\nclass StdoutWriter(object):\n \"\"\"Class that defines a stdout output writer.\"\"\"\n\n def Close(self):\n \"\"\"Closes the output writer object.\"\"\"\n return\n\n def Open(self):\n \"\"\"Opens the output writer object.\n\n Returns:\n bool: True if successful or False if not.\n \"\"\"\n return True\n\n def WriteText(self, text):\n \"\"\"Writes text to the output.\n\n Args:\n text (str): text.\n \"\"\"\n print(text)\n\n\ndef Main():\n \"\"\"The main program function.\n\n Returns:\n bool: True if successful or False if not.\n \"\"\"\n argument_parser = argparse.ArgumentParser(description=(\n u'Extracts VBA from an OLE Compound File.'))\n\n argument_parser.add_argument(\n u'-d', u'--debug', dest=u'debug', action=u'store_true', default=False,\n help=u'enable debug output.')\n\n argument_parser.add_argument(\n u'source', nargs=u'?', action=u'store', metavar=u'PATH', default=None,\n help=u'path of the OLE Compound File.')\n\n options = argument_parser.parse_args()\n\n if not options.source:\n print(u'Source value is missing.')\n print(u'')\n argument_parser.print_help()\n print(u'')\n return False\n\n logging.basicConfig(\n level=logging.INFO, format=u'[%(levelname)s] %(message)s')\n\n 
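A note on `elseMessages` in the vkBot record above: its trailing `else` is attached to the `for`, not the `if`, so the fallback fires only when the loop exhausts without returning. A small self-contained demonstration of that construct (data illustrative):

```python
# Sketch: Python's for/else, as used by elseMessages above. The else branch
# runs only when the loop finishes without a break or an early return.
def responder(mensaje, dialogos):
    for patron, respuestas in dialogos:
        if patron in mensaje.lower():
            return respuestas[0]        # leaving the loop skips the else
    else:
        return 'Команда не опознана!'   # reached only when nothing matched

dialogos = [('привет', ['Привет!']), ('пока', ['До встречи!'])]
assert responder('Привет, бот', dialogos) == 'Привет!'
assert responder('qwerty', dialogos) == 'Команда не опознана!'
```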
output_writer = StdoutWriter()\n\n if not output_writer.Open():\n print(u'Unable to open output writer.')\n print(u'')\n return False\n\n collector_object = vba.VBACollector(debug=options.debug)\n collector_object.Collect(options.source, output_writer)\n output_writer.Close()\n\n if not collector_object.stream_found:\n print(u'No VBA stream found.')\n\n return True\n\n\nif __name__ == '__main__':\n if not Main():\n sys.exit(1)\n else:\n sys.exit(0)\n","sub_path":"scripts/vba.py","file_name":"vba.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"559246462","text":"'''\nCreated on Aug 12, 2009\n\n@author: bareno\n'''\n\n#__all__ ={'v_angle', 'UCell', 'Hexag', 'get_mono', 'get_spinel', 'get_hex', 'get_NaCl'}\nfrom numpy import *\n\nimport math\nimport pickle\n\ndef v_angle(a,b):\n \"\"\"\n returns angle between two vectors coded as numpy.array\n \"\"\"\n ab=dot(a,b)\n ab/=linalg.norm(a)*linalg.norm(b)\n return(math.degrees(math.acos(ab)))\n\n\nclass UCell:\n '''\n Class to handle crystallographic basis and basis conversions\n '''\n\n\n def __init__(self, a=1., b=1., c=1., alpha=90., beta=90., gamma=90.):\n '''\n Basis constructor from standard description.\n Generates and stores reciprocal lattice basis.\n Matrix M to convert to cartesian coords. Mr in reciprocal space.\n '''\n \n bx =math.cos(math.radians(gamma))\n by = math.sin(math.radians(gamma))\n cx = math.cos(math.radians(beta))\n cy = (math.cos(math.radians(alpha)) - \\\n math.cos(math.radians(beta))*math.cos(math.radians(gamma))) \\\n / math.sin(math.radians(gamma)) \n cz = math.sqrt(1-cx**2-cy**2)\n self.av=array([a,0.,0.])\n self.bv=array([b*bx,b*by,0.])\n self.cv=array([c*cx,c*cy,c*cz])\n self.a=a\n self.b=b\n self.c=c\n self.alpha=alpha\n self.beta=beta\n self.gamma=gamma\n self.vol=dot(self.av,cross(self.bv,self.cv))\n self.M = array([self.av, self.bv, self.cv]).transpose()\n self.iM = linalg.inv(self.M)\n self.avr = cross(self.bv,self.cv)/self.vol\n self.bvr = cross(self.cv,self.av)/self.vol\n self.cvr = cross(self.av,self.bv)/self.vol\n self.ar = linalg.norm(self.avr)\n self.br = linalg.norm(self.bvr)\n self.cr = linalg.norm(self.cvr)\n self.alphar = v_angle(self.bvr,self.cvr)\n self.betar = v_angle(self.cvr,self.avr)\n self.gammar = v_angle(self.bvr,self.avr)\n self.volr = 1./self.vol\n self.Mr = array([self.avr, self.bvr, self.cvr]).transpose()\n self.iMr = linalg.inv(self.Mr)\n \n def __cmp__(self, other):\n if not (self.a == other.a):\n return(False)\n elif not (self.b == other.b):\n return(False) \n elif not (self.c == other.c):\n return(False) \n elif not (self.alpha == other.alpha):\n return(False) \n elif not (self.beta == other.beta):\n return(False) \n elif not (self.gamma == other.gamma):\n return(False) \n else:\n return(True)\n \n def __repr__(self):\n return(str(self.M))\n \n def __str__(self):\n ret_str = \"Direct basis:\\n\"\n ret_str += \"a= %.4f\\tb= %.4f\\tc= %.4f\\n\" %(self.a, self.b, self.c)\n ret_str += \"alpha= %.2f\\tbeta= %.2f\\tgamma= %.2f\\n\" %(self.alpha, self.beta, self.gamma)\n ret_str += \"vol= %.4f\\n\\n\" %(self.vol)\n ret_str += \"Reciprocal basis:\\n\"\n ret_str += \"a= %.4f\\tb= %.4f\\tc= %.4f\\n\" %(self.ar, self.br, self.cr)\n ret_str += \"alpha= %.2f\\tbeta= %.2f\\tgamma= %.2f\\n\" %(self.alphar, self.betar, self.gammar)\n ret_str += \"vol= %.4f\\n\\n\" %(self.volr)\n return(ret_str)\n \n def from_basis_vectors(self,av,bv,cv):\n '''\n Creates a Basis from three basis vectors.\n Specified 
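`v_angle` in the transform3 record above feeds a normalized dot product straight into `math.acos`, which raises `ValueError` when rounding pushes the cosine just past ±1 (near-parallel vectors). A sketch with clamping, mirroring the `abs(cosang) > 1` guards the class methods apply further down:

```python
# Sketch: clamp the cosine into acos's domain before converting to degrees.
import math
from numpy import array, dot, linalg

def v_angle_safe(a, b):
    c = dot(a, b) / (linalg.norm(a) * linalg.norm(b))
    c = max(-1.0, min(1.0, c))          # guard against 1.0000000000000002
    return math.degrees(math.acos(c))

assert abs(v_angle_safe(array([1., 0., 0.]), array([0., 1., 0.])) - 90.0) < 1e-9
```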
as numpy.array's\n '''\n a = 1.0 * linalg.norm(av)\n b = 1.0 * linalg.norm(bv)\n c = 1.0 * linalg.norm(cv)\n alpha = v_angle(bv, cv)\n beta = v_angle(av, cv)\n gamma = v_angle(av, bv)\n ret_basis = UCell(a,b,c, alpha, beta, gamma)\n ret_basis.av=1.0 * av\n ret_basis.bv=1.0 * bv\n ret_basis.cv=1.0 * cv \n ret_basis.vol=dot(ret_basis.av,cross(ret_basis.bv,ret_basis.cv))\n ret_basis.M = array([ret_basis.av, ret_basis.bv, ret_basis.cv]).transpose()\n ret_basis.iM = linalg.inv(ret_basis.M)\n ret_basis.avr = cross(ret_basis.bv,ret_basis.cv)/ret_basis.vol\n ret_basis.bvr = cross(ret_basis.cv,ret_basis.av)/ret_basis.vol\n ret_basis.cvr = cross(ret_basis.av,ret_basis.bv)/ret_basis.vol\n ret_basis.ar = linalg.norm(ret_basis.avr)\n ret_basis.br = linalg.norm(ret_basis.bvr)\n ret_basis.cr = linalg.norm(ret_basis.cvr)\n ret_basis.alphar = v_angle(ret_basis.bvr,ret_basis.cvr)\n ret_basis.betar = v_angle(ret_basis.cvr,ret_basis.avr)\n ret_basis.gammar = v_angle(ret_basis.bvr,ret_basis.avr)\n ret_basis.volr = 1./ret_basis.vol\n ret_basis.Mr = array([ret_basis.avr, ret_basis.bvr, ret_basis.cvr]).transpose()\n ret_basis.iMr = linalg.inv(ret_basis.Mr)\n return(ret_basis)\n \n \n def extend(self, xa=1, xb=1, xc=1):\n '''\n extends unit cell along basis vectors times xa, xb, xc\n '''\n retBasis = UCell(xa*self.a, xb*self.b, xc*self.c, self.alpha, self.beta, self.gamma)\n retBasis.av = xa * self.av\n retBasis.bv = xb * self.bv\n retBasis.cv = xc * self.cv \n retBasis.vol=dot(retBasis.av,cross(retBasis.bv,retBasis.cv))\n retBasis.M = array([retBasis.av, retBasis.bv, retBasis.cv]).transpose()\n retBasis.iM = linalg.inv(retBasis.M)\n retBasis.avr = cross(retBasis.bv,retBasis.cv)/retBasis.vol\n retBasis.bvr = cross(retBasis.cv,retBasis.av)/retBasis.vol\n retBasis.cvr = cross(retBasis.av,retBasis.bv)/retBasis.vol\n retBasis.ar = linalg.norm(retBasis.avr)\n retBasis.br = linalg.norm(retBasis.bvr)\n retBasis.cr = linalg.norm(retBasis.cvr)\n retBasis.alphar = v_angle(retBasis.bvr,retBasis.cvr)\n retBasis.betar = v_angle(retBasis.cvr,retBasis.avr)\n retBasis.gammar = v_angle(retBasis.bvr,retBasis.avr)\n retBasis.volr = 1./retBasis.vol\n retBasis.Mr = array([retBasis.avr, retBasis.bvr, retBasis.cvr]).transpose()\n retBasis.iMr = linalg.inv(retBasis.Mr)\n return(retBasis) \n \n def dir_to_cart(self, v):\n return(dot(self.M,v))\n \n def dir_from_cart(self, v):\n return(dot(self.iM,v))\n \n def plane_to_cart(self,v):\n return(dot(self.Mr,v))\n \n def plane_from_cart(self,v):\n return(dot(self.iMr, v))\n \n def dir_from_plane(self, v):\n return(self.dir_from_cart(self.plane_to_cart(v)))\n \n def plane_from_dir(self, v):\n return(self.plane_from_cart(self.dir_to_cart(v)))\n \n def dir_norm(self, v):\n return(linalg.norm(self.dir_to_cart(v)))\n \n def plane_dist(self, p):\n return(1./linalg.norm(self.plane_to_cart(p)))\n \n def dir_dot(self, d1, d2):\n return(dot(self.dir_to_cart(d1), self.dir_to_cart(d2) ))\n \n def plane_dot(self, p1, p2):\n return(dot(self.plane_to_cart(p1), self.plane_to_cart(p2)))\n \n def mix_dot(self, p, v):\n return(dot(p,v))\n \n def plane_angle(self, p1, p2):\n dot = self.plane_dot(p1, p2)\n cosang = dot * self.plane_dist(p1) * self.plane_dist(p2)\n if abs(cosang)>1:\n return(0.)\n else:\n return(math.degrees(math.acos(cosang))) \n \n def dir_angle(self, v1, v2):\n dot = self.dir_dot(v1, v2)\n cosang = dot / (self.dir_norm(v1)* self.dir_norm(v2))\n if abs(cosang)>1:\n return(0.)\n else:\n return(math.degrees(math.acos(cosang)))\n \n def mix_angle(self, v1, p2):\n dot = self.mix_dot(v1, p2)\n cosang = 
dot * self.plane_dist(p2)/ self.dir_norm(v1)\n if abs(cosang)>1:\n return(0.)\n else:\n return(math.degrees(math.acos(cosang)))\n \n \n def common_plane(self, d1, d2):\n cart_plane = cross(self.dir_to_cart(d1),self.dir_to_cart(d2))\n return(self.plane_from_cart(cart_plane))\n \n def common_dir(self, p1, p2):\n cart_dir = cross(self.plane_to_cart(p1),self.plane_to_cart(p2))\n return(self.dir_from_cart(cart_dir))\n \n def new_basis(self, G):\n \"\"\" \n Creates a new base from matrix G.\n Columns of G taken as coords of new basis vectors in self basis.\n \"\"\"\n nB = UCell(1,1,1)\n nB.av = self.dir_to_cart(G.transpose()[0])\n nB.bv = self.dir_to_cart(G.transpose()[1])\n nB.cv = self.dir_to_cart(G.transpose()[2])\n nB.M = array([nB.av, nB.bv, nB.cv]).transpose()\n nB.iM = linalg.inv(nB.M)\n nB.a = linalg.norm(nB.av)\n nB.b = linalg.norm(nB.bv)\n nB.c = linalg.norm(nB.cv)\n nB.alpha = v_angle(nB.bv, nB.cv)\n nB.beta = v_angle(nB.cv, nB.av)\n nB.gamma = v_angle(nB.av, nB.bv)\n nB.vol = dot(nB.av, cross(nB.bv, nB.cv))\n nB.avr = cross(nB.bv, nB.cv)/nB.vol\n nB.bvr = cross(nB.cv, nB.av)/nB.vol\n nB.cvr = cross(nB.av, nB.bv)/nB.vol\n nB.ar = linalg.norm(nB.avr)\n nB.br = linalg.norm(nB.bvr)\n nB.cr = linalg.norm(nB.cvr)\n nB.alphar = v_angle(nB.bvr, nB.cvr)\n nB.betar = v_angle(nB.avr, nB.cvr)\n nB.gammar = v_angle(nB.avr, nB.bvr)\n nB.Mr = array([nB.avr, nB.bvr, nB.cvr]).transpose()\n nB.iMr = linalg.inv(nB.Mr)\n nB.volr = dot(nB.avr, cross(nB.bvr, nB.cvr))\n return(nB)\n \n def seek_angle(self, target, tol, rg=range(-3,4)):\n s=[]\n for h1 in rg:\n for k1 in rg:\n for l1 in rg:\n for h2 in rg:\n for k2 in rg:\n for l2 in rg:\n x = self.plane_angle(array([h1,k1,l1]), array([h2,k2,l2]))\n if abs(x-target)< tol:\n st1 =\"(%d, %d, %d)\\t\" %(h1,k1,l1)\n st1 += \"(%d, %d, %d)\\t%.2f\" %(h2,k2,l2,x)\n s.append(st1)\n return(s)\n \n def update_basis(self, **update_dir):\n '''\n Auxiliary for XRD 2theta optimization\n Gnerates new cell with modified lattice params\n Needs to be called with named arguments\n If param not in arg_list, use same value as self'''\n cell_params = {}\n for key in ['a', 'b', 'c', 'alpha', 'beta', 'gamma']:\n cell_params[key]= getattr(self,key)\n if key in update_dir:\n #this is a guess param\n cell_params[key]=update_dir[key]\n \n return(UCell(**cell_params))\n \n def to_file(self, fname):\n ''' Saves UCell object to file '''\n f = open(fname, 'w')\n pickle.dump(self, f)\n f.close()\n return()\n \n def from_file(self, fname):\n f = open(fname, 'r')\n ret = pickle.load(f)\n f.close()\n return(ret)\n \nclass Hexag(UCell):\n '''\n extension of Basis to handle 4-index notation in hexagonal lattices\n '''\n def __init__(self, a=1., c=1.):\n '''\n just call Basis__init__\n '''\n UCell.__init__(self, a, a, c, 90, 90, 120)\n \n \n def four_dir_to_three(self, v):\n h,k,i,l = tuple(v)\n return(array([h-i, k-i, l]))\n \n def three_dir_to_four(self, v):\n s,t,u = tuple(v)\n return(array([(2.*s-t)/3, (2.*t-s)/3, -1.*(s+t)/3, u]))\n \n def four_plane_to_three(self, v):\n h,k,i,l = tuple(v)\n return(array([h,k,l]))\n \n def three_plane_to_four(self,v):\n h,k,l = tuple(v)\n return(array([h,k,-(h+k),l]))\n \n def four_dir_to_cart(self,v):\n return(UCell.dir_to_cart(self, self.four_dir_to_three(v)))\n \n def four_dir_from_cart(self, v):\n return(self.three_dir_to_four(UCell.dir_from_cart(self, v)))\n \n def four_plane_to_cart(self, v):\n return(UCell.plane_to_cart(self, self.four_plane_to_three(v)))\n \n def four_plane_from_cart(self, v):\n 
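The six nested hkl loops in `seek_angle` above can be collapsed with `itertools.product`; a sketch, assuming `cell` is a `UCell` instance from this same record:

```python
# Sketch: flatten seek_angle's six nested loops into one product iteration.
from itertools import product
from numpy import array

def seek_angle_flat(cell, target, tol, rg=range(-3, 4)):
    hits = []
    for h1, k1, l1, h2, k2, l2 in product(rg, repeat=6):
        x = cell.plane_angle(array([h1, k1, l1]), array([h2, k2, l2]))
        if abs(x - target) < tol:
            hits.append('(%d, %d, %d)\t(%d, %d, %d)\t%.2f'
                        % (h1, k1, l1, h2, k2, l2, x))
    return hits
```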
return(self.three_plane_to_four(UCell.plane_from_cart(self, v)))\n \n def four_dir_from_four_plane(self,v):\n return(self.four_dir_from_cart(self.four_plane_to_cart(v)))\n \n def four_plane_from_four_dir(self, v):\n return(self.four_plane_from_cart(self.four_dir_to_cart(v)))\n \n def four_dir_norm(self, v):\n return(linalg.norm(self.four_dir_to_cart(v)))\n \n def four_plane_dist(self, p):\n return(1./linalg.norm(self.four_plane_to_cart(p)))\n \n def four_dir_dot(self, d1, d2):\n return(dot(self.four_dir_to_cart(d1), self.four_dir_to_cart(d2) ))\n \n def four_plane_dot(self, p1, p2):\n return(dot(self.four_plane_to_cart(p1), self.four_plane_to_cart(p2)))\n \n def four_mix_dot(self, p, v):\n return(dot(p,v))\n \n def four_plane_angle(self, p1, p2):\n dot = self.four_plane_dot(p1, p2)\n cosang = dot * self.four_plane_dist(p1) * self.four_plane_dist(p2)\n if abs(cosang)>1:\n return(0.)\n else:\n return(math.degrees(math.acos(cosang))) \n \n def four_dir_angle(self, v1, v2):\n dot = self.four_dir_dot(v1, v2)\n cosang = dot / (self.four_dir_norm(v1)* self.four_dir_norm(v2))\n if abs(cosang)>1:\n return(0.)\n else:\n return(math.degrees(math.acos(cosang)))\n \n def mix_angle(self, v1, p2):\n dot = self.four_mix_dot(v1, p2)\n cosang = dot * self.four_plane_dist(p2)/ self.four_dir_norm(v1)\n if abs(cosang)>1:\n return(0.)\n else:\n return(math.degrees(math.acos(cosang)))\n \n def common_four_plane(self, d1, d2):\n cart_plane = cross(self.four_dir_to_cart(d1),self.four_dir_to_cart(d2))\n return(self.four_plane_from_cart(cart_plane))\n \n def common_four_dir(self, p1, p2):\n cart_dir = cross(self.four_plane_to_cart(p1),self.four_plane_to_cart(p2))\n return(self.four_dir_from_cart(cart_dir))\n \n def from_UCell(self, B):\n #check that it is hexagonal\n a = (B.a + B.b) / 2.\n b = (B.a - B.b) / 2.\n c = B.c\n if abs(b/a) > 0.01:\n return(0)\n \n if abs(B.gamma - 120.) > 0.2:\n return(0)\n \n if abs(B.alpha - 90.) > 0.2:\n return(0)\n \n if abs(B.beta - 90.) 
> 0.2:\n return(0)\n \n ret_basis = Hexag(a,c) \n ret_basis.av=B.av \n ret_basis.bv=B.bv \n ret_basis.cv=B.cv \n \n ret_basis.M = array([ret_basis.av, ret_basis.bv, ret_basis.cv]).transpose()\n ret_basis.iM = linalg.inv(ret_basis.M)\n ret_basis.avr = cross(ret_basis.bv,ret_basis.cv)/ret_basis.vol\n ret_basis.bvr = cross(ret_basis.cv,ret_basis.av)/ret_basis.vol\n ret_basis.cvr = cross(ret_basis.av,ret_basis.bv)/ret_basis.vol\n ret_basis.ar = linalg.norm(ret_basis.avr)\n ret_basis.br = linalg.norm(ret_basis.bvr)\n ret_basis.cr = linalg.norm(ret_basis.cvr)\n ret_basis.alphar = v_angle(ret_basis.bvr,ret_basis.cvr)\n ret_basis.betar = v_angle(ret_basis.cvr,ret_basis.avr)\n ret_basis.gammar = v_angle(ret_basis.bvr,ret_basis.avr)\n ret_basis.volr = 1./ret_basis.vol\n ret_basis.Mr = array([ret_basis.avr, ret_basis.bvr, ret_basis.cvr]).transpose()\n ret_basis.iMr = linalg.inv(ret_basis.Mr)\n return(ret_basis)\n \n\n#Aux functions to build standard unit cells during runtime\ndef get_mono():\n a_m = array([0.5,0.5,-1])\n b_m = 0.5*array([-3,3,0])\n c_m = array([0.5,0.5,1])\n M=UCell().from_basis_vectors(a_m, b_m, c_m)\n return(M)\n\ndef get_spinel():\n a_m = array([2,0,0])\n b_m = array([0,2,0])\n c_m = array([0,0,2])\n S=UCell().from_basis_vectors(a_m, b_m, c_m)\n return(S)\n\ndef get_NaCl():\n return(UCell())\n\ndef get_hex():\n a_m = 0.5*array([0,1,-1])\n b_m = 0.5*array([-1,0,1])\n c_m = array([2,2,2])\n H = Hexag().from_basis_vectors(a_m, b_m, c_m)\n return(Hexag().from_UCell(H)) \n \n\n#Test code follows\nif __name__ == '__main__':\n '''Initialize Li-ion crystallography session'''\n from transform3 import *\n import pickle\n \n print('Imports bases needed for common NMC projects:')\n \n M=get_mono()\n S=get_spinel()\n RS=get_NaCl()\n H=get_hex()\n \n\n \n ","sub_path":"transform3.py","file_name":"transform3.py","file_ext":"py","file_size_in_byte":16097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"588268594","text":"Group = {}\nprint(\"### K-Pop 그룹 관리 프로그램 ###\")\n\nwhile True:\n menu = int(input(\"[1:등록/2:삭제/3:조회/4:조회]:\"))\n if menu == 4:\n break\n elif menu == 1:\n name = input(\"그룹 이름이 무엇인가?\")\n member = input(\"그룹 인원이 몇명인가?\")\n Group[name] = member\n elif menu == 2:\n delete = input(\"삭제할 그룹 이름이 무엇인가?\")\n del(Group[delete])\n elif menu == 3:\n print(Group)\n\n print(\"### K-Pop 관리 프로그램 종료 ###\")","sub_path":"School Data Structure/python/basic2/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"452837458","text":"ferias = {}\n\nwhile True:\n pessoa = input(\"\\nQual o seu nome? ('quit' para sair) \")\n if pessoa == 'quit':\n break\n lugar = input(\"Se pudesse visitar qualquer lugar do mundo, para onde você iria? 
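A worked check of the Miller–Bravais direction conversions defined in the `Hexag` class above: the fourth index is the redundant `i = -(h + k)`, and the two mappings invert each other. The formulas below are copied from the record; the round-trip assertions are the added illustration:

```python
# Worked example: 3-index <-> 4-index hexagonal direction conversion.
from numpy import array, allclose

def three_dir_to_four(v):
    s, t, u = v
    return array([(2.*s - t)/3, (2.*t - s)/3, -1.*(s + t)/3, u])

def four_dir_to_three(v):
    h, k, i, l = v
    return array([h - i, k - i, l])

a3 = array([1., 0., 0.])
a4 = three_dir_to_four(a3)                  # [ 2/3, -1/3, -1/3, 0 ]
assert allclose(a4[0] + a4[1] + a4[2], 0)   # h + k + i == 0 by construction
assert allclose(four_dir_to_three(a4), a3)  # round trip recovers [1 0 0]
```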
\")\n ferias[pessoa] = lugar\n\nfor pessoa, lugar in ferias.items():\n print(\"\\nAs férias dos sonhos de {} seriam em {}.\"\n .format(pessoa.title(), lugar.title()))","sub_path":"cap_07/ferias.py","file_name":"ferias.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"428552143","text":"import asyncio\nimport time\nimport datetime\n\nloop = asyncio.get_event_loop()\n\nasync def printTime():\n t = datetime.date.today()\n print(t)\n\nasync def foreverTime():\n while True:\n t = datetime.date.today()\n asyncio.sleep(1)\n print(t)\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(foreverTime())\n\nif __name__ == '__main__':\n print(\"tarik \")\n loop.close()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"490965250","text":"import os\nfrom slack.web.client import WebClient\n\nSlackWebClient = None\n\ndef GetSlackWebClient():\n global SlackWebClient\n if SlackWebClient is None:\n slack_token = os.environ['SLACK_BOT_TOKEN']\n SlackWebClient = WebClient(token=slack_token)\n\n return SlackWebClient","sub_path":"src/factories/WebClientFactory.py","file_name":"WebClientFactory.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"557129193","text":"from typing import List, Set\nfrom collections import Counter\n\n\ndef get_check_digit(pin: int) -> int:\n product_pairs = [\n int(val) * (i + 2) for i, val in enumerate(list(str(pin))[::-1])\n ]\n sum_of_products = sum(product_pairs)\n\n if sum_of_products == 17:\n return -1\n\n check_digit = sum_of_products % 123\n\n if check_digit < 10:\n return check_digit\n else:\n return get_check_digit(sum_of_products)\n\n\ndef get_check_digits_in_range(pin: int, pin_range: int) -> List[int]:\n check_digits_in_range = [\n get_check_digit(pin + n) for n in range(pin_range + 1)\n ]\n check_digits_in_range_without_discards = [\n n for n in check_digits_in_range if n != -1\n ]\n\n return check_digits_in_range_without_discards\n\n\ndef get_most_common_check_digits_in_range(pin: int, pin_range: int) -> Set[int]:\n check_digits = get_check_digits_in_range(pin, pin_range)\n check_digit_occurance_pairs = Counter(check_digits).most_common()\n\n largest_occurance = 0\n most_common_digits = set()\n\n for check_digit, occurance in check_digit_occurance_pairs:\n if occurance >= largest_occurance:\n largest_occurance = occurance\n most_common_digits.add(check_digit)\n\n return most_common_digits\n\n\nfor _ in range(5):\n input_line = input()\n pin = int(input_line.split(', ')[0])\n pin_range = int(input_line.split(', ')[1])\n most_common_check_digits = get_most_common_check_digits_in_range(\n pin, pin_range)\n most_common_check_digit_strings = [\n str(n) for n in most_common_check_digits\n ]\n print(', '.join(most_common_check_digit_strings))\n","sub_path":"2018-2019/practice1/acsl.py","file_name":"acsl.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"77525817","text":"from lib.yjSysUtils import *\r\nfrom SQLiteCommon import *\r\n\r\ndef isValidStr(s):\r\n for i in range(0, len(s)):\r\n v = ord(s[i])\r\n if (v in range(0, 0x1f + 1)) and (not v in [9, 10, 13]):\r\n return False\r\n return True\r\n\r\nTRecordInfo = type('TRecordInfo', (), 
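In the test.py record above, `asyncio.sleep(1)` is called without `await`, so the coroutine object is created and discarded and the loop spins flat out; the loop is also created twice. A corrected sketch of the same idea, bounded so it terminates (`asyncio.run` is the Python 3.7+ entry point):

```python
# Sketch: the awaited version of foreverTime from test.py above.
import asyncio
import datetime

async def forever_time(iterations=3):
    for _ in range(iterations):          # bounded here so the sketch ends
        print(datetime.date.today())
        await asyncio.sleep(1)           # actually yields to the event loop

asyncio.run(forever_time())              # replaces get_event_loop +
                                         # run_until_complete
```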
dict(tableColumns = {}, fieldDataTypeInfos = [], lengthOfRecord = 0, stPos_FieldDataTypes = 0, stPos_RecordData = 0))\r\n\r\n\"\"\"\r\n TSQLiteCarver:\r\n .getRecordInfo(self, stPos_FieldDataTypes, tableColumns)\r\n .getRecordData(self, recordInfo)\r\n\"\"\"\r\nclass TSQLiteCarver:\r\n def __init__(self, dbDump):\r\n self.dbDump = dbDump\r\n pass\r\n\r\n def __del__(self):\r\n pass\r\n\r\n def getTextEncoding(self):\r\n return self.dbDump.textEncoding\r\n\r\n # http://ysbulpe.egloos.com/2282868\r\n def _isValidRecordInfo(self, recordInfo):\r\n \"\"\" \r\n recordInfo가 가지고 있는 실데이터에 대한 Field data types이 해당 Table의 Field Types에 포함될 수 있는 값인지 확인한다.\r\n 만약 Field data type이 Table의 Field Types에 포함될 수 없는 Type이면 잘못된 recordInfo라고 할 수 있다.\r\n \"\"\"\r\n for i, (key, value) in enumerate(recordInfo.tableColumns.items()):\r\n # fieldType이 'INTEGER'라면, dataType은 TFieldDataType.dtInt (1..8 bytes), dtConst0, dtConst1 이 들어갈 수 있다.\r\n dataType = recordInfo.fieldDataTypeInfos[i][1] # dataType = (4, TFieldDataType.dtInt)[i]\r\n fieldType = value[0] # fieldType = [TTableColumnType.Str, 255][0]\r\n if fieldType == TTableColumnType.Str:\r\n if not dataType in [TFieldDataType.dtStr, TFieldDataType.dtNULL]:\r\n return False\r\n elif fieldType == TTableColumnType.Int:\r\n if not dataType in [TFieldDataType.dtConst0, TFieldDataType.dtConst1, TFieldDataType.dtInt, TFieldDataType.dtNULL]: \r\n return False\r\n elif fieldType == TTableColumnType.Blob:\r\n if not dataType in [TFieldDataType.dtBLOB, TFieldDataType.dtNULL]: \r\n return False\r\n elif fieldType in [TTableColumnType.Double, TTableColumnType.Float]:\r\n if not dataType in [TFieldDataType.dtConst0, TFieldDataType.dtConst1, TFieldDataType.dtInt, TFieldDataType.dtFloat, TFieldDataType.dtNULL]: \r\n return False\r\n elif fieldType == TTableColumnType.TimeStamp: \r\n if not dataType in [TFieldDataType.dtStr, TFieldDataType.dtFloat, TFieldDataType.dtNULL]:\r\n return False\r\n else: assert False\r\n return True\r\n\r\n def getRecordInfo(self, stPos_FieldDataTypes, tableColumns):\r\n \"\"\" \r\n 지정 위치의 RecordInfo를 구한다. \r\n 지정 위치는 레코드 Field data types이 있는 위치다.\r\n\r\n RecordInfo는 tableColumns, 레코드 실데이터가 있는 시작 위치(stPos_RecordData)와 레코드 실데이터에 대한 Type정보(fieldDataTypeInfos)등을 가지고 있다.\r\n \"\"\"\r\n def getSize_FieldDataTytes(stPos_FieldDataTypes, fieldCount):\r\n i = fieldCount\r\n size = 0\r\n dbDump.position = stPos_FieldDataTypes\r\n while i > 0:\r\n if (dbDump.read(1, 'B') & 0x80) != 0: # String, Blob\r\n size += 1\r\n if (dbDump.read(1, 'B') & 0x80) != 0: return 0 \r\n size += 1\r\n i -= 1\r\n return size\r\n\r\n def getRecordDataSize(fieldDataTypeInfos):\r\n size = 0\r\n for i in range(0, len(fieldDataTypeInfos)):\r\n size += fieldDataTypeInfos[i][0]\r\n return size\r\n\r\n # getRecordInfo()\r\n dbDump = self.dbDump\r\n try:\r\n size_FieldDataTypes = getSize_FieldDataTytes(stPos_FieldDataTypes, len(tableColumns))\r\n if size_FieldDataTypes > 0:\r\n fieldDataTypesBlob = dbDump.read(size_FieldDataTypes, stPos = stPos_FieldDataTypes)\r\n\r\n recordInfo = createObject(TRecordInfo)\r\n recordInfo.fieldDataTypeInfos = getFieldDataTypes(fieldDataTypesBlob)\r\n if recordInfo.fieldDataTypeInfos == []:\r\n #if __debug__: print('Error: FieldDataTypes 영역이 아닙니다. 
#2', end='\\r')\r\n return None\r\n if __debug__: assert len(recordInfo.fieldDataTypeInfos) == len(tableColumns)\r\n recordInfo.lengthOfRecord = getRecordDataSize(recordInfo.fieldDataTypeInfos); # 레코드 크기 (SQLite 레코드는 크기가 가변이다)\r\n recordInfo.stPos_FieldDataTypes = stPos_FieldDataTypes\r\n recordInfo.stPos_RecordData = stPos_FieldDataTypes + size_FieldDataTypes\r\n recordInfo.tableColumns = tableColumns\r\n if self._isValidRecordInfo(recordInfo): return recordInfo\r\n else: return None \r\n else:\r\n #if __debug__: print('Error: FieldDataTypes 영역이 아닙니다. #1', end='\\r')\r\n return None\r\n except TypeError: # dbDump.data 범위를 벗어난 경우...\r\n return None\r\n\r\n def __getRecordData(self, recordInfo):\r\n recordData = []\r\n if recordInfo != None:\r\n pos = recordInfo.stPos_RecordData\r\n i = 0\r\n for i, (_, fieldType) in enumerate(recordInfo.tableColumns.items()):\r\n (v, pos) = self.dbDump.getAFieldData(i, recordInfo.fieldDataTypeInfos, stPos_FieldData = pos)\r\n if recordInfo.fieldDataTypeInfos[i][1] == TFieldDataType.dtNULL: # 값이 NULL인 경우... (NULL값은 모든 DataType에 다 들어갈 수 있음)\r\n if fieldType[0] == TTableColumnType.Str: v = ''\r\n else: v = None\r\n if fieldType[0] == TTableColumnType.Str: # \"TEXT\"\r\n ep = len(fieldType) - 1 \r\n if ep != 0:\r\n if type(fieldType[1]) is int: # [TTableColumnType.Str, 1, 255, \"UNIQUE\"]\r\n if ('NOT NULL' in fieldType) and (v == None): break\r\n if (len(fieldType) >= 3) and not (fieldType[1] <= len(v) <= fieldType[2]): break\r\n if not type(fieldType[ep]) is int: # [TTableColumnType.Str, \"UNIQUE\", \"NOT NULL\"] , \"_data\": [TTableColumnType.Str, 1, 255, \"NOT NULL\", \"UNIQUE\"]\r\n if ('NOT NULL' in fieldType) and (v == ''): break\r\n if 'UNIQUE' in fieldType: pass\r\n if (v == None) or ((len(v) > 0) and (v.replace('\\x00', '') == '')): break # TextEncoding이 안되었거나, 문자열이 '\\x00'로 채워진 경우 잘못된 문자열로 본다.\r\n else:\r\n if (len(fieldType) >= 3) and not (fieldType[1] <= v <= fieldType[2]): break\r\n recordData.append(v)\r\n if len(recordData) != len(recordInfo.tableColumns): recordData = []\r\n return recordData\r\n\r\n def getRecordData(self, recordInfo, exportFieldsIndex = None):\r\n \"\"\" recordInfo의 record 데이터를 구한다. 
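For reference against the carving logic above, these are the content lengths implied by SQLite serial types per the SQLite file-format spec; the `TFieldDataType` names used by `getFieldDataTypes` map onto these rows:

```python
# Reference sketch: SQLite record serial type -> payload length in bytes.
def serial_type_length(n):
    fixed = {0: 0,                               # NULL (dtNULL)
             1: 1, 2: 2, 3: 3, 4: 4, 5: 6, 6: 8, # big-endian ints (dtInt)
             7: 8,                               # IEEE-754 float (dtFloat)
             8: 0, 9: 0}                         # constants 0 / 1 (dtConst*)
    if n in fixed:
        return fixed[n]
    if n >= 12 and n % 2 == 0:
        return (n - 12) // 2                     # BLOB payload (dtBLOB)
    if n >= 13:
        return (n - 13) // 2                     # text payload (dtStr)
    raise ValueError('serial types 10 and 11 are reserved')

assert serial_type_length(25) == 6               # a 6-byte text field
```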
\"\"\"\r\n i = recordInfo.stPos_FieldDataTypes\r\n result = []\r\n recordData = self.__getRecordData(recordInfo)\r\n if recordInfo.lengthOfRecord > 0:\r\n if len(recordData) != 0:\r\n if exportFieldsIndex != None: result = exportRecordFields(recordData, exportFieldsIndex)\r\n else: result = recordData\r\n i = recordInfo.stPos_RecordData + recordInfo.lengthOfRecord\r\n else:\r\n i = recordInfo.stPos_FieldDataTypes + len(recordInfo.fieldDataTypeInfos) \r\n else: \r\n i = recordInfo.stPos_FieldDataTypes + len(recordInfo.fieldDataTypeInfos) \r\n return (result, i)\r\n","sub_path":"utils/sqliteparser/SQLiteCarving.py","file_name":"SQLiteCarving.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"108837394","text":"import csv\n\ndata = csv.reader(open('C:/Users/Yesha/website/booking_reviews.csv'), delimiter=\",\")\n\ndef jan():\n Month = \"Jan\"\n Year = \"2017\"\n count_month = 0\n count = 0\n\n monthly = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\n while True:\n for row in data:\n if row[2] == monthly[count]:\n if row[4] == \"2017\":\n if row[6] == \"pos\":\n count = count + 1\n if count == 11:\n break\n\n Sentiment = \"pos\"\n\n print(Month + \" \" + Year + \" \" + Sentiment + \" \" + str(count))\n\nif __name__ == \"__main__\":\n jan()","sub_path":"Yesha/2ND ITERATION/website/monthly.py","file_name":"monthly.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"226288192","text":"# coding: utf-8\n\"\"\"\n关键点\n\n定时爬取\n\"\"\"\n\nimport json\nfrom workers import app\nfrom db.myredis import RedisApi\n\nfrom logger.logger import logger\n#\n#\n# def clean():\n# redis = RedisApi()\n# redis.brpop('tasks.douban.movie250.crawl', 1)\n\n\ndef manage_crawl_task():\n # urls = [\n # 'https://movie.douban.com/top250?start=0',\n # 'https://movie.douban.com/top250?start=25',\n # 'https://movie.douban.com/top250?start=50',\n # 'https://movie.douban.com/top250?start=75',\n # ]\n\n urls = []\n for i in range(10):\n start = i * 25\n url = 'https://movie.douban.com/top250?start={}'.format(start)\n urls.append(url)\n\n redis = RedisApi()\n\n data = {\n 'urls': urls,\n 'task': 'tasks.douban.movie250.crawl',\n }\n redis.push('crawler:urlpool', json.dumps(data))\n\n print('push')\n # for url in urls:\n # # app.send_task('tasks.repost.crawl', args=(url,))çç\n # # app.send_task('tasks.repost.crawl')\n # # app.send_task('tasks.douban.movie250.crawl')\n # app.send_task('tasks.douban.movie250.crawl')\n\n\nif __name__ == '__main__':\n manage_crawl_task()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"566160627","text":"# <>\n# Copyright (c) 2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n# Written by the LLNL Nuclear Data and Theory group\n# (email: mattoon1@llnl.gov)\n# LLNL-CODE-683960.\n# All rights reserved.\n# \n# This file is part of the FUDGE package (For Updating Data and \n# Generating Evaluations)\n# \n# When citing FUDGE, please use the following reference:\n# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. 
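`manage_crawl_task` above only shows the producer pushing JSON onto `crawler:urlpool`. A sketch of the worker side, written against plain redis-py since the `RedisApi` wrapper's internals aren't in the record (`Redis` and `brpop` are real redis-py calls; the key and payload fields come from the producer):

```python
# Sketch: pop and decode one crawl task pushed by manage_crawl_task above.
import json
import redis

r = redis.Redis()  # assumes a local Redis, as the wrapper presumably does

def pop_crawl_task(timeout=5):
    item = r.brpop('crawler:urlpool', timeout)   # blocks until a task lands
    if item is None:
        return None                              # queue stayed empty
    _key, raw = item
    data = json.loads(raw)
    return data['task'], data['urls']            # fields written by producer
```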
Brown, \"Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data\", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10. 1016/j.nds.2012.11.008\n# \n# \n# Please also read this link - Our Notice and Modified BSD License\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the disclaimer below.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the disclaimer (as noted below) in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of LLNS/LLNL nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,\n# THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# \n# Additional BSD Notice\n# \n# 1. This notice is required to be provided under our contract with the U.S.\n# Department of Energy (DOE). This work was produced at Lawrence Livermore\n# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.\n# \n# 2. Neither the United States Government nor Lawrence Livermore National Security,\n# LLC nor any of their employees, makes any warranty, express or implied, or assumes\n# any liability or responsibility for the accuracy, completeness, or usefulness of any\n# information, apparatus, product, or process disclosed, or represents that its use\n# would not infringe privately-owned rights.\n# \n# 3. Also, reference herein to any specific commercial products, process, or services\n# by trade name, trademark, manufacturer or otherwise does not necessarily constitute\n# or imply its endorsement, recommendation, or favoring by the United States Government\n# or Lawrence Livermore National Security, LLC. 
The views and opinions of authors expressed\n# herein do not necessarily state or reflect those of the United States Government or\n# Lawrence Livermore National Security, LLC, and shall not be used for advertising or\n# product endorsement purposes.\n# \n# <>\n \nimport site_packages.legacy.toENDF6.gndToENDF6 as gndToENDF6Module\nimport site_packages.legacy.toENDF6.endfFormats as endfFormatsModule\nimport fudge.gnd.reactionData.crossSection as crossSectionModule\n\ndef toENDF6( self, MT, endfMFList, targetInfo, level, LR ) :\n \"\"\"\n Convert self into ENDF format\n\n :param int MT: The ENDF reaction designator, MT\n :param endfMFList:\n :param targetInfo:\n :param level:\n :param LR:\n \"\"\"\n\n ZA, mass, QI, QM = targetInfo['ZA'], targetInfo['mass'], targetInfo['Q'], targetInfo['QM']\n if( 'EFL' in targetInfo ) :\n QM = QI\n QI = targetInfo['EFL']\n else :\n if( QM is None ) :\n if( MT in ( 2, 5 ) ) :\n QM = QI\n elif( MT == 4 ) : # Q should be 0 except for excited-state targets:\n QM = 0\n if( hasattr( targetInfo['reactionSuite'].target, 'getLevelIndex' ) ) :\n if( targetInfo['reactionSuite'].target.getLevelIndex() > 0 ) : QM = QI\n else :\n QM = QI + level\n interpolationFlatData, crossSectionFlatData = self[targetInfo['style']].toENDF6Data( MT, endfMFList, targetInfo, level )\n MF = targetInfo['crossSectionMF']\n endfMFList[MF][MT] = [ endfFormatsModule.endfHeadLine( ZA, mass, 0, 0, 0, 0 ) ]\n endfMFList[MF][MT].append( endfFormatsModule.endfContLine( QM, QI, 0, LR, len( interpolationFlatData ) / 2, len( crossSectionFlatData ) / 2 ) )\n endfMFList[MF][MT] += endfFormatsModule.endfInterpolationList( interpolationFlatData )\n endfMFList[MF][MT] += endfFormatsModule.endfDataList( crossSectionFlatData )\n endfMFList[MF][MT].append( endfFormatsModule.endfSENDLineNumber( ) )\n\ncrossSectionModule.component.toENDF6 = toENDF6\n\ndef toENDF6Data( self, MT, endfMFList, targetInfo, level ) :\n\n endfInterpolation = gndToENDF6Module.gndToENDFInterpolationFlag( self.interpolation )\n crossSectionFlatData = []\n for xy in self.copyDataToXYs( xUnitTo = 'eV', yUnitTo = 'b' ) : crossSectionFlatData += xy\n return( [ len( crossSectionFlatData ) / 2, endfInterpolation ], crossSectionFlatData )\n\ncrossSectionModule.XYs1d.toENDF6Data = toENDF6Data\n\ndef toENDF6Data( self, MT, endfMFList, targetInfo, level ) :\n\n interpolationFlatData, crossSectionFlatData = [], []\n counter = 0\n lastX, lastY = None, None\n for region in self :\n ENDFInterpolation = gndToENDF6Module.gndToENDFInterpolationFlag( region.interpolation )\n data = region.copyDataToXYs( xUnitTo = 'eV', yUnitTo = 'b' )\n if( lastX is not None ) :\n if( lastY == data[0][1] ) :\n data = data[1:]\n elif( ( lastY == 0 ) and region.interpolation[4:] == 'log' ) :\n interpolationFlatData[-2] += 1\n elif( ENDFInterpolation == lastENDFInterpolation ) :\n interpolationFlatData = interpolationFlatData[:-2]\n counter += len( data )\n interpolationFlatData.append( counter )\n interpolationFlatData.append( ENDFInterpolation )\n for xy in data : crossSectionFlatData += xy\n lastX, lastY = data[-1]\n lastENDFInterpolation = ENDFInterpolation\n return( interpolationFlatData, crossSectionFlatData )\n\ncrossSectionModule.regions1d.toENDF6Data = toENDF6Data\n\ndef toENDF6Data( self, MT, endfMFList, targetInfo, level ) :\n\n return self.tabulatedData.toENDF6Data( MT, endfMFList, targetInfo, level )\n\ncrossSectionModule.resonancesWithBackground.toENDF6Data = toENDF6Data\n\ndef toENDF6Data( self, MT, endfMFList, targetInfo, level ) :\n\n endfInterpolation = 
gndToENDF6Module.axesToEndfInterpolationFlag( self.weights.axes )\n crossSectionFlatData = []\n for xy in self.weights.copyDataToXYs() : crossSectionFlatData += xy\n return( [ len( crossSectionFlatData ) / 2, endfInterpolation ], crossSectionFlatData )\n\ncrossSectionModule.weightedPointwise.toENDF6Data = toENDF6Data\n","sub_path":"site_packages/legacy/toENDF6/reactionData/crossSection.py","file_name":"crossSection.py","file_ext":"py","file_size_in_byte":7538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"127938670","text":"# Authors: Do Yeon Kim\n# Irvin Steve Cardenas (@kpatch)\n\n# this is 'test' version (ver1)\n## import\nimport sys\nsys.path.append(\"/Users/dkim/Documents/Programming/Python/pyserial-3.4\")\nimport serial\nimport binascii\nimport time\nimport pandas as pd\nimport numpy as np\nfrom scipy import signal\nfrom scipy.signal import butter, iirnotch, filtfilt, lfilter\nimport csv\n\noutput_file = open('training_ver8_raw.csv', 'w') \n\n## initial set up\nclass byte1(): \n pass\nclass val(): \n pass\nclass temp():\n pass\n# openBCI\nser = serial.Serial('/dev/tty.usbserial-DM00PZYR') # open serial port tty.usbserial-DM00PZYR\nnum_sample = \"\"\nnum_ch = 9 # 6 ch + 2 reference ch\nnum_accel = 3 # 3 accelerometer x, y & z\nraw_EMG = [0] * num_ch\nraw_accel = [0] * num_accel\n# data format\nbyte1.sample_Hz = 250.0\nbyte1.start1 = 0xA0 # start of data packet\nbyte1.end1 = 0xC0 # end of data packet\n# scale factor\nval.V_ref = 4.5 # reference voltages (V); set by its hardware\nval.gain = 24.0 # assumed gain setting; set by its Arduino code\nval.scaleFac = val.V_ref / val.gain / (2**23 - 1) # scale factor\nval.acc_scaleFac = 0.002 / 2**4 # accelerometer scale factor\n# bandstop\nval.Fs_bs = 250.0 # sample frequency (Hz)\nval.Fo_bs = 60.0 # bandstop frequency (Hz)\nval.q_bs = 35.0 # filter bandwidth\nval.w0_bs = val.Fo_bs / (val.Fs_bs / 2)\n# bandpass\nval.Fs_bp = val.Fs_bs # sample frequency (Hz)\nval.Nq_bp = 0.5 * val.Fs_bp # Nyquist frequency\nval.cutoff_low = 50 / val.Nq_bp # low cutoff frequency (Hz)\nval.cutoff_high = 100 / val.Nq_bp # high cutoff frequency (Hz)\n# check\nprint(ser.name) # check which port was really used\nprint(\"Parameters initialized\")\n\n\n## real time\nwith output_file as csv_file, serial.Serial(ser.name, 115200, timeout = 1, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) as ser1: \n ser1.write('b')\n val.startTime = time.time()\n\n data_df1 = pd.DataFrame(columns = ['CH1', 'CH2', 'CH3', 'CH4', 'CH5', 'CH6', 'CH7', 'CH8'])\n while(1):\n temp.s1 = ser1.read(1) # read up to ten bytes (timeout)\n if binascii.hexlify(temp.s1) == \"a0\":\n raw_data = ser1.read(31)\n raw_ID = raw_data[0]\n for temp_i in range(1, num_ch): \n raw_EMG[temp_i] = raw_data[1 + 3 * (temp_i - 1):4 + 3 * (temp_i - 1)] # EMG data\n for temp_i in range(1, num_accel): \n raw_accel[temp_i - 1] = raw_data[25 + 2 * (temp_i - 1):27 + 2 * (temp_i - 1)] # accel x, y, and z\n\n ch1 = int(binascii.hexlify(raw_EMG[1]), 16) * val.scaleFac\n ch2 = int(binascii.hexlify(raw_EMG[2]), 16) * val.scaleFac\n ch3 = int(binascii.hexlify(raw_EMG[3]), 16) * val.scaleFac\n ch4 = int(binascii.hexlify(raw_EMG[4]), 16) * val.scaleFac\n ch5 = int(binascii.hexlify(raw_EMG[5]), 16) * val.scaleFac\n ch6 = int(binascii.hexlify(raw_EMG[6]), 16) * val.scaleFac\n ch7 = int(binascii.hexlify(raw_EMG[7]), 16) * val.scaleFac\n ch8 = int(binascii.hexlify(raw_EMG[8]), 16) * val.scaleFac\n # print(ch1 + \"\\t\" + ch2 + \"\\t\" + ch3 + \"\\t\" + ch4 + \"\\t\" + 
ch5 + \"\\t\" + ch6 + \"\\t\" + ch7 + \"\\t\" + ch8)\n\n channels =[{'CH1': ch1, 'CH2': ch2, 'CH3': ch3, 'CH4': ch4, 'CH5': ch5, 'CH6': ch6, 'CH7': ch7, 'CH8': ch8}]\n data_df2 = pd.DataFrame(channels)\n data_df1 = data_df1.append(data_df2)\n val.endTime = time.time()\n if( (val.endTime - val.startTime) >= 1):\n val.startTime = val.endTime\n print(\"Recorded\")\n\n # reference channels; creating bipolar EMG channel\n data_refCH1 = data_df1.ix[:, [0, 1]]\n data_refCH2 = data_refCH1.mean(axis = 1, skipna = True) # mean horizontally\n data_refCH3 = data_df1.ix[:, 2:8].sub(data_refCH2, axis = 0)\n print(data_refCH3)\n print(\"Reference CH\")\n\n writer = csv.writer(csv_file, delimiter=\",\", lineterminator='\\n')\n #for output_line in data_refCH3:\n # print output_line\n #writer.writerow(output_line)\n \n for index, row in data_refCH3.iterrows():\n # print row['CH3']\n writer.writerow([row['CH3'], row['CH4'], row['CH5'], row['CH6'], row['CH7'], row['CH8']])\n\n data_df1 = data_df1.iloc[0:0]\n","sub_path":"openBCI_test_ver2.py","file_name":"openBCI_test_ver2.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"265820323","text":"import websocket\r\nimport json\r\nimport ssl\r\n\r\n\r\nclass SenseConnect:\r\n #initialising the constructor\r\n def __init__(self, domain='localhost', port='4848', userdirectory='', userid='', conntype='cert'):\r\n self.ws = self.create_connection(domain, port, userdirectory, userid, conntype)\r\n self.qlikcall = SenseCalls()\r\n\r\n def create_connection(self, domain, port, userdirectory, userid, conntype):\r\n print('Connection initiated!')\r\n if domain=='localhost':\r\n try:\r\n conn = websocket.create_connection(\"ws://localhost:4848/app\")\r\n print('Succesfully connected to local qliksense desktop app.')\r\n return conn\r\n except :\r\n print('Error : Failed to connect with Qliksense app.\\nMake sure you have opened your local qliksense desktop app.')\r\n return 'Make sure you have opened your local qliksense desktop app before running this.'\r\n else:\r\n qlik_server_url = f\"wss://{domain}:{port}/app\"\r\n certs = ({\"ca_certs\":'root.pem',\r\n \"certfile\": 'client.pem',\r\n \"keyfile\": 'client_key.pem',\r\n \"cert_reqs\": ssl.CERT_REQUIRED,\r\n \"server_side\": False})\r\n ssl.match_hostname = lambda cert, hostname: True\r\n return websocket.create_connection(qlik_server_url, \r\n sslopt=certs,\r\n header={f\"'X-Qlik-User': 'UserDirectory={userdirectory}; UserId={userid}'\"})\r\n\r\n\r\n def close_connection(self):\r\n self.ws.close()\r\n \r\n # return the handle \r\n def get_handle(self):\r\n result = json.loads(self.ws.recv())\r\n if 'method' in result.keys():\r\n result = json.loads(self.ws.recv())\r\n return result['result']['qReturn']['qHandle']\r\n \r\n \r\n # This will give the list of all the apps in workspace\r\n def get_list_of_apps(self):\r\n app_list = []\r\n self.ws.send(self.qlikcall.get_doc_list())\r\n result = json.loads(self.ws.recv())\r\n if 'method' in result.keys():\r\n # result = self.ws.recv()\r\n result = json.loads(self.ws.recv())\r\n for doclist in result['result']['qDocList']:\r\n app_list.append(doclist['qTitle']) \r\n return app_list\r\n \r\n # This will give the last updated/reloaded date&time of an app\r\n def get_last_updated_status(self, appname):\r\n reload_status = []\r\n request = self.qlikcall.get_doc_list()\r\n self.ws.send(request)\r\n result = json.loads(self.ws.recv())\r\n if 'method' in result.keys():\r\n result = 
json.loads(self.ws.recv())\r\n for doclist in result['result']['qDocList']:\r\n if 'qLastReloadTime' in doclist.keys() and doclist['qTitle'].lower() in appname.lower():\r\n reload_status.append(doclist['qTitle'])\r\n reload_status.append(doclist['qLastReloadTime'])\r\n break\r\n # print(reload_status,len(reload_status))\r\n if len(reload_status)>0:\r\n return reload_status\r\n else:\r\n return print('App not found!\\n[Localhost] : Make sure spelling of app is correct.\\n[Enterprise] : Make sure App ID is correct.')\r\n \r\n # evaluate the expression and return the output of set analysis(expression)\r\n def evaluate_expression(self,appname,expression,e_o_dim):\r\n self.ws.send(self.qlikcall.open_doc(appname))\r\n #self.ws.recv()\r\n self.ws.send(self.qlikcall.evaluate_expr(expression, self.get_handle()))\r\n result = json.loads(self.ws.recv())\r\n resp = result['result']['qValue']['qText']\r\n if e_o_dim==0:\r\n # self.ws.close()\r\n pass\r\n return resp\r\n\r\n #return all the fields name and related data in a list\r\n def get_all_fields(self, appname):\r\n self.ws.send(self.qlikcall.open_doc(appname))\r\n self.ws.send(self.qlikcall.create_session(self.get_params('field'), self.get_handle()))\r\n request = self.qlikcall.get_layout(self.get_handle())\r\n self.ws.send(request)\r\n res = json.loads(self.ws.recv())\r\n #self.ws.close()\r\n return res['result']['qLayout']['qFieldList']['qItems']\r\n\r\n # it will return the parameter to create the session\r\n def get_params(self, requirement, fieldname=False):\r\n if requirement=='fieldvalues':\r\n params = [\r\n {\r\n \"qInfo\": {\r\n \"qId\": \"ListObject01\",\r\n \"qType\": \"ListObject\"\r\n },\r\n \"qListObjectDef\": {\r\n \"qStateName\": \"$\",\r\n \"qLibraryId\": \"\",\r\n \"qDef\": {\r\n \"qFieldDefs\": [\r\n fieldname\r\n ],\r\n \"qFieldLabels\": [\r\n fieldname\r\n ],\r\n \"qSortCriterias\": [\r\n {\r\n \"qSortByLoadOrder\": 1\r\n }\r\n ]\r\n },\r\n \"qInitialDataFetch\": [\r\n {\r\n \"qTop\": 0,\r\n \"qHeight\": 1,\r\n \"qLeft\": 0,\r\n \"qWidth\": 1\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n elif requirement=='master_measures':\r\n params = [\r\n {\r\n \"qInfo\": {\r\n \"qType\": \"MeasureList\"\r\n },\r\n \"qMeasureListDef\": {\r\n \"qType\": \"measure\",\r\n \"qData\": {\r\n \"title\": \"/qMetaDef/title\",\r\n \"description\": \"/qMetaDef/description\",\r\n \"expression\": \"/qMeasure/qDef\"\r\n }\r\n }\r\n }\r\n ]\r\n elif requirement=='exportdata':\r\n params = [\r\n {\r\n \"qInfo\": {\r\n \"qType\": \"SheetList\"\r\n },\r\n \"qAppObjectListDef\": {\r\n \"qType\": \"sheet\",\r\n \"qData\": {\r\n \"title\": \"/qMetaDef/title\",\r\n \"description\": \"/qMetaDef/description\",\r\n \"thumbnail\": \"/thumbnail\",\r\n \"cells\": \"/cells\",\r\n \"rank\": \"/rank\",\r\n \"columns\": \"/columns\",\r\n \"rows\": \"/rows\"\r\n }\r\n }\r\n }\r\n ]\r\n else:\r\n params = [\r\n {\r\n \"qInfo\": {\r\n \"qType\": \"FieldList\"\r\n },\r\n \"qFieldListDef\": {\r\n \"qShowSystem\": False,\r\n \"qShowHidden\": False,\r\n \"qShowDerivedFields\": True,\r\n \"qShowSemantic\": True,\r\n \"qShowSrcTables\": True,\r\n \"qShowImplicit\": True\r\n }\r\n }\r\n ]\r\n return params\r\n\r\n # returs the json of all the values in a field\r\n def get_all_field_values(self, appname, fieldname):\r\n filedvalues = []\r\n # request = self.qlikcall.open_doc(appname)\r\n self.ws.send(self.qlikcall.open_doc(appname))\r\n self.ws.send(self.qlikcall.create_session(self.get_params('fieldvalues', fieldname), self.get_handle()))\r\n 
self.ws.send(self.qlikcall.Select_list_object_values(self.get_handle()))\r\n # print('pehla result : ',self.ws.recv())\r\n self.ws.recv()\r\n # print('okieeeeeeeeeeeeeeeeeeeeeeeeee, ',fieldname)\r\n request = self.qlikcall.get_list_object_data()\r\n self.ws.send(request)\r\n # res = self.ws.recv()\r\n result = json.loads(self.ws.recv())\r\n print(result)\r\n for qMatrix in result['result']['qDataPages'][0]['qMatrix']:\r\n filedvalues.append(qMatrix[0]['qText'])\r\n return filedvalues\r\n\r\n # returns the master_measure expressions\r\n def get_master_measures(self, appname, only_mastermeasure_name=False):\r\n mastermeasure_info = []\r\n self.ws.send(self.qlikcall.open_doc(appname))\r\n self.ws.send(self.qlikcall.create_session(self.get_params('master_measures'), self.get_handle()))\r\n self.ws.send(self.qlikcall.get_layout(self.get_handle()))\r\n res = json.loads(self.ws.recv())\r\n if only_mastermeasure_name:\r\n for qItem in res['result']['qLayout']['qMeasureList']['qItems']:\r\n mastermeasure_info.append(qItem['qData']['title'])\r\n else:\r\n for qItem in res['result']['qLayout']['qMeasureList']['qItems']:\r\n mastermeasure_info.append((qItem['qData']['title'],qItem['qData']['description'],qItem['qData']['expression']))\r\n return mastermeasure_info\r\n\r\n def export_to_excel(self, appname):\r\n export_info = []\r\n self.ws.send(self.qlikcall.open_doc(appname))\r\n self.ws.send(self.qlikcall.create_session(self.get_params('exportdata'), self.get_handle()))\r\n qhandle_obj = self.get_handle()\r\n self.ws.send(self.qlikcall.get_layout(qhandle_obj))\r\n result = json.loads(self.ws.recv())\r\n # qhandle = result['result']['qReturn']['qHandle'] \r\n for sheet in result['result']['qLayout']['qAppObjectList']['qItems']:\r\n for chart in sheet['qData']['cells']:\r\n self.ws.send(self.qlikcall.get_object(1,chart['name']))\r\n result = json.loads(self.ws.recv())\r\n if result['result']['qReturn']['qGenericType'] not in ['VizlibFilter','tcmenu','filterpane','kpi']:\r\n chart_type = result['result']['qReturn']['qGenericType']\r\n qhandle_export = result['result']['qReturn']['qHandle']\r\n self.ws.send(self.qlikcall.get_layout(qhandle_export))\r\n result = json.loads(self.ws.recv())\r\n chart_name = result['result']['qLayout']['title']\r\n print('chart name :',chart_name)\r\n print('qhandle :',qhandle_export)\r\n self.ws.send(self.qlikcall.export_data(qhandle_export,chart_name))\r\n result = json.loads(self.ws.recv())\r\n chart_url=\"http://localhost:4848\" + result[\"result\"][\"qUrl\"]\r\n export_info.append((chart_name, chart_url, chart_type))\r\n return export_info\r\n\r\n\r\nclass SenseCalls:\r\n def __init__(self):\r\n self.id = 1\r\n\r\n def inc_id(self):\r\n return self.id+1\r\n\r\n #to create a session for fieldlist\r\n def create_session(self, params, qhandle):\r\n request = {\r\n \"method\": \"CreateSessionObject\",\r\n \"handle\": qhandle,\r\n \"params\": params,\r\n \"outKey\": -1,\r\n \"id\": 3\r\n }\r\n return json.dumps(request)\r\n\r\n def get_layout(self, qhandle):\r\n request = {\r\n \"method\": \"GetLayout\",\r\n \"handle\": qhandle,\r\n \"params\": [],\r\n \"outKey\": -1,\r\n \"id\": 4\r\n }\r\n return json.dumps(request)\r\n\r\n #returns the JSON file like appname, lastreloadtime etc.\r\n def get_doc_list(self):\r\n request = {\r\n \"handle\": -1,\r\n \"method\": \"GetDocList\",\r\n \"params\": {},\r\n \"outKey\": -1,\r\n \"id\": 2}\r\n return json.dumps(request)\r\n \r\n #to open an app\r\n def open_doc(self, appname):\r\n request = {\r\n \"handle\": -1,\r\n \"method\": 
\"OpenDoc\",\r\n \"params\": [appname],\r\n \"outKey\": -1,\r\n \"id\": 1}\r\n return json.dumps(request)\r\n\r\n #to evaluate the expression\r\n def evaluate_expr(self, expression, qhandle):\r\n print('qlik call :',expression)\r\n request = {\r\n \"handle\": qhandle,\r\n \"method\": \"EvaluateEx\",\r\n \"params\": {\r\n \"qExpression\": expression\r\n },\r\n \"outKey\": -1,\r\n \"id\": 4}\r\n return json.dumps(request)\r\n \r\n # request to crerat a session for a particular field\r\n def create_fieldvalues_session(self, params, qhandle):\r\n request = {\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 8,\r\n \"method\": \"CreateSessionObject\",\r\n \"handle\": qhandle,\r\n \"params\": params\r\n }\r\n return json.dumps(request)\r\n\r\n # to initiate a field request\r\n def Select_list_object_values(self, qHandle):\r\n request = {\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 9,\r\n \"method\": \"SelectListObjectValues\",\r\n \"handle\": str(qHandle),\r\n \"params\": [\r\n \"/qListObjectDef\",\r\n [\r\n 0\r\n ],\r\n True\r\n ]\r\n }\r\n # jaja = json.loads(request)\r\n return json.dumps(request)\r\n\r\n # to get the the max of qHeight number of values \r\n def get_list_object_data(self):\r\n request = {\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": self.inc_id(),\r\n \"method\": \"GetListObjectData\",\r\n \"handle\": 2,\r\n \"params\": [\r\n \"/qListObjectDef\",\r\n [\r\n {\r\n \"qTop\": 0,\r\n \"qLeft\": 0,\r\n \"qWidth\": 1,\r\n \"qHeight\": 20\r\n }\r\n ]\r\n ]\r\n }\r\n return json.dumps(request)\r\n\r\n # to get the chart object\r\n def get_object(self,qhandle,chart_objid):\r\n request = {\r\n \"handle\": qhandle,\r\n \"method\": \"GetObject\",\r\n \"params\": {\r\n \"qId\": chart_objid\r\n },\r\n \"outKey\": -1,\r\n \"id\": 6\r\n }\r\n return json.dumps(request)\r\n \r\n # to get the url of the exporting chart\r\n def export_data(self,qhandle,chartname):\r\n request = {\r\n \"handle\": qhandle,\r\n \"method\": \"ExportData\",\r\n \"params\": {\r\n \"qFileType\": \"OOXML\",\r\n \"qPath\": \"\",\r\n \"qFileName\": chartname,\r\n \"qExportState\": 0\r\n },\r\n \"outKey\": -1,\r\n \"id\": 7\r\n }\r\n return json.dumps(request)\r\n","sub_path":"qlikconnect/qlikconnect.py","file_name":"qlikconnect.py","file_ext":"py","file_size_in_byte":14658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"209135745","text":"\"\"\"\nTitle: historical_crash_map.py\n \nAuthor: @alicefeng\n \nThis script creates the datasets needed to power MVP. 
This file only needs to be\nrun once prior to using the map for the first time.\n\nInputs:\n    cad_crash_events_with_transport_2016_wgs84.csv\n    vz_predict_dataset.csv (i.e., the canonical dataset)\n    csv of model predictions\n    inter_and_non_int.shp\n\nOutput:\n    geojson of historical crash data\n    geojson of predictions merged with geometries\n    weekly_crashes.csv\n    dow_crashes.csv\n    hourly_crashes.csv\n\"\"\"\n\n# Import the necessary Python modules\nimport pandas as pd\nimport geopandas as gpd\nimport shapely.geometry\nfrom shapely.geometry import Point\nimport os\n\n\nBASE_DIR = os.path.dirname(\n    os.path.dirname(\n        os.path.dirname(\n            os.path.abspath(__file__))))\n\nRAW_FP = BASE_DIR + '/data/raw'\nMAP_FP = BASE_DIR + '/data/processed/maps'\nDATA_FP = BASE_DIR + '/data/processed'\n\n### Generate historical crash dataset\n# read CAD data\ncad = pd.read_csv(RAW_FP + '/cad_crash_events_with_transport_2016_wgs84.csv', parse_dates=['CALENDAR_DATE'])\n\n# create points from lat/lon and read into geodataframe\ngeometry = [Point(xy) for xy in zip(cad.X, cad.Y)]\ncrs = {'init': 'epsg:4326'}\n\n# get week of the year\ncad['week'] = cad['CALENDAR_DATE'].dt.week\ndf = cad['week']\n\ngeo_df = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)\ngeo_df.to_file('cad.geojson', driver=\"GeoJSON\")\n\n\n\n\n\n### Generate model predictions dataset\n# read in model output and reformat\ncar = pd.read_csv(DATA_FP + '/car_preds_weekly_named.csv', dtype={'segment_id': str})\nweek_cols = list(car.columns[2:56])\ncar_weekly = pd.melt(car, id_vars=['segment_id', 'st_name'], value_vars=week_cols,\n                     var_name='week', value_name='pred')\ncar_weekly['week'] = pd.to_numeric(car_weekly['week'])\ncar_weekly['id'] = car_weekly['segment_id']\n\n\n# Read in shapefile as a GeoDataframe\nstreets = gpd.read_file(MAP_FP + '/inter_and_non_int.shp')\n\n# Set the projection as EPSG:3857 since the shapefile didn't export with one\nstreets.crs = {'init': 'epsg:3857'}\n\n# Then reproject to EPSG:4326 to match what Leaflet uses\nstreets = streets.to_crs({'init': 'epsg:4326'})\n\n# Join geometry to the crash data\n#crashes_joined = streets.merge(crashes, on='id')\ncar_joined = streets.merge(car_weekly, on='id')\n\n# export joined predictions as GeoJSON \n# IMPORTANT: it's a known bug that Fiona won't let you overwrite GeoJSON files so you have \n# to first delete the file from your hard drive before re-exporting\ncar_preds = car_joined[['geometry', 'id', 'week', 'pred', 'st_name']]\ncar_preds.to_file(\"car_preds_named.json\", driver='GeoJSON')\n\n\n\n\n\n### Generate weekly crash dataset\n# read in historical crash data\ncrashes = pd.read_csv(DATA_FP + '/vz_predict_dataset.csv', dtype={'segment_id': str})\n\n# roll up crashes to the weekly level\nweekly_crashes = crashes.groupby(['week'], as_index=False)['crash'].sum()\nweekly_crashes.to_csv('weekly_crashes.csv', index=False)\n\n\n### Generate day of week crash dataset\nweekday_map= {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}\ncad['dow'] = cad['CALENDAR_DATE'].dt.dayofweek\ncad['dow_name'] = cad['dow'].map(weekday_map)\ndow_crashes = cad.groupby(['dow', 'dow_name'], as_index=False)['N_EVENTS'].sum()\ndow_crashes.to_csv('dow_crashes.csv', index=False)\n\n\n### Generate time of day crash dataset\ncad['hour'] = cad['TIME'].str.split(':').str.get(0).astype(int)\n\n# add indicator for weekday/weekend to see if there's a difference in crash distribution\ncad['weekend'] = (cad['dow']//5==1).astype(int)\n\nhourly_crashes = cad.groupby(['weekend', 
'hour'], as_index=False)['N_EVENTS'].sum()\nhourly_crashes['pct_crash'] = hourly_crashes.groupby(['weekend'])['N_EVENTS'].apply(lambda x: x/x.sum())\nhourly_crashes['weekend_lbl'] = hourly_crashes['weekend'].map({0:'Weekday', 1:'Weekend'})\nhourly_crashes.to_csv('hourly_crashes.csv', index=False)\n","sub_path":"reports/historical_crash_map.py","file_name":"historical_crash_map.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"575006224","text":"from mpi4py import MPI\n\nimport SistemasEcuaciones.Paralelos as eliminacion\n\ncomm = MPI.COMM_WORLD\nrank = comm.rank\nsize = comm.size\n\ndef repartirEquitativamente(n, Ab, k):\n numeroDeNucleos = comm.size\n filasPorNucleo = int((n - k) / numeroDeNucleos)\n sobrantes = (n - k) % numeroDeNucleos\n filasDistribuidasPorNucleo = []\n for i in range(0, numeroDeNucleos):\n filasDeNucleoActual = []\n for j in range(filasPorNucleo * i, filasPorNucleo * i + filasPorNucleo):\n filasDeNucleoActual.append(Ab[j + k])\n filasDistribuidasPorNucleo.append(filasDeNucleoActual)\n if sobrantes != 0:\n for x in range(0, sobrantes):\n filasDistribuidasPorNucleo[x].append(Ab[numeroDeNucleos * filasPorNucleo + x + k])\n return filasDistribuidasPorNucleo\n\ndef cuadradoNatural(n, x, y):\n tabla = []\n n = n - 1\n marcas = set()\n xDistribuidas = []\n yDistribuidas = []\n if rank == 0:\n xDistribuidas = repartirEquitativamente(len(x), x, 0)\n yDistribuidas = repartirEquitativamente(len(y), y, 0)\n\n misXs = comm.scatter(xDistribuidas, root=0)\n misYs = comm.scatter(yDistribuidas, root=0)\n filas = []\n\n for i in range(len(misXs)):\n fila = [0.0] * (n * 3 + 1)\n fila1 = None\n\n if (x.index(misXs[i]) != 0) & (x.index(misXs[i]) != n):\n k = (x.index(misXs[i]) - 1) * 3\n exp = 2\n for j in range(0, 3):\n fila[k + j] = pow(misXs[i], exp)\n exp -= 1\n exp = 2\n fila1 = [0.0] * (n * 3 + 1)\n for j in range(0, 3):\n fila1[k + j + 3] = pow(misXs[i], exp)\n exp -= 1\n fila1[-1] = misYs[i]\n elif (x.index(misXs[i]) == n):\n k = (n * 3) - 3\n exp = 2\n for j in range(0, 3):\n fila[k + j] = pow(misXs[i], exp)\n exp -= 1\n else:\n k = 0\n exp = 2\n for j in range(0, 3):\n fila[k + j] = pow(misXs[i], exp)\n exp -= 1\n\n fila[-1] = misYs[i]\n filas.append(fila)\n filas.append(fila1)\n arregloFilas = comm.gather(filas)\n if rank == 0:\n for i in range(len(arregloFilas)):\n for j in range(len(arregloFilas[i])):\n if arregloFilas[i][j] is not None:\n tabla.append(arregloFilas[i][j])\n etapa2x = x[1:-1]\n xDistribuidas = repartirEquitativamente(len(etapa2x), etapa2x, 0)\n misXs = comm.scatter(xDistribuidas, root=0)\n\n filas = []\n exp = 1\n # Etapa 2\n for i in range(len(misXs)):\n fila = [0.0] * (n * 3 + 1)\n k = (x.index(misXs[i]) - 1) * 3\n for j in range(0, 2):\n fila[k + j] = (2 - j) * pow(misXs[i], exp)\n exp -= 1\n exp = 1\n\n for j in range(0, 2):\n fila[k + j + 3] = -(2 - j) * pow(misXs[i], exp)\n exp -= 1\n filas.append(fila)\n arregloFilas = comm.gather(filas)\n\n if rank == 0:\n for i in range(len(arregloFilas)):\n for j in range(len(arregloFilas[i])):\n if arregloFilas[i][j] is not None:\n tabla.append(arregloFilas[i][j])\n\n filas = []\n exp = 1\n # Etapa 3\n if len(misXs) > 0:\n for i in range(len(misXs)):\n fila = [0.0] * (n * 3 + 1)\n k = (x.index(misXs[i]) - 1) * 3\n fila[k + 0] = 2 * pow(misXs[i], exp)\n filas.append(fila)\n\n arregloFilas = comm.gather(filas)\n\n if rank == 0:\n for i in range(len(arregloFilas)):\n for j in range(len(arregloFilas[i])):\n if 
arregloFilas[i][j] is not None:\n tabla.append(arregloFilas[i][j])\n\n Ab = comm.bcast(tabla, root=0)\n xns = eliminacion.eliminacionGaussianaTotal((n * 3), Ab)\n if rank == 0:\n m = [chr(97 + j) + str(i) for i in range(1, n + 1) for j in range(3)]\n for i in range(len(m)):\n print(str(m[i]) + \" = \" + str(xns[i]))\n return xns\n\n\ndef hallarValor(x, vars, valor):\n solucion = vars\n ind = 0\n if valor >= x[0]:\n if valor <= x[len(x) - 1]:\n for i in range(0, len(x) - 1):\n if (valor >= x[i]) & (valor <= x[i + 1]):\n ind = i\n else:\n ind = len(x) - 2\n resp = 0.0\n for i in range(0, 4):\n resp += solucion[(ind * 4) + i] * pow(valor, 3 - i)\n print(\"Resultado:\")\n print(\"f(\" + str(valor) + \") = \" + str(resp))\n return resp\n\nx = [2, 3, 5,6]\ny = [-1, 2, -7,8]\nvars = cuadradoNatural(4, x, y)","sub_path":"Interpolacion/Trazadores/cuadradoParalelo.py","file_name":"cuadradoParalelo.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"6271443","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render, render_to_response, HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.core.context_processors import csrf\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login\nfrom django.conf import settings\n\n# app imports\nfrom .models import FacebookProfile\nfrom .facebook import FacebookAuth\n\n_Facebook = FacebookAuth(settings.FACEBOOK_APP_ID, settings.FACEBOOK_APP_SECRET)\n\ndef home(request):\n\trc = RequestContext(request)\n\tmessage = \"Fall in love Python and Linux\"\n\tc = {}\n\tc['message'] = message\n\treturn render_to_response('base.html',c,rc)\n\ndef callback(request):\n\tif request.user.is_authenticated():\n\t\treturn HttpResponseRedirect('/account/profile/')\n\tif not request.user.is_active:\n\t\tif request.GET.items():\n\t\t\tuser_obj = None\n\t\t\tif 'facebook' in request.META['HTTP_REFERER']:\n\t\t\t\tcode = request.GET['code']\n\t\t\t\t_Facebook._access_token(code)\n\t\t\t\tuser_info = _Facebook._user_info()\n\t\t\t\tuser_id = user_info['id']\n\t\t\t\tfirst_name = user_info.get('first_name')\n\t\t\t\tlast_name = user_info.get('last_name')\n\t\t\t\tusername = user_info.get('first_name') + user_info.get('last_name') #\n\t\t\t\temail = user_info.get('email')\n\t\t\t\tbirthday = user_info.get('birthday')\n\t\t\t\tgender=user_info.get('gender')\n\t\t\t\tpicture = user_info.get('picture')['data']['url']\n\t\t\t\tprofile_url = '{u}{i}'.format(u=settings.FACEBOOK_URL,i=user_id)\n\n\t\t\t\ttry:\n\t\t\t\t\tuser_obj = User.objects.get(username=user_id)\n\t\t\t\texcept User.DoesNotExist:\n\t\t\t\t\tuser_obj = User.objects.create_user(\n\t\t\t\t\t\t\t\t\tusername=user_id,\n\t\t\t\t\t\t\t\t\temail=email,\n\t\t\t\t\t\t\t\t\tfirst_name=first_name,\n\t\t\t\t\t\t\t\t\tlast_name=last_name)\n\n\t\t\t\ttry:\n\t\t\t\t\tprofile = FacebookProfile.objects.get(user=user_obj.id)\n\t\t\t\t\tprofile.access_token = _Facebook.access_token\n\t\t\t\texcept FacebookProfile.DoesNotExist:\n\t\t\t\t\tprofile = 
FacebookProfile(\r\n\t\t\t\tuser=user_obj,\r\n\t\t\t\tfacebook_id=user_id,\r\n\t\t\t\tfirst_name=first_name,\r\n\t\t\t\tlast_name=last_name,\r\n\t\t\t\tbirthday=birthday,\r\n\t\t\t\temail=email,\r\n\t\t\t\tgender=gender,\r\n\t\t\t\tpicture=picture,\r\n\t\t\t\tprofile_url=profile_url,\r\n\t\t\t\taccess_token=_Facebook.access_token)\r\n\t\t\t\t\tprofile.save()\r\n\t\t\t# else:\r\n\t\t\t\t# user_obj = User.objects.filter(username=user_id)\r\n\r\n\t\t\tuser = authenticate(token=_Facebook.access_token, user=user_obj)\r\n\t\t\tlogin(request, user)\r\n\t\t\treturn HttpResponseRedirect('/account/profile/')\r\n\treturn HttpResponse('Welcome to Facebook')\r\n\r\ndef facebook_login(request):\r\n\tif request.user.is_authenticated():\r\n\t\treturn HttpResponseRedirect('/account/profile/')\r\n\tfacebook_url = _Facebook._authorize_url()\r\n\treturn HttpResponseRedirect(facebook_url)","sub_path":"example/facebook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"212603496","text":"num=int(input(''))\nif num%2==0:\n    if num%3==0:\n        print('divisible by both 2 and 3')\n    else:\n        print('divisible by 2 but not by 3')\nelse:\n    if num%3==0:\n        print('divisible by 3 but not by 2')\n    else:\n        print('divisible by neither 3 nor 2')\n","sub_path":"1906101020-张瑞/day0225-.py/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"149314838","text":"import torch\n\nfrom gan.nn.stylegan.generator import HeatmapToImage, FromStyleConditionalGenerator, NoiseToStyle, \\\n    HeatmapAndStyleToImage\nfrom gan.nn.stylegan.style_encoder import GradualStyleEncoder, StyleEncoder\nfrom torch import Tensor\n\nfrom gan.noise.stylegan import mixing_noise\n\n\nclass StyleGanAutoEncoder:\n\n    def __init__(self, image_size: int = 256, noise_size: int = 512, hm_nc=1):\n\n        style_count = {\n            64: 10,\n            128: 12,\n            256: 14,\n            512: 16,\n            1024: 18,\n        }\n\n        self.noise_size = noise_size\n\n        self.generator = HeatmapToImage(\n            FromStyleConditionalGenerator(image_size, noise_size),\n            NoiseToStyle(512, 8, 0.01, style_count[image_size]),\n            hm_nc\n        )\n\n        self.decoder = HeatmapAndStyleToImage(self.generator)\n        self.style_encoder = GradualStyleEncoder(50, 3, mode=\"ir\", style_count=style_count[image_size])\n\n\n    def load_state_dict(self, weights, style=True):\n        if style:\n            self.style_encoder.load_state_dict(weights[\"s\"])\n        self.generator.load_state_dict(weights['gi'])\n        return self\n\n    def cuda(self):\n        self.generator = self.generator.cuda()\n        self.decoder = HeatmapAndStyleToImage(self.generator)\n        self.style_encoder = self.style_encoder.cuda()\n        return self\n\n    def encode_latent(self, image: Tensor) -> Tensor:\n        return self.style_encoder(image)\n\n    def generate(self, one_channel_heatmap: Tensor, noise=None) -> (Tensor, Tensor):\n        # assert one_channel_heatmap.shape[1] == 1\n        if noise is None:\n            noise = mixing_noise(one_channel_heatmap.shape[0], self.noise_size, 0.9, one_channel_heatmap.device)\n        fake, fake_latent = self.generator.forward(one_channel_heatmap, noise, return_latents=True)\n        fake_latent = torch.cat([f[:, None, :] for f in fake_latent], dim=1).detach()\n        return fake, fake_latent\n\n    def decode(self, one_channel_heatmap: Tensor, latent: Tensor):\n        # assert one_channel_heatmap.shape[1] == 1\n        return self.decoder(one_channel_heatmap, 
latent)\n\n\n\n\n","sub_path":"src/models/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"495729384","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated by Dufy on 2020/2/27 10:27\r\nIDE used: PyCharm \r\nDescription :\r\n1) replaces the earlier jieba_used_1209.py\r\n2) target_path = 'data\\excel_write'\r\n3) the former excel2different_files() is now rewritten as OperateExcelSubclass.excel_write_in\r\n\r\nRemark: target_path = 'data\\excel_write' must stay consistent with txt_filePath = r'D:\\dufy\\code\\ft_BOM\\data\\excel_write' # folder that is read\r\n\"\"\"\r\nfrom data_operation import OperateExcel, function\r\nfrom data_operation.function import load_stop_word_list, label_new, standard\r\n# from data_operation import OperateTXT\r\nfrom data_operation.txt_operate import OperateTXT\r\nfrom data_operation.constant import label_name_forbid, label_name_refer\r\nimport os\r\nimport jieba # used together with the custom dictionary\r\nfrom data_operation.function import get_logger\r\n\r\njieba.load_userdict('dict_boom.txt') # used together with the custom dictionary\r\nstop_words = load_stop_word_list(\"stopwords_subclass.txt\")\r\nlogger = get_logger()\r\n\r\nclass OperateExcelSubclass(OperateExcel): # overrides the parent method\r\n    def excel_write_in(self, target_path):\r\n        try:\r\n            # print(target_path, '~~~~~~~')\r\n            # fs_list.append(open(filenames, 'w', encoding='utf-8'))\r\n            for line_read in self.excel_content_all().splitlines():\r\n                target_path_temp = target_path # temporary variable, since target_path is reused on every loop iteration\r\n\r\n                aa_label = line_read.split()[0].replace('/', '') # strip '/' from the label\r\n                # aa_label = aa.split()[0] # strip '/' from the label\r\n                if aa_label in label_name_forbid and aa_label in ['RF', 'EMIRFI']:\r\n                    logger.critical('forbidden label: {}'.format(aa_label))\r\n                    continue\r\n                elif aa_label in label_name_forbid:\r\n                    continue\r\n\r\n                aa_label = label_new(aa_label)\r\n\r\n                if aa_label != 'nan':\r\n\r\n                    # print(aa_label, '~~~~~~~')\r\n                    aa_description = \" \".join(line_read.split()[1:])\r\n                    aa_description = standard(aa_description, stop_words) # normalize\r\n\r\n                    logger.debug('label: {}, final line to write: {}'.format(aa_label, aa_description))\r\n                    aa_description_length = 0\r\n                    for i in aa_description.split(' '):\r\n                        if i != '':\r\n                            aa_description_length += 1\r\n                    # print(length)\r\n\r\n                    target_path_temp = target_path_temp + '\\\\' + aa_label + '.txt'\r\n                    # print(target_path, '-', aa_label, '!!!!')\r\n                    if aa_description_length > 1: # minimum token count for a row to be used as training data\r\n                        if aa_label not in label_name_refer:\r\n                            logger.critical('path \"{}\" produced a new label: {}'.format(self.file_path, aa_label))\r\n                        OperateTXT().txt_write_line(target_path_temp, aa_description)\r\n\r\n        except IOError as ex:\r\n            print(ex)\r\n            print('bom_read.py: an error occurred while writing files!') # \\033[1;31m font color: red \\033[0m\r\n\r\n        print('Done!')\r\n\r\n\r\n\r\ndef excel_read2txt():\r\n    # clear the output folder first:\r\n    txt_filePath = r'D:\\dufy\\code\\fast_subclass30\\data\\excel_write' # folder that is read\r\n    function.files_clear(txt_filePath)\r\n\r\n    # filePath = r'C:\\Users\\Administrator\\Documents\\Tencent Files\\3007490756\\FileRecv\\bom_test_random' # folder that is read\r\n    filePath = r'D:\\dufy\\code\\ft_BOM\\data\\bom_subclass30' # folder that is read\r\n    file_names = os.listdir(filePath)\r\n\r\n    for i, name0 in enumerate(file_names): # loop over the files in the folder\r\n        logger.debug('==========================')\r\n        path = filePath + '\\\\' + name0\r\n        logger.debug('path: {} '.format(path))\r\n        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n        aa = OperateExcelSubclass(path)\r\n        # aa.excel_data2temp_files() # generate temp @ files to prepare for later processing\r\n        aa.excel_write_in(r'data\\excel_write') # read the current excel and overwrite the output\r\n        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n        logger.debug('path: {}'.format(path))\r\n        logger.debug('==========================')\r\n\r\nif __name__ == \"__main__\":\r\n    
pass\n\n","sub_path":"bom_read.py","file_name":"bom_read.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"364704099","text":"import os\n\nimport mock\nimport shutil\nimport tempfile\n\nimport utils\n\nfrom common import known_bugs_utils\n\n# Need this for plugin imports\nutils.add_sys_plugin_path(\"juju\")\nfrom plugins.juju.parts import ( # noqa E402\n services,\n charms,\n units,\n known_bugs,\n)\n\n\nclass TestJujuPluginPartServices(utils.BaseTestCase):\n\n def setUp(self):\n super().setUp()\n\n def tearDown(self):\n super().tearDown()\n\n @mock.patch.object(services, 'JUJU_MACHINE_INFO', {\"machines\": {}})\n def test_get_machine_info(self):\n expected = {'machines': {'running': ['33 (version=unknown)',\n '0 (version=unknown)']}}\n services.get_machine_checks()()\n self.assertEquals(services.JUJU_MACHINE_INFO, expected)\n\n\nclass TestJujuPluginPartCharms(utils.BaseTestCase):\n\n def setUp(self):\n super().setUp()\n\n def tearDown(self):\n super().tearDown()\n\n @mock.patch.object(charms, 'CHARM_VERSIONS', {})\n def test_get_charm_versions(self):\n charms.get_charm_checks()()\n expected = {}\n self.assertEquals(charms.CHARM_VERSIONS, expected)\n\n\nclass TestJujuPluginPartUnits(utils.BaseTestCase):\n\n def setUp(self):\n super().setUp()\n\n def tearDown(self):\n super().tearDown()\n\n @mock.patch.object(units, 'JUJU_UNIT_INFO', {\"units\": {}})\n def test_get_app_from_unit_name(self):\n unit = \"foo-32\"\n app = units.JujuUnitChecks().get_app_from_unit_name(unit)\n self.assertEquals(app, \"foo\")\n\n @mock.patch.object(units, 'JUJU_UNIT_INFO', {\"units\": {}})\n def test_get_unit_version(self):\n unit = \"foo-32\"\n version = units.JujuUnitChecks().get_unit_version(unit)\n self.assertEquals(version, 32)\n\n @mock.patch.object(units, 'JUJU_UNIT_INFO', {\"units\": {}})\n def test_get_unit_info(self):\n expected = {'local': ['filebeat-24', 'neutron-gateway-0',\n 'ntp-0'],\n 'lxd': ['ceph-mon-0',\n 'ceph-osd-no-fixed-wal-7',\n 'ceph-radosgw-0',\n 'ceph-radosgw-hacluster-0',\n 'cinder-0',\n 'cinder-ceph-0',\n 'cinder-hacluster-0',\n 'elasticsearch-1',\n 'filebeat-39',\n 'glance-0',\n 'glance-hacluster-0',\n 'grafana-0',\n 'keystone-0',\n 'keystone-hacluster-0',\n 'landscape-client-80',\n 'memcached-0',\n 'mysql-0',\n 'mysql-hacluster-0',\n 'neutron-api-0',\n 'neutron-api-hacluster-0',\n 'neutron-openvswitch-25',\n 'neutron-openvswitch-octavia-0',\n 'nova-cloud-controller-0',\n 'nova-cloud-controller-hacluster-0',\n 'nova-compute-0',\n 'nrpe-container-31',\n 'ntp-71',\n 'octavia-0',\n 'octavia-hacluster-5',\n 'openstack-dashboard-0',\n 'openstack-dashboard-hacluster-0',\n 'prometheus-0',\n 'prometheus-ceph-exporter-0',\n 'prometheus-openstack-exporter-0',\n 'rabbitmq-server-0'],\n 'stopped': ['nrpe-0', 'rabbitmq-server-2',\n 'rabbitmq-server-3']}\n units.get_unit_checks()()\n self.assertEquals(units.JUJU_UNIT_INFO, {\"units\": expected})\n\n\nclass TestJujuPluginPartKnown_bugs(utils.BaseTestCase):\n\n def setUp(self):\n super().setUp()\n self.tmpdir = tempfile.mkdtemp()\n\n def tearDown(self):\n if os.path.isdir(self.tmpdir):\n shutil.rmtree(self.tmpdir)\n\n super().tearDown()\n\n def test_detect_known_bugs(self):\n with mock.patch.object(known_bugs_utils, 'PLUGIN_TMP_DIR',\n self.tmpdir):\n known_bugs.detect_known_bugs()\n expected = {'bugs-detected':\n [{'id': 'https://bugs.launchpad.net/bugs/1910958',\n 'desc':\n ('Unit unit-rabbitmq-server-2 failed to start due '\n 'to members in relation 
236 that cannot be '\n 'removed.'),\n 'origin': 'testplugin.01part'}]}\n self.assertEqual(known_bugs_utils._get_known_bugs(), expected)\n","sub_path":"tests/unit/test_juju.py","file_name":"test_juju.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"322712556","text":"# python3 bandit.py --instance ./instances/i-2.txt --algorithm kl-ucb --randomSeed 42 --verbose --horizon 10000\n\nimport math, operator, random\nfrom helper import getReward, getRegret\n\n# Function to return KL divergence of two numbers p and q\ndef kl(p, q):\n\tif p == 0 or p == 1:\n\t\treturn 0\n\telif q == 0 or q ==1:\n\t\treturn float('inf')\n\telse:\n\t\treturn p * math.log(p / q) + (1 - p) * math.log((1 - p) / (1 - q))\n\n# Function to get the target value for a given time t\ndef getTarget(t):\n\t# c is the const for kl-ucb algorithm\n\tc = 1\n\tif t == 1:\n\t\treturn float('inf')\n\telse:\n\t\treturn math.log(t) + c * math.log(math.log(t))\n\n# Function to find optimal value of q for given target and empirical mean of an arm \ndef findQ(target, mean_emp, num_sample):\n\tq = 1\n\tepsilon = 10 ** -2\n\tvalue = kl(mean_emp, q) * num_sample\n\twhile q >= mean_emp and value > target:\n\t\tq -= epsilon\n\t\tvalue = kl(mean_emp, q) * num_sample\n\treturn q\n\n# Returns the modified rewards afer sampling an arm\ndef sampleArm(means_ucbkl):\n\tarm_max = max(means_ucbkl.items(), key=operator.itemgetter(1))[0]\n\treturn arm_max\n\n# Function for kl-ucb sampling algorithm\ndef ucbKL(seed, horizon, means_true, verbose=False):\n\trandom.seed(seed)\n\trewards = {i: 0 for i in means_true.keys()}\n\tsamples = {i: 0 for i in means_true.keys()}\n\tmeans_emp = {i: 0 for i in means_true.keys()}\n\tmeans_ucbkl = {i: float('inf') for i in means_true.keys()}\n\t\n\t# Sample arms\n\tfor t in range(horizon):\n\t\tarm = sampleArm(means_ucbkl)\n\t\treward = getReward(means_true[arm])\n\t\trewards[arm] += reward\n\t\tsamples[arm] += 1\n\t\t# Modify the empirical mean\n\t\tmeans_emp[arm] = rewards[arm] / samples[arm]\n\t\t# Find out the kl-ucb means\n\t\ttarget = getTarget(t+1)\n\t\tmeans_ucbkl = {arm: findQ(target, means_emp[arm], samples[arm]) for arm in means_true.keys()}\n\n\tif verbose:\n\t\tprint(f'True means:\\n{means_true}')\n\t\tprint(f'Empirical means:\\n{means_emp}')\n\t\tprint(f'Number of pulls:\\n{samples}')\n\t\n\t# Return the regret\n\treturn getRegret(horizon, means_true, rewards)\n","sub_path":"Assignment1/submission/ucb_kl.py","file_name":"ucb_kl.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"637808003","text":"import numpy as np\nimport os\nimport pandas as pd\n# root_path='/home/eric/Documents/Hashtag-recommendation-for-social-images/neural_image_captioning/datasets/Custom/preprocessed_data'\n# data_path='./data/custom/'\n# validation_filename = data_path + 'validation_data.txt'\n# validation_data = pd.read_table(validation_filename, delimiter='*')\n# validation_data.drop(columns=['tweets'],inplace=True)\n# validation_data = validation_data.values.tolist()\n\ndata_path='./data/'\n# data_path='./data/NUS-WIDE/'\nvalidation_filename = data_path + 'validation_data.txt'\nvalidation_data = pd.read_csv(validation_filename, delimiter='*')\nvalidation_data = validation_data['hashtags'].tolist()\n\nprint(validation_data[0])\n\ntest_y=[]\npred_y=[]\n\npred_path=os.path.join(data_path,\"predicted_hashtags.txt\")\n# 
pred_path=os.path.join(data_path,\"ml_knn_predict.txt\")\nwith open(pred_path,\"r\") as file:\n    predicted_data=file.readlines()\n    print(len(predicted_data))\n    for i in range(len(predicted_data)):\n        line_arr=predicted_data[i].strip().split(\"*\")\n        test_y.append(validation_data[i].strip().split())\n        pred_y.append(line_arr)\n# with open(\"target_captions.txt\",\"r\") as file:\n#     target_labels=file.readlines()\n    # print(target_labels)\nprint(test_y[:5])\nprint(pred_y[:5])\nprint(len(test_y))\nprint(len(pred_y))\n\ndef precision_score(test_y, pred_y, k=1):\n    p_score = []\n    \n    for i in range(len(test_y)):\n        # print(pred_y[i][-k:])\n        # print(pred_y[i][-k])\n        # result_at_topk = pred_y[i][-k:]\n        count = 0\n        # if(k>len(pred_y[i])):\n        #     print(pred_y[i])\n        end=min(k,len(pred_y[i]))\n        for j in range(0,end):\n            if(pred_y[i][j] in test_y[i]):\n                count+=1\n        p_score.append(float(count) / float(k))\n        # if j in test_y[i]:\n        #     count += 1\n        # p_score.append(float(count) / float(k))\n\n    return np.mean(p_score)\n\ndef recall_score(test_y, pred_y, k=1):\n    r_score = []\n    for i in range(len(test_y)):\n        count = 0\n        end=min(k,len(pred_y[i]))\n        for j in range(0,end):\n            if(pred_y[i][j] in test_y[i]):\n                count+=1\n        r_score.append(float(count) / float(len(test_y[i])))\n\n    return np.mean(r_score)\n\ndef hits_score(test_y, pred_y, k=1):\n    h_score = []\n    for i in range(len(test_y)):\n        # result_at_topk = pred_y[i][-k:]\n        # count = 0\n        # for j in result_at_topk:\n        #     if j in test_y[i]:\n        #         count += 1\n        count = 0\n        end=min(k,len(pred_y[i]))\n        for j in range(0,end):\n            if(pred_y[i][j] in test_y[i]):\n                count+=1\n        h_score.append(1 if count > 0 else 0)\n\n    return np.mean(h_score)\n\nprecisions=[]\nrecalls=[]\nhits_rates=[]\nnum=5\nnames=[]\nfor i in range(num*3):\n    if(i= 1, 'A group with at least one leader must be chosen')\n\n        lider = grupo.lideres.all()[0]\n        grupo.lideres.clear()\n        grupo.lideres.add(lider)\n\n        datos = {'lider': lider.id}\n\n        form = self.form(data=datos)\n\n        self.assertTrue(form.is_valid())\n\n        form.desvincular_lider()\n\n        grupo = Grupo.objects.get(id=800)\n        lider.refresh_from_db()\n\n        self.assertEqual(grupo.estado, HistorialEstado.ARCHIVADO)\n        self.assertEqual(grupo.lideres.count(), 0)\n        self.prueba_estado_lider(lider)\n        self.assertEqual(lider.grupo, None)\n\n    def test_grupo_lideres_sigan_siendo_si_se_reemplaza_otro_lider(self):\n        \"\"\"\n        Verifies that when one leader is replaced, the group keeps the leaders that are\n        not involved in the change.\n        \"\"\"\n\n        miembro = self.miembro\n        lider = self.grupo.lideres.first()\n        lideres = self.grupo.lideres.exclude(id=lider.id)\n        datos = {'lider': lider.id, 'nuevo_lider': miembro.id}\n        form = self.form(data=datos)\n\n        self.assertTrue(form.is_valid())\n\n        form.desvincular_lider()\n\n        lider.refresh_from_db()\n        self.prueba_estado_lider(lider)\n\n        self.assertIn(miembro, self.grupo.lideres.all())\n        self.assertEqual(len(lideres), len(self.grupo.lideres.all()))\n\n    def test_lider_desvinculado_si_no_lidera_grupo(self):\n        \"\"\"\n        Verifies that the member is also unlinked even if they do not lead a group.\n        \"\"\"\n\n        miembro = self.miembro\n\n        form = self.form(data={'lider': miembro.id})\n\n        self.assertTrue(form.is_valid())\n\n        form.desvincular_lider()\n        miembro.refresh_from_db()\n\n        self.prueba_estado_lider(miembro)\n","sub_path":"miembros/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"238508457","text":"import 
os\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport math\n\nfrom skimage import io, transform, color\nfrom skimage.filters import threshold_otsu\nimport random\nimport numpy as np\nimport pickle\nimport keras\n\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom keras.layers.core import Lambda\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"4\"\nfrom skimage.measure import compare_ssim as ssim\nfrom keras.utils import np_utils, plot_model\n\n\nheight, width = 128, 128\nw_hei, w_wid = 32, 32\n\nbatch_size = 64\ndataset_len = 10500 # total cover images\n\ntest_percentage = 0.1\ntest_len = int(dataset_len * test_percentage)\ntrain_len = dataset_len - test_len\n\n### data\nfrom keras.datasets import cifar10\n(wm_array, _), (_, _) = cifar10.load_data()\ndef get_wm_array(train=1):\n np.random.seed(1313)\n np.random.shuffle(wm_array)\n \n if train == 1:\n return wm_array[0:train_len,:,:,:]\n else:\n test_set = wm_array[train_len:,:,:,:]\n np.random.shuffle(test_set)\n return test_set\n\ndef get_file_names(path='/data/xin/workspace_x/wmnn/18/images', train=1):\n #n = [x for x in os.listdir(path)]\n file_name = os.listdir(path)\n random.seed(1313)\n random.shuffle(file_name)\n\n if train == 1:\n # train_set_names = \n return file_name[0:train_len]\n else:\n # test_set_names = \n return file_name[train_len:]\n\ndef get_batch(path='/data/xin/workspace_x/wmnn/18/images', train=1, batch_size=64):\n n = get_file_names(path, train)\n wn = get_wm_array(train)\n \n i_c, i_w = 0, 0\n while True:\n \n ### cover\n if i_c+batch_size >= len(n):\n i_c = 0\n random.shuffle(n)\n c = np.random.choice(n, batch_size)\n else:\n c = n[i_c:i_c+batch_size]\n i_c += batch_size\n \n img_batch = []\n for each_c in c:\n img_c = io.imread(os.path.join(path, each_c))\n img_c = transform.resize(img_c, (height, width, 3), mode='reflect')\n img_batch.append(img_c)\n img_batch = np.array(img_batch)\n img_batch = np.reshape(img_batch, [batch_size, height, width, 3])\n # print('cover:',img_batch.shape, img_batch.max(), img_batch.min())\n \n #------------------------------------------------------------------\n \n ### wm\n if i_w+batch_size >= wn.shape[0]:\n i_w = 0\n np.random.shuffle(wn)\n w = wn[np.random.randint(0,wn.shape[0], size=batch_size), :, :, :]\n else:\n w = wn[i_w:i_w+batch_size,:,:,:]\n i_w += batch_size\n \n w_batch = []\n for each_w in w:\n img_w = color.rgb2gray(each_w)\n img_w = (img_w > threshold_otsu(img_w)) / 1.\n w_batch.append(img_w)\n w_batch = np.array(w_batch)\n w_batch = np.reshape(w_batch, [batch_size, w_hei, w_wid, 1])\n yield (img_batch, w_batch)\n\n\n### layer / model\n\nfrom keras.layers import Input, Conv2D, concatenate, Dense, Dropout, add, MaxPooling2D, Flatten, BatchNormalization, GlobalAveragePooling2D, Reshape\n# GaussianNoise, GaussianDropout\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras import optimizers\n# from keras.utils import multi_gpu_model\n\ndef conv_block(x, scale, prefix):\n\n d = K.int_shape(x)\n d = d[-1]\n\n filters = 32\n\n ### path #1\n p1 = Conv2D(int(filters * scale), kernel_size=(1, 1), strides=1, activation='relu', \\\n padding='same', name=prefix + 'path1_1x1_conv')(x)\n\n ### path #2\n p2 = Conv2D(int(filters * scale), kernel_size=(1, 1), strides=1, activation='relu', \\\n padding='same', name=prefix + 'path2_1x1_conv')(x)\n p2 = Conv2D(int(filters * scale), kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 
'path2_3x3_conv')(p2)\n\n ### path #3\n p3 = Conv2D(int(filters * scale), kernel_size=(1, 1), strides=1, activation='relu', \\\n padding='same', name=prefix + 'path3_1x1_conv')(x)\n p3 = Conv2D(int(filters * scale), kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 'path3_3x3_conv1')(p3)\n p3 = Conv2D(int(filters * scale), kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 'path3_3x3_conv2')(p3)\n\n out = concatenate([p1, p2, p3], axis=-1, name=prefix + 'path_combine')\n return out\n\ndef wm_enc(x, scale, prefix):\n \n ### conv and upsample\n wm_2D = conv_block(x, scale, prefix=prefix + 'conv1_')\n wm_2D = Conv2D(24, kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 'up1')(wm_2D)\n wm_2D = conv_block(wm_2D, scale, prefix=prefix + 'conv2_')\n wm_2D = Conv2D(48, kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 'up2')(wm_2D)\n \n return wm_2D\n\ndef wm_dec(x, scale, prefix):\n\n ### conv and downsample\n m_ext = conv_block(x, scale, prefix=prefix+'conv1_')\n m_ext = Conv2D(24, kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 'pool1')(m_ext)\n m_ext = conv_block(m_ext, scale, prefix=prefix+'conv2_')\n m_ext = Conv2D(1, kernel_size=(3, 3), strides=1, activation='relu', \\\n padding='same', name=prefix + 'pool2')(m_ext)\n\n return m_ext\n\nimport tensorflow as tf\n\ndef SSIM_LOSS(y_true , y_pred):\n score=tf.reduce_mean(tf.image.ssim(y_true, y_pred, 2.0))\n return 1-score\n\ndef G(in_w = (w_hei, w_wid, 1), in_c = (height, width, 3), scale=1):\n\n C = Input(shape=in_c, name='C')\n\n W = Input(shape=in_w, name='W')\n w_code = wm_enc(W, scale, 'wm_enc_')\n w_code = Reshape(target_shape=(128,128,3), name='wm_code3')(w_code)\n #w_code = conv_block(w_code, scale, prefix='wm_code_3_encode_')\n #w_code = Conv2D(1, (3,3), name='En_W_2', padding='same')(w_code)\n\n G = concatenate([C,w_code], axis=-1)\n x = conv_block(G, scale=int(scale*2), prefix='em_en_1')\n\n\n M = Conv2D(3, kernel_size=(3, 3), padding='same', strides=1, activation='sigmoid', name='M')(x) #128 128 3\n\n G_model = Model(inputs=[C,W], outputs=M)\n G_model.compile(optimizer='adam', loss= SSIM_LOSS)\n\n print(\"===========================\")\n print(\"Model G:{C,W}->M\")\n G_model.summary()\n\n return G_model\n\ndef R(in_m = (height, width, 3), scale = 1): #128 128 3\n\n M = Input(shape = in_m, name='M')\n\n M1 = Reshape(target_shape=(32,32,48), name='wm_code1_reshapeb')(M)\n\n W_prime = wm_dec(M1, scale, 'm1_dec_')\n\n R_model = Model(inputs=M, outputs=W_prime)\n R_model.compile(optimizer='adam', loss = 'binary_crossentropy')\n \n print(\"===========================\")\n print(\"Model R:M->W_prime\")\n R_model.summary()\n\n return R_model\n\ndef D1(input_shape=(height, width, 3)):\n x = Input(shape = input_shape, name='D1_shapes')\n\n x1 = Conv2D(16, (3,3), name='D1_conv1', activation='relu', padding='same')(x)\n x1 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x1)\n #x1 = BatchNormalization()(x1)\n x2 = Conv2D(32, (3,3), name='D1_conv2', activation='relu', padding='same')(x1)\n x2 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x2)\n #x2 = BatchNormalization()(x2)\n x3 = Conv2D(64, (3,3), name='D1_conv3', activation='relu', padding='same')(x2)\n x3 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x3)\n #x3 = BatchNormalization()(x3)\n\n #x4 = Flatten()(x3)\n x3 = BatchNormalization()(x3)\n x4 = GlobalAveragePooling2D()(x3)\n\n x5 = Dense(units=512, 
activation='relu')(x4)\n x6 = Dense(units=256, activation='relu')(x5)\n \n \n\n output = Dense(units=1, activation='sigmoid')(x6)\n\n model = Model(inputs=x, outputs=output)\n #adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n #model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\n \n print(\"===========================\")\n print(\"Model D1:Image->real?\")\n model.summary()\n\n return model\n\ndef D2(M_shape=(height, width, 3), C_shape=(height, width, 3), W_shape=(w_hei, w_wid, 1)):\n \n scale = 1\n I1 = Input(shape = M_shape, name='D2_M_shape')\n I2 = Input(shape = C_shape, name='D2_C_shape')\n I3 = Input(shape = W_shape, name='D2_W_shape')\n\n x = concatenate([I1, I2], axis=-1)\n w_code = wm_enc(I3, scale , 'w_enc_')\n w_code = Reshape(target_shape=(128,128,3), name='wm_code')(w_code)\n #w_code = Conv2D(1, (3,3), name='D1_conv1', activation='relu', padding='same')(w_code)\n x = concatenate([x, w_code], axis=-1)\n\n x1 = Conv2D(16, (3,3), name='D2_conv1', activation='relu', padding='same')(x)\n x1 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x1)\n #x1 = BatchNormalization()(x1)\n x2 = Conv2D(32, (3,3), name='D2_conv2', activation='relu', padding='same')(x1)\n x2 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x2)\n #x2 = BatchNormalization()(x2)\n x3 = Conv2D(64, (3,3), name='D2_conv3', activation='relu', padding='same')(x2)\n x3 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x3)\n #x3 = BatchNormalization()(x3)\n\n #x4 = Flatten()(x3)\n x3 = BatchNormalization()(x3)\n x4 = GlobalAveragePooling2D()(x3)\n\n x5 = Dense(units=512, activation='relu')(x4)\n x6 = Dense(units=256, activation='relu')(x5)\n #x6 = BatchNormalization()(x6)\n\n output = Dense(units=1, activation='sigmoid')(x6)\n\n model = Model(inputs=[I1, I2, I3], outputs=output)\n #adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n #model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\n \n print(\"===========================\")\n print(\"Model D2:three images->group?\")\n model.summary()\n\n return model\n\n\ndef shuffling(x):\n idxs = K.arange(0, K.shape(x)[0])\n idxs = K.tf.random_shuffle(idxs)\n return K.gather(x, idxs)\n\ndef stegoGAN(in_w=(w_hei, w_wid, 1), in_c=(height, width, 3)):\n G_model = G()\n R_model = R()\n D1_model = D1()\n D2_model = D2()\n C = Input(shape=in_c, name='C')\n W = Input(shape=in_w, name='W')\n M = G_model([C, W])\n\n ## models for traning\n # a. G connected to R\n W_prime = R_model(M)\n GR_model = Model(inputs=[C, W], outputs=[M, W_prime])\n #GR_model.compile(optimizer='adam', \\\n # loss=[SSIM_LOSS, 'binary_crossentropy'], \\\n # loss_weights=[1., 1.]\n # )\n \n ssim_loss = SSIM_LOSS(C, M)\n w_loss = K.mean(K.binary_crossentropy(W,W_prime)) \n gr_loss = ssim_loss + w_loss\n GR_model.add_loss(gr_loss)\n GR_model.compile(optimizer='adam')\n \n print(\"===========================\")\n print(\"Model GR:CW->M->W_prime\")\n GR_model.summary()\n\n # b. G connected to D1\n score1_M = D1_model(M)\n score1_C = D1_model(C)\n d1_loss = - K.mean(K.log(score1_C + 1e-6) + K.log(1 - score1_M + 1e-6))\n #d1_loss = - K.sum(K.log(score1_C + 1e-6) + K.log(1 - score1_M + 1e-6))\n\n\n GD1_model = Model(inputs=[C, W], outputs=[score1_M,score1_C])\n GD1_model.add_loss(d1_loss)\n GD1_model.compile(optimizer='adam')\n\n print(\"===========================\")\n print(\"Model GD1:CW->M->D1\")\n GD1_model.summary()\n\n # c. 
G connected to D2\n C_shuffle = Lambda(shuffling)(C)\n W_shuffle = Lambda(shuffling)(W)\n\n score2_t = D2_model([M, C, W])\n score2_f = D2_model([M, C_shuffle, W_shuffle])\n d2_loss = - K.mean(K.log(score2_t + 1e-6) + K.log(1 - score2_f + 1e-6))\n #d2_loss = - K.sum(K.log(score2_t + 1e-6) + K.log(1 - score2_f + 1e-6))\n\n GD2_model = Model(inputs=[C, W], outputs=[score2_t, score2_f])\n GD2_model.add_loss(d2_loss)\n GD2_model.compile(optimizer='adam')\n\n print(\"===========================\")\n print(\"Model GD2:CW->M, MCW->D2\")\n GD2_model.summary()\n\n return GR_model, GD1_model, GD2_model, G_model, R_model\n\ndef train(epochs=100):\n\n # model\n GR_model, GD1_model, GD2_model, G_model, R_model = stegoGAN()\n\n # data\n itr = get_batch(batch_size = batch_size, train = 1)\n\n # train\n history = []\n steps = int(dataset_len / batch_size)\n for epoch in range(epochs):\n for step in range(steps):\n C, W = itr.__next__()\n \n GR_loss = GR_model.train_on_batch([C,W], [])\n GD1_loss = GD1_model.train_on_batch([C,W], [])\n GD2_loss = GD2_model.train_on_batch([C,W], [])\n if step%50 == 0:\n print('Step:', step, 'GR_loss:', GR_loss, 'GD1_loss:', GD1_loss, 'GD2_loss:', GD2_loss)\n print('============================================================================')\n print('Epoch:', epoch, 'GR_loss:', GR_loss, 'GD1_loss:', GD1_loss, 'GD2_loss:', GD2_loss)\n print('============================================================================')\n history.append([GR_loss,GD1_loss,GD2_loss])\n G_model.save('/home/CVL1/Shaobo/StegoGAN/0_G.h5')\n R_model.save('/home/CVL1/Shaobo/StegoGAN/1_R.h5')\n #GR_model.save('/home/CVL1/Shaobo/StegoGAN/GR.h5')\n \n\n #with open('train_history/history_0_whole.pkl', 'wb') as file_pi:\n #pickle.dump(history.history, file_pi)\n\n\nimport smtplib\nfrom email.mime.text import MIMEText\n\nserver = \"smtp.gmail.com:587\"\nuser_account = \"johnbrown20033@gmail.com\"\npassword = \"ko963852\"\nmailto_list = [\"90liushaobo@gmail.com\"]\n\ndef send_mail(to_list, sub, content):\n me = \"python smtp alert \" + \"\"\n msg = MIMEText(content)\n msg['Subject'] = sub\n msg['From'] = me\n msg['To'] = \";\".join(mailto_list)\n try:\n s = smtplib.SMTP(server)\n s.starttls()\n s.login(user_account, password)\n s.sendmail(me, to_list, msg.as_string())\n s.close()\n return True\n except Exception as e:\n print(str(e))\n return False\n\n # pickle the history\n\n\nif __name__ == \"__main__\":\n print(\"===============\")\n \n \n train(epochs=5)\n if send_mail(mailto_list, \"Training1.py on GPUstation finished\", \"training1.py\"): \n print(\"====notification sent.====\")\n \n \n \n \n \n \n \n","sub_path":"training1.py","file_name":"training1.py","file_ext":"py","file_size_in_byte":14200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"534206517","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExperiments on Continuization (not yet published)\n\nTODO: merge this file with arch19.py as soon as everything is supported by continuization\n\"\"\"\n\nimport os\nimport shutil\nimport logging\nimport pickle\nimport sys\nimport numpy as np\nimport math\nfrom qronos.reachability.hybrid_sys import HybridSysControlLoop\nfrom qronos.util.latex_table import generate_table, format_float_ceil\nfrom copy import deepcopy\nimport qronos.examples\n\ndef output_dir():\n return os.path.dirname(os.path.realpath(__file__)) + \"/output_continuization/\"\n\nfrom qronos.controlloop import DigitalControlLoop\n\ndef example_C3_discrete_PI():\n 
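# This variant only differs from the base example by calling enable_immediate_ctrl(),\n    # which appears to be what removes the one-step y-to-u delay described below.\n    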
'''\n example_C_quadrotor_attitude_one_axis(), but without the one-step delay from y to u\n '''\n system = qronos.examples.example_C_quadrotor_attitude_one_axis()\n system.enable_immediate_ctrl()\n return system\n\ndef example_C4_with_continuous_P_and_discrete_I():\n '''\n academic example based on example_C_quadrotor_attitude_one_axis:\n\n P-controller is (unsoundly) merged into the continuous system.\n Only the I-controller is discrete.\n '''\n\n system=DigitalControlLoop()\n Jx=9.0359e-06\n K_control_integral = 3.6144e-3 # K_I,p\n K_control_proportional = 2.5557e-4 # K_f,p\n system.A_p = np.array([[-K_control_proportional/Jx]])\n system.B_p = np.array([[1/Jx]])\n\n system.T=0.003\n system.spaceex_time_horizon_periods = math.ceil(0.5/system.T)\n\n system.C_p = np.array([[1]])\n\n system.x_p_0_min = np.array([1]) * -1\n system.x_p_0_max = np.array([1]) * 1\n\n system.A_d = np.array([[1]])\n system.B_d = np.array([[system.T]])\n\n system.C_d = np.array([[-K_control_integral]])\n\n max_timing = 0.0\n system.delta_t_u_min=np.array([1]) * -max_timing * system.T\n system.delta_t_u_max=-system.delta_t_u_min\n system.delta_t_y_min=system.delta_t_u_min\n system.delta_t_y_max=-system.delta_t_y_min\n\n system.spaceex_iterations = 100\n system.spaceex_iterations_for_global_time = system.spaceex_time_horizon_periods\n system.spaceex_timeout = 0.0001* 3600 * 10\n system.enable_immediate_ctrl()\n system.plot_ylim_xp = [[-1.5, 1.5]]\n system.plot_ylim_xd = [[-0.035, 0.035]]\n return system\n\ndef example_C5_with_lowpass_PI():\n '''\n modified version of example_C_quadrotor_attitude_one_axis\n\n discrete PI controller, P channel is lowpass filtered\n '''\n system=DigitalControlLoop()\n Jx=9.0359e-06\n K_control_integral = 3.6144e-3 # K_I,p\n K_control_proportional = 2.5557e-4 # K_f,p\n system.A_p = np.array([[0]])\n system.B_p = np.array([[1/Jx]])\n\n # the original model is continuous. 
We consider a sampled version of the controller.\n # All following parameters are not from the original example.\n system.T=0.001\n system.spaceex_time_horizon_periods = math.ceil(0.5 / system.T)\n\n system.C_p = np.array([[1]])\n\n system.x_p_0_min = np.array([1]) * -1\n system.x_p_0_max = np.array([1]) * 1\n\n # We use the following controller discretization::\n # x_d_1: forward-euler approximation of integrator\n # x_d_2: lowpass for P controller\n T_lowpass = 0.01 # continuous-time equivalent time constant of lowpass\n lowpass_alpha = np.exp(-system.T / T_lowpass) # discretization\n system.A_d = np.asarray(np.diag([1, lowpass_alpha]))\n system.B_d = np.array([[system.T], [1 - lowpass_alpha]])\n\n system.C_d = np.array([[-K_control_integral, -K_control_proportional]])\n\n max_timing = 0.0\n system.delta_t_u_min=np.array([1]) * -max_timing * system.T\n system.delta_t_u_max=-system.delta_t_u_min\n system.delta_t_y_min=system.delta_t_u_min\n system.delta_t_y_max=-system.delta_t_y_min\n\n # hits timeout even for 10h :-(\n system.spaceex_iterations_for_global_time = system.spaceex_time_horizon_periods\n system.spaceex_timeout = 3600 * 10\n system.enable_immediate_ctrl()\n system.plot_ylim_xp = [[-1.5, 1.5]]\n system.plot_ylim_xd = [[-0.035, 0.035], [-1, 1]]\n return system\n\ndef main(argv):\n if \"--help\" in argv:\n # print(\"--load: load saved previous results (only runs the code for formatting the result table, skips the time-extensive actual analysis)\")\n print(\"--fast: only run a few experiments and not all, with very short timeout, for a quick test of the toolchain\")\n sys.exit()\n if os.path.exists(output_dir()) and not \"--load\" in argv:\n shutil.rmtree(output_dir())\n os.makedirs(output_dir())\n\n # Files are denoted with a unique prefix (e.g. A1) to simplify referencing them in publications\n systems={}\n systems['C4']=example_C4_with_continuous_P_and_discrete_I()\n if not \"--fast\" in sys.argv:\n systems['C5']=example_C5_with_lowpass_PI()\n systems['C3']=example_C3_discrete_PI()\n systems['C3'].spaceex_timeout = 360 # we just want to check if continuization would work, so 10min per iteration step must be enough. The analysis fails at a point where the reachable set of x_p has already grown to 3x the actual reachable set, so aborting then is fine.\n systems['C3_T_0.001']=example_C3_discrete_PI()\n systems['C3_T_0.001'].T = 0.001\n systems['C3_T_0.001'].spaceex_timeout = systems['C3'].spaceex_timeout # see above\n\n\n for key in systems:\n systems[key] = HybridSysControlLoop(systems[key])\n\n # If system names (the keys of systems[]) are given on the command line, process only these.\n # NOTE: invalid names will be ignored.\n requested_system_names = set(systems.keys()).intersection(set(argv))\n if requested_system_names:\n print(\"Example names were given on the command line. Only processing these: {}\".format(\", \".join(requested_system_names)))\n systems = {name: system for (name, system) in systems.items() if name in requested_system_names}\n\n if \"--load\" in argv:\n raise NotImplementedError(\"--load not yet supported. 
TODO: implement pickle functionality for mpmath intervals and matrices.\")\n # Load results from file\n # TODO re-enable pickling below\n with open(output_dir() + \"systems.pickle\", \"rb\") as f:\n systems=pickle.load(f)\n else:\n # Save systems to files, run analysis and simulation\n tmp = systems\n systems = {}\n for (name, system) in tmp.items():\n for suffix in [\"_continuized\", \"_orig\"]:\n systems[name + suffix] = deepcopy(system)\n systems[name + suffix].name = name + suffix\n\n for (name, system) in sorted(systems.items()):\n try:\n if name.endswith(\"_continuized\"):\n system.run_analysis_continuized(name, output_dir())\n else:\n system.run_analysis(name, output_dir())\n except Exception:\n logging.error(\"Failed to process system {}\".format(name))\n raise\n with open(output_dir() + \"systems.pickle\", \"wb\") as f:\n # pickling disabled for now\n # pickle.dump(systems, f)\n pass\n\n\n\n # Generate LaTeX table\n print(\"producing LaTeX table\")\n def format_spaceex_columns(system):\n def format_spaceex_result(stability, time):\n if stability == \"stable\":\n return r\"\\checkmark\"\n elif stability==\"N/A\":\n return \"---\"\n else:\n return r\"$\\times$ \" + stability\n def format_spaceex_runtime(stability, time):\n if stability.startswith(\"crash\") or stability==\"N/A\" or stability.startswith(\"diverging\") or stability.startswith(\"error\"):\n return \"---\"\n if stability.startswith(\"timeout\") and time >= 7200:\n return \"---\"\n return \"{:.0f}\\,s\".format(time)\n stability = system.results.get('stability_spaceex', \"NOT RUN\")\n time = system.results.get('spaceex_hypy', {}).get('time', -1)\n return {'result': format_spaceex_result(stability, time),\n 'runtime': format_spaceex_runtime(stability, time)}\n\n for (name, system) in sorted(systems.items()):\n system.name = name\n # [ ('column name', 'alignment', lambda system: generate_column_from_system(system)), ... ]\n columns = [ ('name', 'l|', lambda s: s.name),\n ('Continuization', 'l|', lambda s: s.results.get('continuization', '')),\n (r'$n\\idxPlant$', 'c', lambda s: s.s.n_p),\n (r'$n\\idxDiscrete$', 'c', lambda s: s.s.n_d),\n ('$m$', 'c', lambda s: s.s.m),\n ('$p$', 'c', lambda s: s.s.p),\n ('timing', 'l|', lambda s: 'constant' if s.s.is_fixed_timing() else 'varying'),\n ('$T$', 'l|', lambda s: str(s.s.T)),\n ('SpaceEx', 'l', lambda s: format_spaceex_columns(s)['result']),\n (r'$t_{\\mathrm{SE}}$', 'r', lambda s: format_spaceex_columns(s)['runtime']),\n (r'$K_{\\mathrm{SE}}$', 'r|', lambda s: format_float_ceil(s.results['k'], digits=3) if 'k' in s.results else '---'),\n # TODO implement LTI stability check for this case\n # ('LTI-stability', 'l', lambda s: s.results['stability_eigenvalues'].replace(\"N/A\",\"---\"))\n ]\n\n\n\n table = generate_table(columns, sorted(iter(systems.values()), key = lambda s: s.name.split(\"/\")[-1]))\n print(table)\n with open(output_dir() + \"results.tex\", \"w\") as f:\n f.write(table)\n\n if \"--fast\" in argv:\n print(\"CAUTION: The script was run with --fast, which means that the results are imprecise and/or useless. 
Use this ONLY for testing the code, NEVER for publication-ready results.\")\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","sub_path":"src/qronos/reachability/experiments/continuization.py","file_name":"continuization.py","file_ext":"py","file_size_in_byte":9651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"246052107","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\codereload\\win32api\\pump.py\nimport ctypes\nfrom ctypes.wintypes import DWORD, HWND, LPARAM, POINT, UINT, WPARAM\nPM_REMOVE = 1\nWM_QUIT = 18\n\nclass MSG(ctypes.Structure):\n _fields_ = [('hwnd', HWND), ('message', UINT), ('wParam', WPARAM), ('lParam', LPARAM), ('time', DWORD), ('pt', POINT)]\n\n\ndef PumpWindowsMessages():\n msg = MSG()\n while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg), HWND(), 0, 0, PM_REMOVE):\n if msg.message == WM_QUIT:\n return False\n ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))\n ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))\n\n return True","sub_path":"client/codereload/win32api/pump.py","file_name":"pump.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"480251246","text":"import shutil\nimport time\n\n\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.observers import Observer\n\n\nclass FolderHandler(PatternMatchingEventHandler):\n\n def __init__(self, target_folder, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target_folder = target_folder\n\n def on_created(self, event):\n # to prevent PermissionError: [Errno 13] Permission denied:\n time.sleep(1)\n\n # move source file to target folder\n shutil.move(event.src_path, self.target_folder)\n\n\nif __name__ == '__main__':\n # source_folder: watching folder, target_folder: move file to folder.\n folders = {\n 'foo': {\n 'source_folder': './foo',\n 'target_folder': './home'\n },\n 'bar': {\n 'source_folder': './bar',\n 'target_folder': './home'\n }\n }\n\n observer = Observer()\n\n for _, folder in folders.items():\n # patterns: txt file only\n event_handler = FolderHandler(target_folder=folder['target_folder'], patterns=[\n '*.txt'], ignore_directories=True, case_sensitive=False)\n observer.schedule(\n event_handler, folder['source_folder'], recursive=True)\n\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"192386463","text":"import os\nimport webapp2\nimport jinja2\nimport hashlib\nimport hmac\n\nSECRET = 'imsosecret'\n\nfrom google.appengine.ext import db\n\ntemplate_dir = os.path.join(os.path.dirname(__file__),'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),\n\t\t\t\t\t\t\t\tautoescape=True)\n\n\ndef hash_str(s):\n\treturn hmac.new(SECRET,s).hexdigest()\n\ndef make_secure_val(s):\n\treturn '%s|%s' % (s, hash_str(s))\n\ndef check_secure_val(h):\n\tval = h.split('|')[0]\n\tif h == make_secure_val(val):\n\t\treturn val\n\telse:\n\t\treturn None\n\n\n\nclass Handler(webapp2.RequestHandler):\n\tdef write(self, *a, **kw):\n\t\tself.response.out.write(*a, **kw)\n\n\tdef render_str(self,template, 
**params):\n\t\tt = jinja_env.get_template(template)\n\t\treturn t.render(params)\n\n\tdef render(self, template, **kw):\n\t\tself.write(self.render_str(template, **kw))\n\nclass MainPage(Handler):\n\n\tdef get(self):\n\t\tself.response.headers['Content-Type'] = 'text/plain'\n\t\tvisits = 0\n\t\t# trying our new variables for adding new headers 'user' & 'password'\n\t\tuser=''\n\t\tvisit_cookie_str = self.request.cookies.get('visits')\n\t\ta_user = self.request.cookies.get('user')\n\t\tyour_name = 'Caesar'\n\t\tyour_password = 'aliens'\n\n\t\tif visit_cookie_str:\n\t\t\tcookie_val = check_secure_val(visit_cookie_str)\n\t\t\tif cookie_val:\n\t\t\t\tvisits = int(cookie_val)\n\n\t\tvisits += 1\n\t\tnew_cookie_val = make_secure_val(str(visits))\n\n\t\tself.response.headers.add_header('Set-Cookie', 'visits=%s' %new_cookie_val)\n\t\tself.response.headers.add_header('Set-Cookie', 'user=%s'%your_name)\n\t\tself.response.headers.add_header('Set-Cookie', 'password=%s'%your_password)\n\n\t\tif visits > 100:\n\t\t\tself.write(\"You are the best ever!\")\n\t\telse:\n\t\t\tself.write(\"%s, you've been here %s times\" % (str(a_user),str(visits)))\n\n\napp = webapp2.WSGIApplication([('/', MainPage)], debug=True)\n\n\n","sub_path":"lesson4/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"583621158","text":"# Loads a target data then defines tables for it\nspark.read \\\n .option(\"header\", True) \\\n .csv(\"./testdata/adult.csv\") \\\n .write \\\n .saveAsTable(\"adult\")\n\nscavenger.misc() \\\n .setDbName(\"default\") \\\n .setTableName(\"adult\") \\\n .setRowId(\"tid\") \\\n .flatten() \\\n .write \\\n .saveAsTable(\"adult_flatten\")\n\nspark.table(\"adult\").show(1)\nspark.table(\"adult_flatten\").show(1)\n\n# Loads a ground truth data then defines tables for it\nspark.read \\\n .option(\"header\", True) \\\n .csv(\"./testdata/adult_clean.csv\") \\\n .write \\\n .saveAsTable(\"adult_clean\")\n\nspark.table(\"adult_flatten\") \\\n .join(spark.table(\"adult_clean\"), [\"tid\", \"attribute\"], \"inner\") \\\n .where(\"not(value <=> correct_val)\") \\\n .write \\\n .saveAsTable(\"error_cells_ground_truth\")\n\nspark.table(\"adult_clean\").show(1)\nspark.table(\"error_cells_ground_truth\").show(1)\n\n# Detects error cells then repairs them\nrepaired_df = scavenger.repair() \\\n .setDbName(\"default\") \\\n .setTableName(\"adult\") \\\n .setRowId(\"tid\") \\\n .setConstraints(\"./testdata/adult_constraints.txt\") \\\n .run()\n\n# Computes performance numbers (precision & recall)\n# - Precision: the fraction of correct repairs, i.e., repairs that match\n# the ground truth, over the total number of repairs performed\n# - Recall: correct repairs over the total number of errors\npdf = repaired_df.join(spark.table(\"adult_clean\"), [\"tid\", \"attribute\"], \"inner\")\nrdf = repaired_df.join(spark.table(\"error_cells_ground_truth\"), [\"tid\", \"attribute\"], \"right_outer\")\n\nprecision = pdf.where(\"repaired <=> correct_val\").count() / pdf.count()\nrecall = rdf.where(\"repaired <=> correct_val\").count() / rdf.count()\nf1 = (2.0 * precision * recall) / (precision + recall)\n\nprint(\"Precision=%s Recall=%s F1=%s\" % (precision, recall, f1))\n\n","sub_path":"resources/examples/adult.py","file_name":"adult.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"77815804","text":"import 
platform\nimport pandas as pd\nimport json, csv\nimport pickle\nfrom datetime import datetime\nfrom operator import itemgetter\nfrom pprint import pprint\nimport importlib\nimport itertools\nfrom copy import deepcopy\nimport numpy as np\nimport math, time, collections, os, errno, sys, code, random\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom sklearn import mixture\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import StandardScaler\nfrom multiprocessing import Pool\nimport shutil\nimport configparser\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom scipy import spatial\n\n\n\nclass RCAE2E:\n \n def __init__(self, \\\n \\\n data_folder_path, \\\n tool_id, \\\n normalize, \\\n sample, \\\n load_data_mode, \\\n \\\n r_w, \\\n t_w, \\\n K, \\\n _lambda, \\\n beta, \\\n alpha, \\\n maxIters, \\\n TICC_GTC_convergence_threshold, \\\n num_proc, \\\n output_path, \\\n \\\n c, \\\n tau, \\\n RCA_CTC_convergence_threshold, \\\n \\\n significant_difference_threshold):\n \n self.data_folder_path = data_folder_path\n self.tool_id = tool_id\n self.normalize = normalize\n self.sample = sample\n self.load_data_mode = load_data_mode\n \n self.r_w = r_w\n self.t_w = t_w\n self.K = K\n self._lambda = _lambda\n self.beta = beta\n self.alpha = alpha\n self.maxIters = maxIters\n self.TICC_GTC_convergence_threshold = TICC_GTC_convergence_threshold\n self.num_proc = num_proc\n self.output_path = output_path\n \n self.TICC_GTC_mode = None\n self.r = None\n self.t = None\n self.T = None\n \n self.c = c\n self.tau = tau\n self.RCA_CTC_convergence_threshold = RCA_CTC_convergence_threshold\n \n self.significant_difference_threshold = significant_difference_threshold\n \n \n \n def fit(self):\n \n print(\"##########################################################################################################\")\n print(\"##########################################################################################################\")\n print(\"@@@@@@@@@@@@ RCAE2E start! @@@@@@@@@@@@\")\n \n \n self.r = 2\n \n ### save as pickle\n \n with open(os.path.join(data_folder_path, \"test\", \"anomalous_run_data_MRF\" + \".pickle\"), 'rb') as file:\n anomalous_run_data_MRF = pickle.load(file)\n \n with open(os.path.join(data_folder_path, \"test\", \"ground_truth_run_data_MRF\" + \".pickle\"), 'rb') as file:\n ground_truth_run_data_MRF = pickle.load(file)\n \n \n ### RCA_CTC\n \n causal_anomaly_score = self.RCA_CTC(anomalous_run_data_MRF, ground_truth_run_data_MRF)\n \n ### save as pickle\n \n with open(os.path.join(data_folder_path, \"test\", \"causal_anomaly_score\" + \".pickle\"), 'wb') as file:\n pickle.dump(causal_anomaly_score, file)\n \n print(\"@@@@@@@@@@@@ RCAE2E end! 
@@@@@@@@@@@@\")\n print(\"##########################################################################################################\")\n print(\"##########################################################################################################\")\n \n \n \n def load_data(self):\n \n load_data_module = importlib.import_module(\"3-load_data\")\n load_data_class = getattr(load_data_module, \"load_data\")\n load_data_instance = load_data_class(data_folder_path = self.data_folder_path, \\\n tool_id = self.tool_id, \\\n normalize = self.normalize, \\\n sample = self.sample, \\\n load_data_mode = self.load_data_mode)\n data = load_data_instance()\n \n return data\n \n \n \n def TICC_GTC(self, data):\n \n TICC_GTC_module = importlib.import_module(\"5-TICC_GTC\")\n TICC_GTC_class = getattr(TICC_GTC_module, \"TICC_GTC\")\n TICC_GTC_instance = TICC_GTC_class(r_w = self.r_w, \\\n t_w = self.t_w, \\\n K = self.K, \\\n _lambda = self._lambda, \\\n beta = self.beta, \\\n alpha = self.alpha, \\\n maxIters = self.maxIters, \\\n TICC_GTC_convergence_threshold = self.TICC_GTC_convergence_threshold,\\\n num_proc = self.num_proc, \\\n output_path = self.output_path, \\\n \\\n TICC_GTC_mode = self.TICC_GTC_mode, \\\n r = self.r, \\\n t = self.t, \\\n T = self.T)\n \n return TICC_GTC_instance.fit(data = data)\n \n \n \n def compare_two_profiles(self, old_profile, new_profile):\n \n method = \"test\"\n \n if method == 0 or method == \"test\":\n \n difference_proportion_mean = []\n for idx in range( len(old_profile[\"cluster_assignment\"]) ):\n old_MRF = np.asarray(old_profile[\"cluster_MRFs\"][ old_profile[\"cluster_assignment\"][idx] ])\n new_MRF = np.asarray(new_profile[\"cluster_MRFs\"][ new_profile[\"cluster_assignment\"][idx] ])\n difference = np.abs(old_MRF - new_MRF)\n #print(\"difference: \" + str(difference))\n _sum = np.sum([old_MRF, new_MRF], axis = 0)\n #print(\"_sum: \" + str(_sum))\n \n difference_proportion = []\n for i in range(len(old_MRF)):\n difference_proportion.append([])\n for j in range(len(old_MRF[0])):\n if _sum[i][j] != 0:\n difference_proportion[i].append(difference[i][j] / _sum[i][j])\n else:\n difference_proportion[i].append(0)\n difference_proportion = np.asarray(difference_proportion)\n \n #difference_proportion = np.divide(difference, _sum, where = _sum != 0)\n #print(\"difference_proportion: \" + str(difference_proportion))\n difference_proportion_mean.append(np.mean(difference_proportion))\n ### drop all nan (ya why not)\n #print(\"difference_proportion_mean: \" + str(difference_proportion_mean))\n #print(~np.isnan(difference_proportion_mean))\n #difference_proportion_mean = difference_proportion_mean[~np.isnan(difference_proportion_mean)]\n difference_proportion_mean_all_MRFs = np.mean(difference_proportion_mean)\n if method != \"test\":\n print(\"difference_proportion_mean_all_MRFs: \" + str(difference_proportion_mean_all_MRFs))\n \n profiles_difference = difference_proportion_mean_all_MRFs\n profiles_difference_method_0 = profiles_difference\n \n \n if method == 1 or method == \"test\":\n \n difference_proportion = []\n for idx in range( len(old_profile[\"cluster_assignment\"]) ):\n # for idx in range(1):\n old_MRF = np.asarray(old_profile[\"cluster_MRFs\"][ old_profile[\"cluster_assignment\"][idx] ])\n new_MRF = np.asarray(new_profile[\"cluster_MRFs\"][ new_profile[\"cluster_assignment\"][idx] ])\n #print(\"old_MRF: \" + str(old_MRF))\n #print(\"new_MRF: \" + str(new_MRF))\n difference = np.abs(old_MRF - new_MRF)\n difference = np.sum(difference)\n 
#print(\"difference: \" + str(difference))\n _sum = np.sum(old_MRF) + np.sum(new_MRF)\n #print(\"_sum: \" + str(_sum))\n \n difference_proportion.append(difference / _sum)\n \n #print(\"difference_proportion: \" + str(difference_proportion))\n\n difference_proportion_mean = np.mean(difference_proportion)\n if method != \"test\":\n print(\"difference_proportion_mean: \" + str(difference_proportion_mean))\n \n profiles_difference = difference_proportion_mean\n profiles_difference_method_1 = profiles_difference\n \n \n if method == 2 or method == \"test\":\n \n cosine_distance = []\n for idx in range( len(old_profile[\"cluster_assignment\"]) ):\n # for idx in range(1):\n old_MRF = old_profile[\"cluster_MRFs\"][ old_profile[\"cluster_assignment\"][idx] ]\n new_MRF = new_profile[\"cluster_MRFs\"][ new_profile[\"cluster_assignment\"][idx] ]\n cosine_distance.append(spatial.distance.cosine( list(itertools.chain(*old_MRF)), list(itertools.chain(*new_MRF)) ))\n \n #print(\"difference_proportion: \" + str(difference_proportion))\n\n cosine_distance_mean = np.mean(cosine_distance)\n if method != \"test\":\n print(\"cosine_distance_mean: \" + str(cosine_distance_mean))\n \n profiles_difference = cosine_distance_mean\n profiles_difference_method_2 = profiles_difference\n \n \n if method == \"test\":\n print(\"####################################################################\")\n print(\"profiles_difference_method_0: \" + str(profiles_difference_method_0))\n print(\"profiles_difference_method_1: \" + str(profiles_difference_method_1))\n print(\"profiles_difference_method_2: \" + str(profiles_difference_method_2))\n print(\"####################################################################\")\n \n \n if profiles_difference >= self.significant_difference_threshold:\n significant_difference = True\n else:\n significant_difference = False\n \n return significant_difference\n \n \n \n def RCA_CTC(self, anomalous_run_data_MRF, ground_truth_run_data_MRF):\n \n RCA_CTC_module = importlib.import_module(\"6-RCA_CTC\")\n RCA_CTC_class = getattr(RCA_CTC_module, \"RCA_CTC\")\n RCA_CTC_instance = RCA_CTC_class(c = self.c, \\\n tau = self.tau, \\\n RCA_CTC_convergence_threshold = self.RCA_CTC_convergence_threshold, \\\n \\\n t_w = self.t_w, \\\n r = self.r)\n \n s = RCA_CTC_instance.fit(anomalous_run_data_MRF = anomalous_run_data_MRF, \\\n ground_truth_run_data_MRF = ground_truth_run_data_MRF)\n \n return s\n \n \n \nif __name__ == \"__main__\":\n \n ### get parameters form parameters.ini\n \n config = configparser.ConfigParser()\n config.read(\"parameters.ini\")\n \n \n ### parameters for load data\n \n data_folder_path = eval(config.get(\"load_data\", \"data_folder_path\"))\n tool_id = config.get(\"load_data\", \"tool_id\")\n normalize = config.getboolean(\"load_data\", \"normalize\")\n sample = config.getint(\"load_data\", \"sample\")\n load_data_mode = config.getint(\"load_data\", \"load_data_mode\")\n \n \n ### parameters for TICC_GTC\n \n r_w = config.getint(\"TICC_GTC\", \"r_w\")\n t_w = config.getint(\"TICC_GTC\", \"t_w\")\n K = config.getint(\"TICC_GTC\", \"K\")\n _lambda = config.getfloat(\"TICC_GTC\", \"_lambda\")\n beta = config.getfloat(\"TICC_GTC\", \"beta\")\n alpha = config.getfloat(\"TICC_GTC\", \"alpha\")\n maxIters = config.getint(\"TICC_GTC\", \"maxIters\")\n TICC_GTC_convergence_threshold = config.getfloat(\"TICC_GTC\", \"TICC_GTC_convergence_threshold\")\n num_proc = config.getint(\"TICC_GTC\", \"num_proc\")\n output_path = eval(config.get(\"TICC_GTC\", \"output_path\"))\n \n \n ### 
parameters for RCA_CTC\n \n c = config.getfloat(\"RCA_CTC\", \"c\")\n tau = config.getfloat(\"RCA_CTC\", \"tau\")\n RCA_CTC_convergence_threshold = config.getfloat(\"RCA_CTC\", \"RCA_CTC_convergence_threshold\")\n \n \n ### parameters for RCAE2E\n \n significant_difference_threshold = config.getfloat(\"RCAE2E\", \"significant_difference_threshold\")\n \n \n ### call RCAE2E\n \n RCAE2E_instance = RCAE2E(data_folder_path = data_folder_path, \\\n tool_id = tool_id, \\\n normalize = normalize, \\\n sample = sample, \\\n load_data_mode = load_data_mode, \\\n \\\n r_w = r_w, \\\n t_w = t_w, \\\n K = K, \\\n _lambda = _lambda, \\\n beta = beta, \\\n alpha = alpha, \\\n maxIters = maxIters, \\\n TICC_GTC_convergence_threshold = TICC_GTC_convergence_threshold,\\\n num_proc = num_proc, \\\n output_path = output_path, \\\n \\\n c = c, \\\n tau = tau, \\\n RCA_CTC_convergence_threshold = RCA_CTC_convergence_threshold, \\\n \\\n significant_difference_threshold = significant_difference_threshold)\n RCAE2E_instance.fit()","sub_path":"package/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":14201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"317209357","text":"import collections\nclass Solution(object):\n def uniqueLetterString(self, S):\n N = len(S)\n index = collections.defaultdict(list)\n peek = collections.defaultdict(int)\n for i, c in enumerate(S):\n index[c].append(i)\n for c in index:\n index[c].extend([N, N])\n\n def get(c):\n return index[c][peek[c] + 1] - index[c][peek[c]]\n\n ans = 0\n cur = sum(get(c) for c in index)\n for i, c in enumerate(S):\n ans += cur\n oldv = get(c)\n peek[c] += 1\n cur += get(c) - oldv\n return ans % (10**9 + 7)\nval=Solution()\nstr1=input()\nprint(val.uniqueLetterString(str1,))\n","sub_path":"countunique.py","file_name":"countunique.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"156013012","text":"from django.http import HttpResponse\nfrom django.shortcuts import render,redirect\n\nfrom .models import User, Product, Organisation, Order, OrderProduct, PdfUplaod\n\nfrom . 
import api\nimport os\n\ndef create_new_user(user_name):\n    # Create and persist a User row for the submitted name.\n    newUser = User.objects.create(name = user_name)\n    newUser.save()\n    \n    return newUser\n    \n\ndef add_new_product(item):\n    # Create and persist a Product from a single parsed invoice line item.\n    name = item['description']\n    date = item['date']\n    price = item['price']\n    quantity = item['quantity']\n    tax = item['tax']\n    total = item['discount']\n    \n    newProduct = Product.objects.create(name= name, time= date, price= price, quantity= quantity, total = total)\n    newProduct.save()\n    return newProduct\n\ndef add_new_organisation(org_name):\n    # Create and persist an Organisation with the given name.\n    newOrg = Organisation.objects.create(name= org_name )\n    newOrg.save()\n    return newOrg\n\n\ndef create_new_order(user, biller, vender, time, file):\n    # Create and persist an Order linking the user, biller and vendor.\n    newOrder = Order.objects.create(user= user, to_org= biller, from_org= vender, time= time, pdf= file)\n    newOrder.save()\n    return newOrder\n    \ndef upload_pdf(file):\n    # Persist the uploaded PDF so it can be attached to an Order.\n    newPDF = PdfUplaod.objects.create(pdf= file)\n    newPDF.save()\n    return newPDF\n\ndef upload(request):\n    \n    if(request.method=='POST'):\n        \n        user_name = request.POST['username']\n        \n        pdf_file = request.FILES[\"file\"]\n        \n        pdf_obj = upload_pdf(pdf_file)\n        pdf = pdf_obj.pdf\n        \n        # Create a new user from the submitted name.\n        user = create_new_user(user_name)\n        \n        # Parse the uploaded invoice PDF into a dictionary.\n        data = api.InvoiceAPI(pdf)\n        # print(data)\n        \n        to_org = data['bill_to_name']\n        from_org = data['vendor']['name']\n        \n        biller = add_new_organisation(to_org)\n        vender = add_new_organisation(from_org)\n        \n        new_order = create_new_order(user, biller, vender, data['date'], pdf_obj)\n        \n        # Create one Product per invoice line item and map it to the order.\n        for item in data['line_items']:\n            \n            new_product = add_new_product(item)\n            \n            mapping_between_product_order = OrderProduct(order = new_order, product = new_product)\n            mapping_between_product_order.save()\n        \n\n    return render(request,'upload.html')\n\n\n'''\n\n    Return the orders related to the given organisation ('org_name'):\n    1. If receiver is 'org_name'.\n    2. 
If sender is 'ord_name'.\n \n'''\n\ndef search_biller(org_name):\n \n order_list = Order.objects.filter(to_org__name__contains = org_name)\n \n print(order_list)\n return order_list\n \n \ndef search_vender(org_name):\n \n \n order_list = Order.objects.filter(from_org__name__contains = org_name)\n \n print(order_list)\n return order_list\n \n \n \n \ndef search_product(name):\n \n \n \n products = OrderProduct.objects.filter(product__name__contains = name)\n \n # print(products)\n \n \n order_list = set()\n \n for product in products:\n order = product.order\n order_list.add(order)\n \n # print(order_list)\n return order_list\n\n\ndef base_url(base):\n \n \n url = 'media/'+str(base)\n return url\n\ndef search(request):\n \n if(request.method=='POST'):\n choice = request.POST['choice']\n query = request.POST['query']\n \n # print(choice)\n # print(query)\n if(choice== \"biller\"):\n results = search_biller(query)\n \n elif(choice == \"vender\"):\n results = search_vender(query)\n \n elif(choice == \"product\"):\n results = search_product(query)\n \n print(results)\n \n \n res_arr=[]\n \n for result in results:\n \n entry = {}\n entry['user'] = result.user.name\n entry['from'] = result.from_org.name\n entry['to'] = result.to_org.name\n entry['timestamp'] = result.time\n entry['pdf'] = base_url(result.pdf.pdf)\n \n res_arr.append(entry)\n \n print(res_arr)\n \n \n return render(request, 'result.html', {'results':res_arr})\n \n \n \n\n return render(request,'search.html')\n\ndef result(request):\n \n return render(request,'result.html')","sub_path":"invoice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"302480711","text":"#!/usr/local/bin/python3\n#vim: set expandtab:\n#vim: tabstop=4:\n#vim: ai:\n#vim: shiftwidth=4:\n\nfrom pymarc import MARCReader, XMLWriter\nfrom io import BytesIO\nimport re\nimport sys\nfrom datetime import datetime, timedelta\nfrom marcaroni import db\nimport optparse\nimport psycopg2.extras\nimport os\n\ndef parse_config():\n parser = optparse.OptionParser(usage=\"%prog [options] [INPUT_FILE]\")\n parser.add_option(\"--init\", dest=\"init\", default=False, action=\"store_true\",\n help=\"Initialize the database.\")\n parser.add_option(\"-t\", \"--test\", dest=\"test\", default=False, action=\"store_true\",\n help=\"Use the test database config conf/.marcaroni.test.ini\")\n parser.add_option(\"-q\", \"--quiet\", dest=\"silent\", default=False, action=\"store_true\",\n help=\"Quiet mode: process without interaction. DANGER use at your own risk.\")\n parser.add_option(\"-s\", \"--source\", dest=\"source\",\n help=\"Numerical id of bib source for this batch. If empty, will prompt for this.\")\n parser.add_option(\"-u\", \"--user\", dest=\"user_id\", default='1',\n help=\"User id of the record creator and editor. 
[default: %default]\")\n opts, args = parser.parse_args()\n return opts.init, opts.source, opts.test, opts.silent, opts.user_id, args[0]\n\ndef load_marc_reader(filename):\n try:\n handler = open(filename, \"rb\")\n reader = MARCReader(handler, to_unicode=True, force_utf8=True)\n except Exception as e:\n print(\"Error loading marc file\")\n print(\"Exception: %s\" % str(e))\n sys.exit(1)\n else:\n return reader\n\ndef marc_record_to_xml_string(record):\n b = BytesIO()\n writer = XMLWriter(b)\n writer.write(record)\n writer.close(close_fh=False)\n\n # Transform record from bytes to string.\n b.seek(0,0)\n bytestr = b.read()\n marc = bytestr.decode('UTF-8')\n\n # Remove the XML declaration and collection stuff.\n marc = re.sub(r'^.*]*>','',marc)\n marc = re.sub(r'$','',marc)\n # Remove tab characters.\n marc = re.sub(r'\\t','',marc)\n\n # Verify cleaning worked:\n if not marc.startswith(' 0 and filename:\n response = input(\"There are [{}] unfinished records in the staging database that will be loaded if you continue. Do you want to also add the current file to these staged records? y or yes to add the file, n or no to skip the file and continue with the existing staged records. Anything else to cancel. [Y/n]\".format(count,))\n if response in ('y','Y','yes','Yes'):\n load_file = True\n elif response in ('n','no','N','No'):\n load_file = False\n else:\n print(\"Invalid choice. Exiting.\")\n exit(1)\n\n if not bib_source:\n bib_source = input(\"Please enter the number of the bib source:\").strip()\n\n\n if load_file:\n if not silent:\n print(\"Processing file: [{}].\".format(filename,))\n reader = load_marc_reader(filename)\n if not silent:\n print(\"Copying marc to staging database.\")\n start_time = datetime.now()\n copy_marc_into_insert_staging(conn, reader)\n if not silent:\n print(\"Records staged.\")\n duration = datetime.now() - start_time\n print(\"Elapsed time: %s\" % (str(duration),) )\n\n\n # PREPARE TO EXECUTE THE BIG RECORD LOAD\n count = unfinished_records_in_staging(conn)\n\n if not silent:\n print(\"Ready to insert records from the staging table:\\n# RECORDS:\\t{}\\nBIB SOURCE:\\t{}\\nUSER ID:\\t{}\\n\\n\".format(count,bib_source, user_id))\n os.system('say \"Ready to load records?\"')\n if input(\"Insert staged records?? n or Ctrl+D to quit. [Y/n]\") in ('n', 'no'):\n print(\"Exiting.\")\n exit(1)\n print(\"Inserting staged records.\")\n start_time = datetime.now()\n\n # PERFORM THE BATCH LOAD\n insert_staged_records_to_biblio_record_entry(conn, bib_source, user_id, silent)\n\n # REPORT ON THE LOAD.\n if not silent:\n duration = datetime.now() - start_time\n print(\"Elapsed time: %s\" % (str(duration),) )\n\n count = unfinished_records_in_staging(conn)\n if count == 0:\n print(\"All staged records have been inserted.\")\n else:\n print(\"There are still [{}] unfinished records in the staging database.\".format(count,))\n\n # cursor.execute(\"INSERT INTO biblio.record_entry (marc, creator, editor, source, last_xact_id) SELECT marc, '1', '1', '2', pg_backend_pid() || '.' 
|| extract(epoch from now()) FROM public.custom_insert_staging_test where id = %s;\", (68327,))\n\n\nmain()\n\n","sub_path":"bib-insert.py","file_name":"bib-insert.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"590128481","text":"#!/bin/env python\n\n# Example of large scale dataset processing in Tensorflow.\n# Processes the ImageNet dataset into a one-hot classificaiton\n# dataset.\n#\n# ImageNet is a mixture of images, with 1000 labeled classes.\n# Each image can have one or more class objects.\n# The annotations for each image includes class ID and bounding\n# box dimensions. The functions below use these bounding boxes\n# to chop up the original images to create single images\n# corresponding to single class labels. This simplifies the\n# network needed to label the data, but effects the final\n# network accuracy.\n#\n# questions? Taylor Childers, jchilders@anl.gov\n\nimport os,glob\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'\nimport tensorflow as tf\nimport numpy as np\nimport xml.etree.ElementTree as ET\nimport horovod.tensorflow as hvd\nhvd.init()\n\n# these are initialized in the get_datasets function and used later\nlabels_hash = None\ncrop_size = None\n\n\ndef get_datasets(config):\n # these global variables will be initizlized\n global labels_hash,crop_size\n\n # set the crop size of the output images, e.g. [256,256]\n crop_size = tf.constant(config['data']['crop_image_size'])\n # these are paths to text files containing a list, one entry per line,\n # of all the training JPEGs and testing JPEGs\n # it's assumed the full path to the JPEGs is like this:\n # /.../ILSVRC/Data/CLS-LOC/train/n02437312/n02437312_8688.JPEG\n # because the class label comes from the last folder text.\n train_filelist = config['data']['train_filelist']\n test_filelist = config['data']['test_filelist']\n\n assert os.path.exists(train_filelist)\n assert os.path.exists(test_filelist)\n\n # this function uses that class label from the filename path\n # and builds a map from the string labels like the above \"n02537312\"\n # to a unique integer value 0-999. 
This is more suitable for\n # network classifciation than a string.\n labels_hash = get_label_tables(train_filelist)\n\n # this function creates the tf.dataset.Dataset objects for each list\n # of input JPEGs.\n train_ds = build_dataset_from_filelist(config,train_filelist)\n valid_ds = build_dataset_from_filelist(config,test_filelist)\n\n return train_ds,valid_ds\n\n\n## Create a hash table for labels from string to int \ndef get_label_tables(train_filelist):\n\n # get the first filename\n with open(train_filelist) as file:\n filepath = file.readline().strip()\n\n # parse the filename to extract the \"n02537312\" string\n # from the full path which is assumed to be similar to this\n # /.../ILSVRC/Data/CLS-LOC/train/n02437312/n02437312_8688.JPEG\n # and convert that string to a unique value from 0-999\n\n # this extracts the path up to: /.../ILSVRC/Data/CLS-LOC/train/\n label_path = '/'.join(filepath.split('/')[:-2])\n # this globs for all the directories like \"n02537312\" to get \n # list of the string labels\n labels = glob.glob(label_path + os.path.sep + '*')\n if config['hvd'].rank() == 0:\n print(f'num labels: {len(labels)}')\n # this removes the leading path from the label directories\n labels = [os.path.basename(i) for i in labels]\n # create a list of integers as long as the number of labels\n hash_values = tf.range(len(labels))\n # convert python list of strings to a tensorflow vector\n hash_keys = tf.constant(labels, dtype=tf.string)\n # build a key-value lookup using Tensorflow tools\n labels_hash_init = tf.lookup.KeyValueTensorInitializer(hash_keys, hash_values)\n # build a lookup table based on those key-value pairs (returns -1 for undefined keys)\n labels_hash = tf.lookup.StaticHashTable(labels_hash_init, -1)\n\n return labels_hash\n\n\n# take a config dictionary and a path to a filelist\n# return a tf.dataset.Dataset object that will iterate over the JPEGs in filelist\ndef build_dataset_from_filelist(config,filelist_filename):\n if config['hvd'].rank() == 0:\n print(f'build dataset {filelist_filename}')\n\n dc = config['data']\n\n # if running horovod(MPI) need to shard the dataset based on rank\n numranks = 1\n if config['hvd']:\n numranks = config['hvd'].size()\n\n # loading full filelist\n filelist = []\n with open(filelist_filename) as file:\n for line in file:\n filelist.append(line.strip())\n\n # provide user with estimated batches in epoch\n batches_per_rank = int(len(filelist) / dc['batch_size'] / numranks)\n if config['hvd'].rank() == 0:\n print(f'input filelist contains {len(filelist)} files, estimated batches per rank {batches_per_rank}')\n \n # convert python list to tensorflow vector object\n filelist = tf.data.Dataset.from_tensor_slices(filelist)\n\n # if using horovod (MPI) shard the data based on total ranks (size) and rank\n if config['hvd']:\n filelist = filelist.shard(config['hvd'].size(), config['hvd'].rank())\n \n # shuffle the data, set shuffle buffer (needs to be large), and reshuffle after each epoch\n filelist = filelist.shuffle(dc['shuffle_buffer'],reshuffle_each_iteration=dc['reshuffle_each_iteration'])\n\n # run 'load_image_label_bb' on each input image file, process multiple files in parallel\n # this function opens the JPEG, converts it to a tensorflow vector and gets the truth class label\n ds = filelist.map(load_image_label_bb,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n \n # unbatch called because some JPEGs result in more than 1 image returned\n ds = ds.apply(tf.data.Dataset.unbatch)\n\n # batch the data\n ds = 
ds.batch(dc['batch_size'])\n\n # setup a pipeline that pre-fetches images before they are needed (keeps CPU busy)\n ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) \n\n return ds\n\n\n# this function parses the image path, uses the label hash to convert the string\n# label in the path to a numerical label, decodes the input JPEG, and returns\n# the input image and label\ndef load_image_label_bb(image_path):\n \n # for each JPEG, there is an Annotation file that contains a list of\n # classes contained in the image and a bounding box for each object.\n # However, some images contain a single class, in which case the\n # dataset contains no annotation file which is annoying, but...\n # Images with multiple objects per file are always the same class.\n label = tf.strings.split(image_path, os.path.sep)[-2]\n annot_path = tf.strings.regex_replace(image_path,'Data','Annotations')\n annot_path = tf.strings.regex_replace(annot_path,'JPEG','xml')\n\n # open the annotation file and retrieve the bounding boxes and indices\n bounding_boxes,box_indices = tf.py_function(get_bounding_boxes,[annot_path],[tf.float32,tf.int32])\n\n # open the JPEG\n img = tf.io.read_file(image_path)\n # convert the compressed string to a 3D uint8 tensor\n img = tf.image.decode_jpeg(img, channels=3)\n # add batching index [batch,width,height,channel]\n img = tf.expand_dims(img,0)\n\n # create individual images based on bounding boxes\n imgs = tf.image.crop_and_resize(img,bounding_boxes,box_indices,crop_size)\n\n # Use `convert_image_dtype` to convert to floats in the [0,1] range.\n imgs = tf.image.convert_image_dtype(imgs, tf.float16)\n # resize the image to the desired size. networks don't like variable sized arrays.\n imgs = tf.image.resize(imgs, crop_size)\n # convert string label to numerical label\n label = labels_hash.lookup(label)\n # duplicate labels to match the number of images created from bounding boxes\n labels = tf.fill([tf.shape(imgs)[0]],label)\n # return images and labels\n return imgs, labels\n\n\n# this function opens the annotation XML file and parses the contents\n# the contents include a list of objects in the JPEG, a label and\n# bounding box for each object\ndef get_bounding_boxes(filename):\n filename = bytes.decode(filename.numpy())\n try:\n tree = ET.parse(filename)\n root = tree.getroot()\n\n img_size = root.find('size')\n img_width = int(img_size.find('width').text)\n img_height = int(img_size.find('height').text)\n # img_depth = int(img_size.find('depth').text)\n\n objs = root.findall('object')\n bndbxs = []\n # label = None\n for object in objs:\n # label = object.find('name').text\n bndbox = object.find('bndbox')\n bndbxs.append([\n float(bndbox.find('ymin').text) / (img_height - 1),\n float(bndbox.find('xmin').text) / (img_width - 1),\n float(bndbox.find('ymax').text) / (img_height - 1),\n float(bndbox.find('xmax').text) / (img_width - 1)\n ])\n except FileNotFoundError:\n bndbxs = [[0,0,1,1]]\n\n return np.asarray(bndbxs,float),np.zeros(len(bndbxs))\n\n\n\nif __name__ == '__main__':\n # parse command line\n import argparse,json,time\n parser = argparse.ArgumentParser(description='test this')\n parser.add_argument('-c', '--config', dest='config_filename',\n help='configuration filename in json format',\n required=True)\n parser.add_argument('-l','--logdir', dest='logdir',\n help='log output directory',default='logdir')\n parser.add_argument('-n','--nsteps', dest='nsteps',\n help='number of steps to run',default=10,type=int)\n parser.add_argument('--interop',type=int,help='set 
Tensorflow \"inter_op_parallelism_threads\" session config varaible ',default=None)\n parser.add_argument('--intraop',type=int,help='set Tensorflow \"intra_op_parallelism_threads\" session config varaible ',default=None)\n\n args = parser.parse_args()\n\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n if gpus:\n tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')\n\n print(\"GPUs Available: %s\" % tf.config.get_visible_devices('GPU'))\n\n # parse config file\n config = json.load(open(args.config_filename))\n config['hvd'] = hvd\n \n # define some parallel processing sizes\n if args.interop is not None:\n tf.config.threading.set_inter_op_parallelism_threads(args.interop)\n if args.intraop is not None:\n tf.config.threading.set_intra_op_parallelism_threads(args.intraop)\n \n # use the tensorflow profiler here\n if hvd.rank() == 0:\n tf.profiler.experimental.start(args.logdir)\n # call function to build dataset objects\n # both of the returned objects are tf.dataset.Dataset objects\n trainds, testds = get_datasets(config)\n # can iterate over a dataset object\n trainds = iter(trainds)\n start = time.time()\n for i in range(args.nsteps):\n # profile data pipeline\n with tf.profiler.experimental.Trace('train_%02d' % i, step_num=i, _r=1):\n inputs,labels = next(trainds)\n \n # print('batch_number = %s input shape = %s labels shape = %s' %(i,inputs.shape,labels.shape))\n # print('batch_number = %s labels = %s' %(i,labels))\n # measure performance in images per second\n duration = time.time() - start\n if hvd.rank() == 0:\n tf.profiler.experimental.stop()\n images = config['data']['batch_size'] * args.nsteps\n if hvd.rank() == 0:\n print('imgs/sec = %5.2f' % ((images/duration)*hvd.size()))\n","sub_path":"dataPipelines/00_tensorflowDatasetAPI/ilsvrc_dataset.py","file_name":"ilsvrc_dataset.py","file_ext":"py","file_size_in_byte":11163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"8661970","text":"from skimage.filters import gabor\r\nfrom skimage.segmentation import slic\r\nfrom sklearn.cluster import KMeans\r\nimport cv2 \r\nimport os \r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\n\r\n\r\ndef load_images():\r\n '''\r\n !!!!!!!!!CHANGE DONT FORGET\r\n '''\r\n path = os.getcwd() + os.sep\r\n print(path)\r\n\r\n image_paths = [ path + img_path for img_path in os.listdir(path) if img_path.endswith('.png') or img_path.endswith('.jpg') ]\r\n \r\n return np.array([ cv2.cvtColor(cv2.imread(img_path) , cv2.COLOR_BGR2RGB) for img_path in image_paths ])\r\n\r\ndef image_to_map(images, map_range = 8):\r\n '''\r\n This function maps the value given range for creating histograms for given range\r\n Params :\r\n images : all images in dataset\r\n map_range : (256/map_range) creates mapping table for given range\r\n \r\n Return : \r\n mapping tables for images\r\n '''\r\n return np.array([ (image / ( 256 / map_range )).astype(np.int) for image in images ] )\r\n \r\n\r\ndef rgb_to_lab(images):\r\n '''\r\n This function convert image color space from rgb from lab \r\n \r\n Params :\r\n images : all rgb images in dataset\r\n Return : \r\n images that have lab color spaces\r\n '''\r\n return np.array([ cv2.cvtColor(image, cv2.COLOR_RGB2LAB) for image in images ] ) \r\n \r\ndef rgb_to_gray(images):\r\n '''\r\n This function convert image color space from rgb from gray \r\n \r\n Params :\r\n images : all rgb images in dataset\r\n Return : 
\r\n images that have gray color spaces\r\n '''\r\n return np.array( [ cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) for image in images ])\r\n\r\ndef energy_gabor(gabor_result):\r\n '''\r\n This function find norm of gabor imaginary and real parts \r\n \r\n Params : \r\n gabor_result : all result values for gabor filter that has imaginary and real parts\r\n \r\n Return : \r\n norms of gabor filters imaginary and real parts.\r\n '''\r\n return np.round(np.sqrt(np.square(gabor_result[0]) + np.square(gabor_result[1]))).astype(np.int)\r\n\r\ndef texture_feature_extractor(gray_images):\r\n '''\r\n This function find gabor filter results of the images for 4 scale and 4 rotation\r\n \r\n Params : \r\n gray_images : all result values for gabor filter that has imaginary and real parts\r\n \r\n Return : \r\n results of 16 different gabor filter for all images\r\n '''\r\n \r\n #four different scale \r\n scales = np.array([ 0.1, 0.2, 0.3, 0.4 ])\r\n \r\n #four different rotations\r\n rotations = np.array([np.deg2rad(0), np.deg2rad(45), np.deg2rad(90), np.deg2rad(135)])\r\n \r\n #list that holds gabor features\r\n gabor_features = []\r\n for gray in gray_images:\r\n \r\n #16 different feature for one images \r\n gabor_per_image = []\r\n \r\n for scale in scales: \r\n for rotation in rotations:\r\n \r\n #find norms of gaor filter result and save in list\r\n gabor_per_image.append(energy_gabor(gabor(gray, theta=rotation, frequency=scale)))\r\n #save filter results in array \r\n gabor_per_image = np.array(gabor_per_image)\r\n gabor_per_image = np.moveaxis(gabor_per_image, 0, -1)\r\n gabor_features.append(gabor_per_image) \r\n return np.array(gabor_features)\r\n\r\ndef superpixel_seg(images, n_segments=400):\r\n '''\r\n This function apply slic to all images \r\n \r\n Params : \r\n images : all rgb images to find superpixels\r\n n_segments : number of wanted segments\r\n \r\n Return : \r\n all superpixels results \r\n '''\r\n segments = []\r\n #superpixels for all images\r\n for image in images: \r\n segments.append(slic(image, n_segments = n_segments, sigma = 5))\r\n return np.array(segments)\r\n\r\ndef texture_mean_super(texture_features, super_pixels):\r\n '''\r\n This function apply slic to all images \r\n \r\n Params : \r\n texture_features : results of 16 gabor filters for all images \r\n super_pixels : super pixels mapping image for all images\r\n \r\n Return : \r\n Mean of textures for superpixel area\r\n number of superpixels segments for all images \r\n '''\r\n \r\n texture_means = []\r\n number_of_segmentation = []\r\n #append 0 first for algorithmic purpose\r\n number_of_segmentation.append(0)\r\n \r\n for i,feature in enumerate(texture_features):\r\n #all unique superpixel value for a images\r\n unique = np.unique(super_pixels[i])\r\n \r\n #number of superpixels segments \r\n number_of_segmentation.append(len(unique))\r\n \r\n #temp list for all superpixels\r\n zeros = np.zeros((len(unique), 16)).astype(np.float64)\r\n for j in unique:\r\n \r\n #save mean of 16 gabor texture superpixels for a images\r\n zeros[j] = np.mean(feature[super_pixels[i] == j] ,axis=0).astype(np.float64).reshape(1,16)\r\n \r\n #normalize mean\r\n zeros[j] = zeros[j] / ( np.linalg.norm(zeros[j]) + 0.0001)\r\n \r\n #all texture means\r\n texture_means.extend(zeros)\r\n return np.array(texture_means), np.array(number_of_segmentation)\r\n\r\ndef color_bins_super(lab_images, super_pixels ):\r\n '''\r\n This function finds color values for given map and concenate each color channel side by side as 1x24 vector\r\n \r\n Params : 
\r\n        lab_images : all lab images to find superpixels\r\n        super_pixels : super pixels mapping image for all images\r\n        \r\n    Return : \r\n        all superpixels results \r\n    '''\r\n    \r\n    \r\n    #lab images to map values \r\n    lab_images_map = image_to_map(lab_images)\r\n    \r\n    color_descriptors = []\r\n    number_of_segmentation = []\r\n    \r\n    #algorithmic purpose \r\n    number_of_segmentation.append(0)\r\n    for i, lab_image in enumerate(lab_images_map):\r\n        \r\n        \r\n        #find unique superpixel value for an image\r\n        unique = np.unique(super_pixels[i])\r\n        \r\n        #saves number of segmentation\r\n        number_of_segmentation.append(len(unique))\r\n        \r\n        #temp list \r\n        zeros = np.zeros((len(unique), 24)).astype(np.float64)\r\n        for j in unique:\r\n            \r\n            #histogram of color channels to find occurrence of a value\r\n            l = np.histogram(lab_image[:,:,0][ super_pixels[i] == j ], bins=range(9))[0]\r\n            a = np.histogram(lab_image[:,:,1][ super_pixels[i] == j ], bins=range(9))[0]\r\n            b = np.histogram(lab_image[:,:,2][ super_pixels[i] == j ], bins=range(9))[0]\r\n            \r\n            #stack as 1x24\r\n            zeros[j] = np.hstack([ l,a,b]).reshape(1,24).astype(np.float64)\r\n            \r\n            #normalized values \r\n            zeros[j] = zeros[j] / ( np.linalg.norm(zeros[j]) + 0.0001)\r\n        color_descriptors.extend(zeros)\r\n    return np.array(color_descriptors), np.array(number_of_segmentation)\r\n\r\ndef kmeans_descriptor(descriptorsx,k_clusters, no_of_des):\r\n    '''\r\n    This function applies KMeans to the features\r\n    Params : \r\n        descriptors : color and texture descriptors Nx40\r\n        k_clusters : number of initial cluster centers\r\n        no_of_des : holds number of descriptors for all images \r\n        \r\n    Return : \r\n        kmeans results\r\n    '''\r\n    \r\n    #applies kmeans and predicts labels\r\n    labels = KMeans( random_state=0, n_clusters= k_clusters).fit_predict(descriptorsx)\r\n    \r\n    #find whether k_cluster shrink due to algorithm\r\n    max_labels = np.max(labels[:])\r\n    \r\n    return np.array([ labels[np.sum(no_of_des[:i]) : np.sum(no_of_des[:(i+1)])] for i in range(len(no_of_des)) ] ), max_labels\r\n\r\ndef random_color_values(k=1000 ):\r\n    '''\r\n    This function creates random rgb values for given number\r\n    \r\n    Params : \r\n        k: number of random rgb values wanted \r\n        \r\n    Return : \r\n        set of all rgb values for given number\r\n    '''\r\n    #empty set \r\n    set_of_colors = set()\r\n    \r\n    #random seed for stable random numbers\r\n    np.random.seed(1)\r\n    \r\n    #find all rgb values until number of rgb values reached \r\n    while(len(set_of_colors) < k ):\r\n        r = np.random.randint(256)\r\n        g = np.random.randint(256)\r\n        b = np.random.randint(256)\r\n        \r\n        color = (r,g,b)\r\n        \r\n        \r\n        set_of_colors.add(color)\r\n    return list(set_of_colors)\r\n\r\ndef plot_false_color_images(gray_images, images, descriptors, super_pixels, max_label, title):\r\n    '''\r\n    This function plots negative images, segmentation image and image itself\r\n    \r\n    Params : \r\n        gray_images : all gray images to plot negative image\r\n        images : all images to plot image itself\r\n        descriptors : all descriptors that clustered before \r\n        super_pixels : all super pixel value for all images\r\n        max_label : number of cluster\r\n        \r\n    Return : \r\n        \r\n    '''\r\n    #random colors for negative images \r\n    random_colors = random_color_values(max_label+1) #[(230, 25, 75),(60, 180, 75),(255, 225, 25),(0, 130, 200),(245, 130, 48),(145, 30, 180),(70, 240, 240)]\r\n    \r\n\r\n    for i, (gray_image,image) in enumerate(zip(gray_images,images)):\r\n        \r\n        #copy image for safety\r\n        image_copy = image.copy()\r\n        \r\n        #number of unique values in the super pixel 
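The bookkeeping in `kmeans_descriptor` above, clustering every superpixel descriptor at once and then slicing the flat label vector back into per-image chunks, can be written directly with `np.cumsum`/`np.split`. A small sketch on made-up descriptors (the 40-dim size mirrors the 24 color + 16 texture features; the per-image counts are hypothetical):

```python
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
per_image = [5, 8, 3]                 # superpixel counts of 3 hypothetical images
descriptors = rng.random((sum(per_image), 40))

labels = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(descriptors)

# split the flat label vector back into one array per image
bounds = np.cumsum(per_image)[:-1]
for i, lab in enumerate(np.split(labels, bounds)):
    print('image', i, '->', lab)
```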
map\r\n unique = np.unique(super_pixels[i])\r\n for j in range(len(unique)):\r\n \r\n #create negative image\r\n image_copy[ super_pixels[i] == unique[j] ] = random_colors[descriptors[i][unique[j]]]\r\n \r\n #alpha value for alpha blending \r\n alpha = 0.4\r\n \r\n #crete negative image with alpha blending \r\n img = cv2.addWeighted(cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB), 1-alpha, image_copy, alpha, 0.0)\r\n \r\n #determine plot size \r\n plt.rcParams['figure.figsize'] = (10,10)\r\n \r\n #plot\r\n plt.figure()\r\n plt.imshow(img)\r\n plt.title(\"{}:Image->{}:Negative\".format(title, i))\r\n \r\n plt.show()\r\n plt.figure()\r\n plt.imshow(image_copy)\r\n plt.title(\"{}:Image->{}:Segmentation\".format(title, i))\r\n plt.show()\r\n plt.figure()\r\n plt.imshow(image)\r\n plt.title(\"{}:Image->{}:Image\".format(title, i))\r\n plt.show()\r\n \r\ndef center_and_radius_finder(super_pixel):\r\n '''\r\n This function finds center and radiuses of super pixel maps \r\n \r\n Params : \r\n superpixel : super pixels mapping for an image\r\n \r\n Return : \r\n centers and radiuses\r\n '''\r\n #empty lists for holding values\r\n y_centers = []\r\n x_centers = []\r\n radius = []\r\n \r\n #unique values for super pixel map\r\n unique = np.unique(super_pixel)\r\n \r\n for j in unique:\r\n \r\n #pixel values for given super pixel value \r\n x,y = np.where(super_pixel == j )\r\n \r\n #center of x and y axises\r\n x_centers.append((min(x)+ max(x))//2)\r\n y_centers.append((min(y)+max(y))//2)\r\n \r\n #radius\r\n radius.append((max(x) - x_centers[-1] + max(y) - y_centers[-1])/2)\r\n\r\n return np.vstack([x_centers, y_centers]).T, radius\r\n\r\ndef pixels_inside_circle(super_pixel, centers , radius, k ):\r\n '''\r\n This function finds ring pixels for given center and radius an image\r\n \r\n Params : \r\n superpixel : super pixels mapping image \r\n centers : centers of super pixel values \r\n radius : radiuses of super pixel values\r\n k : degree of ring \r\n \r\n Return : \r\n pixels inside circles\r\n '''\r\n \r\n #unique values for super pixel map\r\n unique = np.unique(super_pixel)\r\n pixels_wanted = []\r\n for i in range(len(unique)):\r\n \r\n #pixels inside rectangle for given center and radius\r\n pixels = np.moveaxis(np.mgrid[np.round(centers[i][0] - (k * radius[i])):np.round(centers[i][0] + (k *radius[i])),np.round(centers[i][1] - (k* radius[i])):np.round(centers[i][1] + (k * radius[i]))], 0, -1).reshape(-1,2).astype(np.int)\r\n\r\n #pixel cannot be lower than 0 \r\n pixels[ pixels < 0 ] = 0\r\n \r\n #cannot be bigger than widths and heights\r\n pixels[:,0][ pixels[:,0] >= super_pixel.shape[0] -1] = super_pixel.shape[0]- 1\r\n pixels[:,1][ pixels[:,1] >= super_pixel.shape[1] -1] = super_pixel.shape[1] - 1\r\n \r\n #normalixed pixels values \r\n pixels_normalized = pixels - centers[i]\r\n \r\n #norm between center and pixel values \r\n distances = np.sqrt(np.square(pixels_normalized[:,0]) + np.square(pixels_normalized[:,1]))\r\n \r\n #if distance lower than given ring degree, pixels inside of circle\r\n inside_circle = distances <= (radius[i] * k) + 0.5\r\n \r\n #pixels inside circle\r\n pixels_inside = pixels[inside_circle]\r\n \r\n #super pixel values for given pixels\r\n pixels_wanted.append(super_pixel[pixels_inside[:,0], pixels_inside[:,1]])\r\n \r\n return np.array(pixels_wanted)\r\n\r\n\r\n\r\ndef first_ring(pixels,super_pixel ):\r\n '''\r\n This function finds super pixel value for given pixels values\r\n \r\n Params : \r\n lab_images : all lab images to find superpixels\r\n superpixels : 
super pixels mapping image for all images\r\n \r\n Return : \r\n all ring super pixel value\r\n '''\r\n \r\n rings = []\r\n for center, pixel in enumerate(pixels):\r\n \r\n #uniques values for given pixel values\r\n unique = np.unique( pixel )\r\n \r\n first_rings = []\r\n for i in unique:\r\n #complete area of given pixels\r\n complete_area = len(super_pixel[ super_pixel == i ])\r\n \r\n #founded area \r\n founded_area = len( pixel[ pixel == i ])\r\n \r\n #if i isnot center superpixel and area ratio greater than half means super pixel value is included\r\n if (center != i ) and ((founded_area/complete_area) >= 0.5) :\r\n first_rings.append(i)\r\n\r\n rings.append(np.array(first_rings))\r\n \r\n return np.array(rings)\r\n\r\n\r\ndef second_ring(first_rings, pixels, super_pixel ):\r\n '''\r\n This function finds super pixel value for given pixels values\r\n \r\n Params : \r\n lab_images : all lab images to find superpixels\r\n superpixels : super pixels mapping image for all images\r\n \r\n Return : \r\n all ring super pixel value\r\n '''\r\n \r\n rings = []\r\n for center, pixel in enumerate(pixels):\r\n \r\n #uniques values for given pixel values\r\n unique = np.unique( pixel )\r\n \r\n second_rings = []\r\n for i in unique:\r\n #complete area of given pixels\r\n complete_area = len(super_pixel[ super_pixel == i ])\r\n \r\n #founded area \r\n founded_area = len( pixel[ pixel == i ])\r\n \r\n \r\n #if i isnot center superpixel and area ratio greater than half means super pixel value is included\r\n if (center != i ) and ((founded_area/complete_area) >= 0.5) :\r\n second_rings.append(i)\r\n \r\n #find difference of second ring and firs ring to remove first ring values from second ring \r\n ring = np.setdiff1d(second_rings, first_rings[center])\r\n rings.append(np.array(ring))\r\n \r\n return np.array(rings) \r\n\r\n\r\n\r\ndef contextual_represantion(super_pixels, descriptors, no_of_des):\r\n '''\r\n This function finds contextual representation of given values\r\n \r\n Params : \r\n superpixels : super pixels mapping image for all images\r\n decriptors : center, first ring and second ring contextuals 1x120\r\n no_of_des : number of descriptors \r\n\r\n Return : \r\n contextual values \r\n '''\r\n contextuals = []\r\n for i in range(len(super_pixels)):\r\n \r\n #centers and radiues\r\n centers, radiuses = center_and_radius_finder(super_pixels[i])\r\n \r\n #first ring inside circle pixel values\r\n first_ring_pixels = pixels_inside_circle(super_pixels[i], centers, radiuses, 3 )\r\n \r\n #second ring inside circle pixel values\r\n second_ring_pixels = pixels_inside_circle(super_pixels[i], centers, radiuses, 5 )\r\n \r\n #first ring super pixel value\r\n first_rings = first_ring(first_ring_pixels, super_pixels[i])\r\n \r\n #second ring super pixel value\r\n second_rings = second_ring(first_rings, second_ring_pixels, super_pixels[i])\r\n contextual = []\r\n \r\n for j in range(len(first_rings)):\r\n #center \r\n center = descriptors[np.sum(no_of_des[:i+1]) : np.sum(no_of_des[:(i+2)])][j]\r\n \r\n #first ring \r\n first = np.mean(descriptors[np.sum(no_of_des[:i+1]) : np.sum(no_of_des[:(i+2)])][first_rings[j]],axis=0)\r\n \r\n #second ring \r\n second = np.mean(descriptors[np.sum(no_of_des[:i+1]) : np.sum(no_of_des[:(i+2)])][second_rings[j]],axis=0)\r\n contextual.append(np.hstack([center,first,second]))\r\n \r\n contextuals.extend(np.array(contextual))\r\n \r\n return np.array(contextuals)\r\n\r\n\r\n\r\n#number of superpixel segmentation\r\nn_segmentation = 350\r\n\r\n#k means 
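Stripped of the ring-finding geometry, the contextual descriptor assembled here is just a concatenation of three 40-dim vectors: the center superpixel's descriptor plus the means over its first and second rings, which yields the 1x120 vector the docstring mentions. A toy sketch with hypothetical ring memberships:

```python
import numpy as np

rng = np.random.default_rng(1)
desc = rng.random((10, 40))      # descriptors of 10 superpixels in one image

center = 0
first_ring = [1, 2, 3]           # hypothetical neighbour sets
second_ring = [4, 5]

contextual = np.hstack([desc[center],
                        desc[first_ring].mean(axis=0),
                        desc[second_ring].mean(axis=0)])
print(contextual.shape)          # (120,)
```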
clusters\r\nk_clusters = 7\r\n\r\n#load all images \r\nimages = load_images()\r\n\r\n#convert rgb to lab \r\nlab_images = rgb_to_lab(images)\r\n\r\n#convert rgb to gray\r\ngray_images = rgb_to_gray(images)\r\n\r\nprint('Part-1 is loading...')\r\n#super pixels \r\nsuper_pixels = superpixel_seg(images, n_segmentation)\r\n\r\n\r\nprint('Part-2 is loading...')\r\n#get gabor textures \r\ntexture_features = texture_feature_extractor(gray_images)\r\n\r\nprint('Part-3 is loading...')\r\ntexture_descriptors, no_tex = texture_mean_super(texture_features, super_pixels)\r\ncolor_descriptors, no_color = color_bins_super(lab_images, super_pixels)\r\ndescriptors = np.hstack([color_descriptors, texture_descriptors])\r\n\r\nprint('Part-4 is loading...')\r\nkmeans_des,max_label = kmeans_descriptor(descriptors, k_clusters,no_color)\r\nplot_false_color_images(gray_images,images, kmeans_des[1:], super_pixels, max_label,\"Part4\")\r\n\r\n\r\nprint('Part-5 is loading...')\r\nsuper_pixels = superpixel_seg(images, n_segmentation)\r\ncontextual_descriptors = contextual_represantion(super_pixels, descriptors, no_color)\r\nkmeans_contextual,max_label_con = kmeans_descriptor(contextual_descriptors, k_clusters,no_color)\r\nplot_false_color_images(gray_images, images, kmeans_contextual[1:], super_pixels, max_label_con, \"Part5\")\r\nplt.show()","sub_path":"hw3/akturk_sait_21501734_hw3.py","file_name":"akturk_sait_21501734_hw3.py","file_ext":"py","file_size_in_byte":18464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"160449151","text":"from django import forms\nfrom .models import ResourceStock, ResourcesAssignment\nfrom personal.models import Person\n\ndef adicionarClase(campo, clase):\n campo.widget.attrs.update({'class': clase})\n return campo\n\nclass SearchPersonForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super(SearchPersonForm, self).__init__(*args, **kwargs)\n self.fields['person'] = adicionarClase(self.fields['person'], 'one')\n\n person = forms.ModelChoiceField(required=True, queryset=Person.objects.all(), widget=forms.Select(attrs={'size': '6', 'readonly': True}))\n\nclass MovePersonForm(forms.Form):\n person = forms.ModelChoiceField(required=True, queryset=None)\n\n def __init__(self, *args, **kwargs):\n self.id_asignado = kwargs.pop('id')\n super(MovePersonForm, self).__init__(*args, **kwargs)\n\n old_p = ResourcesAssignment.objects.get(id=self.id_asignado).person_warehouse\n self.fields['person'].queryset = Person.objects.all().exclude(id=old_p.id)\n self.fields['person'] = adicionarClase(self.fields['person'], 'one')\n\nclass ResourcesAssignmentForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(ResourcesAssignmentForm, self).__init__(*args, **kwargs)\n self.fields['person_warehouse'] = adicionarClase(self.fields['person_warehouse'], 'one')\n self.fields['resource_warehouse'] = adicionarClase(self.fields['resource_warehouse'], 'one')\n\n assignment_resources = ResourcesAssignment.objects.all()\n id_assigned_resources = []\n for i in assignment_resources:\n id_assigned_resources.append(i.resource_warehouse.id)\n visibles_resources = ResourceStock.objects.exclude(id__in=id_assigned_resources)\n\n self.fields['resource_warehouse'].queryset = visibles_resources\n\n class Meta:\n model = ResourcesAssignment\n fields = ['person_warehouse', 
'resource_warehouse']\n","sub_path":"resourcestock/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"272103162","text":"# -*- coding: utf-8 -*-\n##########################################################################\n# NSAp - Copyright (C) CEA, 2019\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n\n# Third party import\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\nfrom torch.autograd import Variable\nfrom torch.nn.modules.loss import _Loss\n\n\ndef dice_loss_1(logits, true, eps=1e-7):\n    \"\"\" Computes the Sørensen–Dice loss.\n    Note that PyTorch optimizers minimize a loss. In this\n    case, we would like to maximize the dice score so we\n    return one minus the dice score.\n    true: a tensor of shape [B, 1, H, W].\n    logits: a tensor of shape [B, C, H, W]. Corresponds to\n    the raw output or logits of the model.\n    eps: added to the denominator for numerical stability.\n    dice_loss: the Sørensen–Dice loss.\n    \"\"\"\n    num_classes = logits.shape[1]\n    if num_classes == 1:\n        true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]\n        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()\n        true_1_hot_f = true_1_hot[:, 0:1, :, :]\n        true_1_hot_s = true_1_hot[:, 1:2, :, :]\n        true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)\n        pos_prob = torch.sigmoid(logits)\n        neg_prob = 1 - pos_prob\n        probas = torch.cat([pos_prob, neg_prob], dim=1)\n    else:\n        true_1_hot = torch.eye(num_classes)[true.squeeze(1)]\n        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()\n        probas = func.softmax(logits, dim=1)\n    true_1_hot = true_1_hot.type(logits.type())\n    dims = (0,) + tuple(range(2, true.ndimension()))\n    intersection = torch.sum(probas * true_1_hot, dims)\n    cardinality = torch.sum(probas + true_1_hot, dims)\n    dice_loss = (2. * intersection / (cardinality + eps)).mean()\n    return (1 - dice_loss)\n\n\ndef dice_loss_2(output, target, weights=1):\n    \"\"\"\n    output : NxCxHxW Variable\n    target : NxHxW LongTensor\n    weights : C FloatTensor\n    \"\"\"\n    output = func.softmax(output, dim=1)\n    target = torch.argmax(target, dim=1).type(torch.LongTensor)\n    encoded_target = output.data.clone().zero_()\n    encoded_target.scatter_(1, target.unsqueeze(1), 1)\n    encoded_target = Variable(encoded_target)\n\n    assert output.size() == encoded_target.size(), \"Input sizes must be equal.\"\n    assert output.dim() == 4, \"Input must be a 4D Tensor.\"\n\n    num = (output * encoded_target).sum(dim=3).sum(dim=2)\n    den1 = output.pow(2).sum(dim=3).sum(dim=2)\n    den2 = encoded_target.pow(2).sum(dim=3).sum(dim=2)\n\n    dice = (2 * num / (den1 + den2)) * weights\n    return dice.sum() / dice.size(0)\n\n\nclass MultiDiceLoss(object):\n    \"\"\" Define a multi-class dice loss.\n\n    Note that PyTorch optimizers minimize a loss.
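For intuition, both loss variants above reduce to the Dice score 2|A∩B|/(|A|+|B|). A tiny self-contained sanity check of the bare formula on dummy binary masks (independent of the functions above; eps is the usual stabilizer):

```python
import torch

def soft_dice(pred, target, eps=1e-7):
    # plain Dice formula; also works on soft probability maps
    inter = (pred * target).sum()
    return (2 * inter / (pred.sum() + target.sum() + eps)).item()

target = torch.zeros(8, 8)
target[2:6, 2:6] = 1.0
print(soft_dice(target, target))             # ~1.0: perfect overlap
print(soft_dice(torch.zeros(8, 8), target))  # 0.0: no overlap
print(soft_dice(torch.ones(8, 8), target))   # 2*16/(64+16) = 0.4
```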
In this case, we would like\n to maximize the dice loss so we return the negated dice loss.\n \"\"\"\n def __init__(self, weight=None, ignore_index=None, nb_batch=None):\n \"\"\" Class instanciation.\n\n Parameters\n ----------\n weight: FloatTensor (C), default None\n a manual rescaling weight given to each class.\n ignore_index: int, default None\n specifies a target value that is ignored and does not contribute\n to the input gradient.\n nb_batch: int, default None\n the number of mini batch to rescale loss between 0 and 1.\n \"\"\"\n self.weight = weight or 1\n self.ignore_index = ignore_index\n self.nb_batch = nb_batch or 1\n\n def __call__(self, output, target):\n \"\"\" Compute the loss.\n\n Note that this criterion is performing nn.Softmax() on the model\n outputs.\n\n Parameters\n ----------\n output: Variable (NxCxHxW)\n unnormalized scores for each class (the model output) where C is\n the number of classes.\n target: LongTensor (NxCxHxW)\n the class indices.\n \"\"\"\n eps = 1 # 0.0001\n n_classes = output.size(1) * self.nb_batch\n\n output = func.softmax(output, dim=1)\n target = torch.argmax(target, dim=1).type(torch.LongTensor)\n # output = output.exp()\n\n encoded_target = output.detach() * 0\n if self.ignore_index is not None:\n mask = target == self.ignore_index\n target = target.clone()\n target[mask] = 0\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n mask = mask.unsqueeze(1).expand_as(encoded_target)\n encoded_target[mask] = 0\n else:\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n\n intersection = output * encoded_target\n numerator = 2 * intersection.sum(0).sum(1).sum(1) + eps\n denominator = output + encoded_target\n if self.ignore_index is not None:\n denominator[mask] = 0\n denominator = denominator.sum(0).sum(1).sum(1) + eps\n loss_per_channel = self.weight * (1 - (numerator / denominator))\n print(loss_per_channel)\n\n return loss_per_channel.sum() / n_classes\n\n\nclass SoftDiceLoss(_Loss):\n \"\"\" Soft Dice Loss.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(SoftDiceLoss, self).__init__()\n\n def forward(self, y_pred, y_true, eps=1e-8):\n intersection = torch.sum(torch.mul(y_pred, y_true))\n union = (torch.sum(torch.mul(y_pred, y_pred)) +\n torch.sum(torch.mul(y_true, y_true)) + eps)\n dice = 2 * intersection / union\n dice_loss = 1 - dice\n return dice_loss\n\n\nclass CustomKLLoss(_Loss):\n \"\"\" KL Loss.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(CustomKLLoss, self).__init__()\n\n def forward(self, mean, std):\n return (torch.mean(torch.mul(mean, mean)) +\n torch.mean(torch.mul(std, std)) -\n torch.mean(torch.log(torch.mul(std, std))) - 1)\n\n\nclass CombinedLoss(_Loss):\n \"\"\" Combined Loss.\n\n Diceloss + k1 * L2loss + k2 * KLloss\n Since the output of the segmentation decoder has N channels (prediction\n for each tumor subregion), we simply add the N dice loss functions.\n A hyper-parameter weight of k1=0.1, k2=0.1 was found empirically in the\n paper.\n \"\"\"\n def __init__(self, num_classes, k1=0.1, k2=0.1):\n super(CombinedLoss, self).__init__()\n self.num_classes = num_classes\n self.k1 = k1\n self.k2 = k2\n self.dice_loss = SoftDiceLoss()\n self.l2_loss = nn.MSELoss()\n self.kl_loss = CustomKLLoss()\n\n def forward(self, outputs, y_true):\n y_pred, y_mid = outputs\n est_mean, est_std = (y_mid[:, :128], y_mid[:, 128:])\n seg_pred = y_pred[:, :self.num_classes]\n seg_truth = y_true[:, :self.num_classes]\n vae_pred = y_pred[:, self.num_classes:]\n vae_truth = y_true[:, self.num_classes:]\n dice_loss = None\n for 
 idx in range(self.num_classes):\n            if dice_loss is None:\n                dice_loss = self.dice_loss(\n                    seg_pred[:, idx], seg_truth[:, idx])\n            else:\n                dice_loss += self.dice_loss(\n                    seg_pred[:, idx], seg_truth[:, idx])\n        l2_loss = self.l2_loss(vae_pred, vae_truth)\n        kl_div = self.kl_loss(est_mean, est_std)\n        combined_loss = dice_loss + self.k1 * l2_loss + self.k2 * kl_div\n        print(\"dice_loss:%.4f, L2_loss:%.4f, KL_div:%.4f, combined_loss:\"\n              \"%.4f\" % (dice_loss, l2_loss, kl_div, combined_loss))\n        return combined_loss\n","sub_path":"pynet/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":7648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"595722335","text":"#Will return the index of a move our agent should make (range: [1,7])\n#MiniMax is a backtracking based algorithm: for each possible current move it\n#looks ahead (DFS) a certain number of turns (treeDepth) then backtracks and makes a\n#well informed move.\n\n#an alternative would be to create one big tree that is used to inform multiple\n#consecutive moves. But a big tree like that would require storing.\n\n#note: nodeindex always represents the position in the current level\n\nimport math\nimport sys\nimport numpy as np\nimport copy\n\nMAXDEPTH = 8\nSCORE_W = 0.3\nSEEDS_W = 0.2\nOURCAPT_W = 0.2\nOPPCAPT_W = 0.2\nEXTRA_W = 0.1\ndef alphabeta (curDepth, isMaximizingPlayer, branchFactor, previousBoard, currentBoard, rootBoard, alpha, beta, extraTurns):\n\tglobal MAXDEPTH\n\tvalue = 0\n\n\tif (curDepth == MAXDEPTH-2) or (currentBoard.gameOver()):\n\t\tvalue = evaluateBoard(previousBoard,currentBoard,rootBoard, extraTurns)\n\t\treturn value\n\n\t#If player turn: find the max value and set alpha\n\telif (isMaximizingPlayer):\n\t\tvalue = -9999\n\t\tprune = False\n\n\t\t#Child scores will be sent up and stored here\n\t\t#Run alphabeta on each child (next turns)\n\t\tfor moveIndex in range (0,branchFactor):\n\t\t\tif not prune:\n\n\t\t\t\tif moveIsLegal(moveIndex,currentBoard, isMaximizingPlayer):\n\t\t\t\t\t#Get board produced by this move from this node\n\n\t\t\t\t\tnextBoard = copy.deepcopy(currentBoard)\n\t\t\t\t\tnextArray = makeNextBoard(nextBoard.agentSide, nextBoard.getBoardArray(), moveIndex)\n\t\t\t\t\tnextBoard.setBoardArray(nextArray)\n\n\t\t\t\t\t#by default next turn is opp's (Min's)\n\t\t\t\t\tnextPlayerIsMax = False\n\t\t\t\t\t#unless it's a move that gives an extra turn to Max\n\t\t\t\t\tif givesExtraTurn(moveIndex,copy.deepcopy(currentBoard), isMaximizingPlayer):\n\t\t\t\t\t\tnextPlayerIsMax = True\n\t\t\t\t\t\textraTurns = extraTurns + 1\n\n\t\t\t\t\t#pass board to child\n\t\t\t\t\tvalue = max(value, alphabeta(curDepth + 1, nextPlayerIsMax, branchFactor,copy.deepcopy(currentBoard), nextBoard, rootBoard, alpha, beta, extraTurns))\n\t\t\t\t\talpha = max(value, alpha)\n\t\t\t\t\tif alpha >= beta:\n\t\t\t\t\t\tprune = True\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\talpha = -9999\n\n\t#if not player turn: find minimum value and set beta\n\telse:\n\t\tprune = False\n\t\tvalue = 9999\n\t\t#print node description\n\n\t\tfor moveIndex in range (0,branchFactor):\n\t\t\tif not prune:\n\t\t\t\tif moveIsLegal(moveIndex,currentBoard,isMaximizingPlayer):\n\t\t\t\t\t#Get board produced by this move from this node\n\n\t\t\t\t\tnextBoard = copy.deepcopy(currentBoard)\n\t\t\t\t\tnextArray = makeNextBoard(nextBoard.oppSide, nextBoard.getBoardArray(), moveIndex)\n\t\t\t\t\tnextBoard.setBoardArray(nextArray)\n\n\t\t\t\t\t#by default next turn is agents
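The pruning logic above is the textbook alpha-beta recursion. Stripped of the Mancala specifics, the same scheme on a hypothetical static game tree (toy leaf values, not the bot's real evaluation) looks like this:

```python
def alphabeta(node, depth, alpha, beta, maximizing):
    # leaves are plain numbers; internal nodes are lists of children
    if depth == 0 or not isinstance(node, list):
        return node
    if maximizing:
        value = float('-inf')
        for child in node:
            value = max(value, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:
                break            # beta cutoff: Min never lets us reach here
        return value
    value = float('inf')
    for child in node:
        value = min(value, alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, value)
        if alpha >= beta:
            break                # alpha cutoff
    return value

tree = [[3, 5], [6, [9, 1]], [1, 2]]
print(alphabeta(tree, 3, float('-inf'), float('inf'), True))  # 6
```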
 (Max's)\n\t\t\t\t\tnextPlayerIsMax = True\n\t\t\t\t\t#unless it's a move that gives the mover (Min) an extra turn\n\t\t\t\t\tif givesExtraTurn(moveIndex,copy.deepcopy(currentBoard), isMaximizingPlayer):\n\t\t\t\t\t\tnextPlayerIsMax = False\n\n\t\t\t\t\t#pass board to child\n\t\t\t\t\tvalue = min(value, alphabeta(curDepth + 1, nextPlayerIsMax, branchFactor,copy.deepcopy(currentBoard), nextBoard,rootBoard, alpha, beta, extraTurns))\t\n\t\t\t\t\tbeta = min(value, beta)\n\t\t\t\t\tif alpha >= beta:\n\t\t\t\t\t\tprune = True\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t# Move Illegal, set Beta to extremely high value\n\t\t\t\t\tbeta = 9999\n\treturn value\n\n\ndef makeNextBoard(playerSide, currentBoard, moveIndex):\n\tresBoard = currentBoard\n\tNScorePit = 7\n\tSScorePit = 15\n\n\tif (playerSide == 1): #move is being made by South\n\t\tmovePit = moveIndex + 8\n\t\tleftMostPit = 8\n\t\trightMostPit = 14 #excluding score pit\n\t\tscorePit = 15\n\t\tacrossIncrement = -8\n\t\tskipSouth = False\n\t\tskipNorth = True\n\n\telse:\t#move is being made by North\n\t\tmovePit = moveIndex\n\t\tleftMostPit = 0\n\t\trightMostPit = 6\n\t\tscorePit = 7\n\t\tacrossIncrement = 8\n\t\tskipSouth = True #must skip South's score pit when sowing\n\t\tskipNorth = False\n\n\t#collect the seeds, emptying the pit\n\tseedStash = resBoard[movePit]\n\tresBoard[movePit] = 0\n\tcurPit = movePit\n\n\t#sow the seeds anti-clockwise\n\twhile seedStash > 0:\n\t\t#find next pit index\n\t\tif (curPit == SScorePit):\n\t\t\tcurPit = 0 #end of array was reached, reset\n\t\telse:\n\t\t\tcurPit +=1\n\n\t\t#if the pit index isn't the opponent's score pit, put a seed in\n\t\tif not((skipNorth and curPit == NScorePit) or\n\t\t\t\t(skipSouth and curPit == SScorePit)):\n\t\t\tresBoard[curPit]+=1\n\t\t\tseedStash-=1\n\n\n\t#if player's last seed ended up in one of their empty playable pits\n\tif ((resBoard[curPit] == 1) and (leftMostPit<= curPit <= rightMostPit)):\n\t#take whatever is across and place it in player's score pit\n\t\tresBoard[scorePit]+=resBoard[curPit+acrossIncrement]\n\t\t#the pit across was non-empty (i.e. a capture occurred)\n\t\tif resBoard[curPit+acrossIncrement]!=0:\n\t\t\t#also capture the capturing seed\n\t\t\tresBoard[curPit]-=1\n\t\t\tresBoard[scorePit]+=1\n\t\t#empty opp's pit\n\t\tresBoard[curPit+acrossIncrement] = 0\n\n\treturn resBoard\n\ndef givesExtraTurn(moveIndex, currentBoard, fromMaxNode):\n\tresBoard = currentBoard.getBoardArray()\n\tNScorePit = 7\n\tSScorePit = 15\n\tif(fromMaxNode):\n\t\tindex = currentBoard.agentSide\n\telse:\n\t\tindex = currentBoard.getOppSide()\n\n\tif (index == 1): #move is being made by South\n\t\tmovePit = moveIndex + 8\n\t\tscorePit = 15\n\t\tskipNorthPit = True #must skip North's score pit when sowing\n\t\tskipSouthPit = False\n\n\telse: #indexing for North Side\n\t\tmovePit = moveIndex\n\t\tscorePit = 7\n\t\tskipNorthPit = False\n\t\tskipSouthPit = True\n\n\t#collect the seeds, emptying the pit\n\tseedStash = resBoard[movePit]\n\tresBoard[movePit] = 0\n\tcurPit = movePit\n\n\t#sow the seeds anti-clockwise\n\twhile seedStash > 0:\n\t\t#find next pit index\n\t\tif (curPit == SScorePit):\n\t\t\tcurPit = 0 #end of array was reached, reset\n\t\telse:\n\t\t\tcurPit +=1\n\n\t\t#if the pit index isn't the opponent's score pit, put a seed in\n\t\tif not((skipNorthPit and curPit == NScorePit) or\n\t\t\t\t(skipSouthPit and curPit == SScorePit)):\n\t\t\tresBoard[curPit]+=1\n\t\t\tseedStash-=1\n\n\treturn curPit == scorePit\n\n# evaluateBoard : works out the value of the board based on a weighted heuristic\ndef
evaluateBoard(previousBoard,board,rootBoard, extraTurns):\n\t#global #log\n\tresBoard = board.getBoardArray()\n\tresPrevBoard = previousBoard.getBoardArray()\n\tresRootBoard = rootBoard.getBoardArray()\n\n\tseedsOnSouthSide = sum(resBoard[8:15])\n\tseedsOnNorthSide = sum(resBoard[0:7])\n\tscoreNorth = resBoard[7]\n\tscoreSouth = resBoard[15]\n\tprevScoreNorth = resPrevBoard[7]\n\tdiffScoreNorth = scoreNorth - prevScoreNorth\n\tprevScoreSouth = resPrevBoard[15]\n\tdiffScoreSouth = scoreSouth - prevScoreSouth\n\n\tresBoard = board.getBoardArray()\n\tnorthScorePit= 7\n\tsouthScorePit = 15\n\n\n\n\t#get all indexes where we may attack is possible\n\tsouthAttackIndexes = []\n\tnorthAttackIndexes = []\n\tfor pitIndex in range(0,7):# [8,14] or [0,6]\n\n\t\tif resBoard[pitIndex] == 0 and resBoard[pitIndex+8] != 0:\n\t\t\tnorthAttackIndexes.append(pitIndex)\n\n\t\tif resBoard[pitIndex+8] == 0 and resBoard[pitIndex] != 0:\n\t\t\tsouthAttackIndexes.append(pitIndex+8)\n\n\t#given all those attack points find move Pit indexes where an oppotunity to\n\t#attack exists\n\tnorthCaptures=0\n\tsouthCaptures=0\n\tfor attackIndex in northAttackIndexes:\n\t\t#for each move pit except it\n\t\tfor moveIndex in range(0,northScorePit+1):\n\t\t\tif moveIndex == attackIndex:\n\t\t\t\tbreak\n\t\t\tif moveIndex < attackIndex:\n\t\t\t\tnorthCaptures += (resBoard[moveIndex] == attackIndex - moveIndex)\n\t\t\tif moveIndex > attackIndex:\n\t\t\t\tnorthCaptures += (resBoard[moveIndex] == (15+attackIndex) - moveIndex)\n\n\tfor attackIndex in southAttackIndexes:\n\t\t#for each move pit except it\n\t\tfor moveIndex in range(8,southScorePit+1):\n\t\t\tif moveIndex == attackIndex:\n\t\t\t\tbreak\n\t\t\tif moveIndex > attackIndex:\n\t\t\t\tsouthCaptures += (resBoard[moveIndex] == attackIndex - moveIndex)\n\t\t\tif moveIndex < attackIndex:\n\t\t\t\tsouthCaptures += (resBoard[moveIndex] == (15+attackIndex) - moveIndex)\n\n\tif (board.agentSide == 0): #North\n\t\tourCaptures = northCaptures\n\t\toppCaptures = southCaptures\n\telse: #South\n\t\tourCaptures = southCaptures\n\t\toppCaptures = northCaptures\n\n\n\tif (board.agentSide == 1):\n\t\treturn (SCORE_W*(scoreSouth - scoreNorth)) + (SEEDS_W*(seedsOnSouthSide - seedsOnNorthSide)) + (OURCAPT_W*ourCaptures)-(OPPCAPT_W*oppCaptures) + (EXTRA_W*extraTurns)\n\treturn (SCORE_W*(scoreNorth - scoreSouth)) + (SEEDS_W*(seedsOnNorthSide - seedsOnSouthSide)) + (OURCAPT_W*ourCaptures)-(OPPCAPT_W*oppCaptures) + (EXTRA_W*extraTurns)\n\n# moveIsLegal : True if legal, False if Illegal\ndef moveIsLegal(moveIndex,board,isMaxTurn):\n\tif (isMaxTurn):\n\t\treturn board.getSeeds(board.agentSide, moveIndex) != 0\n\treturn board.getSeeds(board.getOppSide(), moveIndex) != 0\n\n\n#-------------------------------Implementation----------------------------------\ndef run_alphabeta(initialBoard):\n\tbranchFactor = 7\n\tvalue = alpha = maxValue = -9999\n\tbeta = 9999\n\tmoveIndex = move = 0\n\n\tfor moveIndex in range (0,branchFactor):\n\t\tif moveIsLegal(moveIndex, initialBoard, True):\n\n\t\t\tnextBoard = copy.deepcopy(initialBoard)\n\t\t\tnextArray = makeNextBoard(nextBoard.agentSide, nextBoard.getBoardArray(), moveIndex)\n\t\t\tnextBoard.setBoardArray(nextArray)\n\n\n\t\t\tnextPlayerIsMax = False\n\t\t\textraTurns = 0\n\t\t\tif givesExtraTurn(moveIndex, copy.deepcopy(initialBoard), True):\n\t\t\t\tnextPlayerIsMax = True\n\t\t\t\textraTurns=extraTurns+1\n\n\t\t\tvalue = max(value, alphabeta(0, nextPlayerIsMax, branchFactor,copy.deepcopy(initialBoard), copy.deepcopy(nextBoard),copy.deepcopy(initialBoard), 
alpha, beta, extraTurns))\n\t\t\tif maxValue < value:\n\t\t\t\tmaxValue = value\n\t\t\t\tbestMove = moveIndex\n\t\t\t\tbestBoard = nextBoard\n\n\treturn bestMove + 1\n\ndef run_ab(changeM, board, depth):\n\tglobal MAXDEPTH\n\tMAXDEPTH = depth\n\twords = changeM.split(\";\")\n\tstate = words[2].split(\",\")\n\tstate = [int(word) for word in state]\n\tstatearray = np.reshape(state, (-1, 8))\n\tboard.setBoard(statearray)\n\treturn run_alphabeta(board)\n","sub_path":"g19Bot/manc_alphabeta.py","file_name":"manc_alphabeta.py","file_ext":"py","file_size_in_byte":9504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"214734082","text":"import scipy.signal\nimport signal\nimport numpy as np\nimport rl_utils\n\n\"\"\"\n\n\n\"\"\"\n\nclass Agent(object):\n def __init__(self, arch, policy, val_func, model, env, logger, policy_episodes=20, policy_steps=10, gamma1=0.0, gamma2=9.995, lam=0.98,\n use_timestep=False, monitor=None, recurrent_steps=1, seg_len=None):\n self.arch = arch\n self.env = env\n self.monitor = monitor\n self.policy_steps = policy_steps\n self.logger = logger\n self.use_timestep = use_timestep\n self.policy = policy \n self.val_func = val_func\n self.model = model\n self.policy_episodes = policy_episodes\n self.gamma1 = gamma1\n self.gamma2 = gamma2\n\n self.lam = lam\n self.global_steps = 0\n self.recurrent_steps = recurrent_steps\n\n if seg_len is None:\n self.seg_len = recurrent_steps\n else:\n self.seg_len = seg_len\n\n print('Agent') \n \"\"\" \n\n Args:\n policy: policy object with update() and sample() methods\n val_func: value function object with fit() and predict() methods\n env: environment\n logger: Logger object\n\n policy_episodes: number of episodes collected before update\n policy_steps: minimum number of steps before update\n (will update when either episodes > policy_episodes or steps > policy_steps)\n\n gamma: discount rate\n lam: lambda for GAE calculation\n use_timestep: boolean, True enables time step feature which sometimes works better than a \n low discount rate for continuing tasks with per-step rewards (like Mujoco envs)\n monitor: A monitor object like RL_stats to plot interesting stats as learning progresses\n Monitor object implements update_episode() and show() methods \n\n \"\"\" \n \n def run_episode(self):\n traj = self.arch.run_episode(self.env, self.policy, self.val_func, self.model, self.recurrent_steps, self.use_timestep)\n full_traj = traj.copy()\n seg_trajectories = self.segment_trajectory(traj)\n \n for traj in seg_trajectories: \n padded_traj = {}\n for k,v in traj.items():\n key = 'padded_' + k\n padded_traj[key], mask = rl_utils.add_padding(traj[k], self.recurrent_steps)\n\n traj.update(padded_traj)\n\n return seg_trajectories, full_traj\n\n def segment_trajectory(self, trajectory):\n segmented_trajectory = {}\n keys = trajectory.keys()\n t_len = trajectory['masks'].shape[0]\n\n if t_len % self.seg_len == 0:\n num_seg = t_len // self.seg_len\n else:\n num_seg = t_len // self.seg_len + 1\n\n segmented_trajectories = []\n for i in range(num_seg):\n seg_traj = {}\n for k in keys:\n seg_traj[k] = trajectory[k][i*self.seg_len:i*self.seg_len+self.seg_len]\n segmented_trajectories.append(seg_traj)\n return segmented_trajectories\n\n def run_policy(self,episode_cnt,warmup=False):\n \"\"\" Run policy and collect data for a minimum of min_steps and min_episodes\n Args:\n episode_cnt: current episode number, used for logging stats \n\n Returns: list of trajectory dictionaries, list length = number 
of episodes\n 'observes' : NumPy array of states from episode\n 'actions' : NumPy array of actions from episode\n 'rewards' : NumPy array of (un-discounted) rewards from episode\n \"\"\"\n total_steps = 0\n e_cnt = 0\n trajectories = []\n while e_cnt <= self.policy_episodes or total_steps < self.policy_steps:\n seg_trajectories, full_traj = self.run_episode()\n if self.monitor is not None and not warmup:\n self.monitor.update_episode(np.sum(full_traj['rewards1']) + np.sum(full_traj['rewards2']), full_traj['observes'].shape[0])\n total_steps += full_traj['observes'].shape[0]\n for st in seg_trajectories:\n trajectories.append(st)\n e_cnt += 1\n self.add_disc_sum_rew(trajectories, self.gamma1, self.gamma2) # calculated discounted sum of Rs\n # concatenate all episodes into single NumPy arrays\n keys = trajectories[0].keys()\n if 'vpreds' not in keys and self.val_func is not None:\n print('vpred not found in keys, adding')\n self.add_vpreds(trajectories) \n rollouts = {}\n for k in keys:\n rollouts[k] = np.concatenate([t[k] for t in trajectories])\n\n self.arch.update_scalers(self.policy, self.val_func, self.model, rollouts)\n\n if not warmup:\n self.arch.update(self.policy, self.val_func, self.model, rollouts, self.logger)\n self.log_batch_stats(rollouts['observes'], rollouts['actions'], rollouts['disc_sum_rew'], episode_cnt)\n self.global_steps += total_steps\n self.logger.log({'_MeanReward': np.mean([t['rewards1'].sum() + t['rewards2'].sum() for t in trajectories]),\n '_StdReward': np.std([t['rewards1'].sum() + t['rewards2'].sum() for t in trajectories]),\n '_MinReward': np.min([t['rewards1'].sum() + t['rewards2'].sum() for t in trajectories]),\n 'Steps': total_steps,\n 'TotalSteps' : self.global_steps})\n if self.monitor is not None: \n self.monitor.show()\n return trajectories\n\n def train(self,train_episodes, train_samples=None, warmup_updates=1):\n for i in range(warmup_updates):\n _ = self.run_policy(-1,warmup=True)\n print('*** SCALER WARMUP COMPLETE *** ')\n episode = 0\n \n if train_samples is not None:\n while self.global_steps < train_samples:\n trajectories = self.run_policy(episode)\n self.logger.write(display=True)\n episode += len(trajectories)\n else: \n while episode < train_episodes: \n trajectories = self.run_policy(episode)\n self.logger.write(display=True)\n episode += len(trajectories)\n \n \n def discount(self,x, gamma):\n \"\"\" Calculate discounted forward sum of a sequence at each point \"\"\"\n return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]\n\n\n def add_disc_sum_rew(self,trajectories, gamma1, gamma2):\n \"\"\" Adds discounted sum of rewards to all time steps of all trajectories\n\n Args:\n trajectories: as returned by run_policy()\n gamma: discount\n\n Returns:\n None (mutates trajectories dictionary to add 'disc_sum_rew')\n \"\"\"\n for i in range(len(trajectories)):\n trajectory = trajectories[i]\n if gamma1 < 0.999: # don't scale for gamma ~= 1\n rewards1 = trajectory['rewards1'] * (1 - gamma1)\n else:\n rewards1 = trajectory['rewards1'] * (1-0.999)\n\n if gamma2 < 0.999: # don't scale for gamma ~= 1\n rewards2 = trajectory['rewards2'] * (1 - gamma2)\n else:\n rewards2 = trajectory['rewards2'] * (1-0.999)\n\n done = trajectory['dones'][-1]\n if not done:\n rewards1 = np.hstack((rewards1, trajectories[i+1]['vpreds'][-1]))\n rewards2 = np.hstack((rewards2, trajectories[i+1]['vpreds'][-1]))\n \n disc_sum_rew1 = self.discount(rewards1, gamma1)[0:-1]\n disc_sum_rew2 = self.discount(rewards2, gamma2)[0:-1]\n\n else:\n disc_sum_rew1 = 
self.discount(rewards1, gamma1)\n                disc_sum_rew2 = self.discount(rewards2, gamma2)\n\n            trajectory['disc_sum_rew'] = disc_sum_rew1 + disc_sum_rew2\n            trajectory['padded_disc_sum_rew'], _ = rl_utils.add_padding(trajectory['disc_sum_rew'], self.recurrent_steps)\n\n    \n    def add_vpreds(self,trajectories):\n        \"\"\" Adds estimated value to all time steps of all trajectories\n\n        Args:\n            trajectories: as returned by run_policy()\n            val_func: object with predict() method, takes observations\n                and returns predicted state value\n\n        Returns:\n            None (mutates trajectories dictionary to add 'vpreds')\n        \"\"\"\n        for trajectory in trajectories:\n            observes = trajectory['observes']\n            values, _ = self.val_func.predict(observes, np.zeros_like(self.val_func.net.initial_state))\n            trajectory['vpreds'] = values\n\n    def log_batch_stats(self,observes, actions, disc_sum_rew, episode):\n        \"\"\" Log various batch statistics \"\"\"\n        self.logger.log({'_mean_obs': np.mean(observes),\n                    '_min_obs': np.min(observes),\n                    '_max_obs': np.max(observes),\n                    '_std_obs': np.mean(np.var(observes, axis=0)),\n                    '_mean_act': np.mean(actions),\n                    '_min_act': np.min(actions),\n                    '_max_act': np.max(actions),\n                    '_std_act': np.mean(np.var(actions, axis=0)),\n                    '_mean_discrew': np.mean(disc_sum_rew),\n                    '_min_discrew': np.min(disc_sum_rew),\n                    '_max_discrew': np.max(disc_sum_rew),\n                    '_std_discrew': np.var(disc_sum_rew),\n                    '_Episode': episode\n                    })\n\n","sub_path":"RL_lib/Agents/PPO/agent_seg.py","file_name":"agent_seg.py","file_ext":"py","file_size_in_byte":9695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"158614083","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\n\ndef derivative_numerical(f, x0, i, delta = 1e-8):\n    xi_plus = x0.copy()\n    xi_plus[i] += delta\n\n    xi_minus = x0.copy()\n    xi_minus[i] -= delta\n    return (f(xi_plus) - f(xi_minus)) / (2 * delta)\n\n\n\ndef gradient_numerical(f, x0, delta = 1e-8):\n    \"\"\"\n    function calculates the numerical gradient for function f at \n    the point x0\n    \"\"\"\n    N = len(x0)\n    grad_num = np.zeros([N, 1])\n    for i in range(N):\n        grad_num[i] = derivative_numerical(f, x0, i, delta)\n    return grad_num\n\n\ndef check_grad(f, gradf, x0, delta = 1e-8, verbose = True):\n    grad = np.array(gradf(x0))\n    grad_num = gradient_numerical(f, x0, delta)\n    if (verbose):\n        print('check_grad: precise gradient = ', grad)\n        print('check_grad: approximate gradient = ', grad_num)\n        print('check_grad: gradient error = ', grad - grad_num)  \n        \n    return np.sqrt(np.sum((grad - grad_num) ** 2))\n\ndef second_derivative_numerical(f, x0, i, k, delta = 1e-5):\n    \"\"\"\n\tfunction calculates the second derivative\n    returns d^2f/(dx_k dx_i)\n    \"\"\"\n    xk_plus = x0.copy()\n    xk_plus[k] += delta\n\n    xk_minus = x0.copy()\n    xk_minus[k] -= delta\n    \n    dfi_plus = derivative_numerical(f, xk_plus, i, delta)\n    dfi_minus = derivative_numerical(f, xk_minus, i, delta)\n    \n    return (dfi_plus - dfi_minus) / (2 * delta)\n\ndef hessian_numerical(f, x0, delta = 1e-5):\n    \"\"\"\n\tfunction calculates the Hessian matrix numerically\n    \"\"\"\n    assert x0.shape[1] == 1, 'hessian_numerical: input array should have shape [N, 1]'\n    \n    N = len(x0)\n    hessian = np.zeros([N, N], dtype = np.float64)\n    for i in range(N):\n        for k in range(i, N):\n            hessian[i, k] = second_derivative_numerical(f, x0, i, k, delta)\n            if i != k:\n                hessian[k, i] = hessian[i, k]\n    return hessian\n\ndef check_hessian(f, hess_analytical, x0, delta = 1e-5, verbose = True):\n    \"\"\"\n\tfunction checks the analytical Hessian matrix against a numerical one\n    \"\"\"\n    hessian_analytical = np.array(hess_analytical)\n    
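The `discount` helper above leans on `scipy.signal.lfilter` for the reversed cumulative discounted sum, and it is worth convincing yourself that this matches the direct backward recursion. A quick check with toy rewards and an arbitrary gamma:

```python
import numpy as np
import scipy.signal

def discount(x, gamma):
    # y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ...
    return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]

def discount_naive(x, gamma):
    out, running = np.zeros(len(x)), 0.0
    for t in reversed(range(len(x))):
        running = x[t] + gamma * running
        out[t] = running
    return out

rewards = np.array([1.0, 0.0, 0.0, 2.0])
print(discount(rewards, 0.9))        # [2.458 1.62  1.8   2.   ]
print(discount_naive(rewards, 0.9))  # identical up to float error
```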
hessian_num = hessian_numerical(f, x0, delta)\n if verbose:\n print('check_hessian: hessian_analytical = ', hessian_analytical)\n print('check_hessian: hessian_num = ', hessian_num)\n print('check_hessian: hessian difference = ', \n hessian_analytical - hessian_num)\n \n return np.sqrt(np.sum((hessian_analytical - hessian_num) ** 2))\n","sub_path":"solver/num_der.py","file_name":"num_der.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"577016108","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n# ============================================================================\n# Copyright (c) 2013-2018 nexB Inc. http://www.nexb.com/ - All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport codecs\nfrom collections import OrderedDict\nimport logging\nfrom posixpath import basename, dirname, exists, join, normpath\nimport sys\n\nif sys.version_info[0] < 3: \n # Python 2\n import backports.csv as csv #NOQA\nelse:\n # Python 3\n import csv #NOQA\n\nfrom attributecode import ERROR\nfrom attributecode import CRITICAL\nfrom attributecode import INFO\nfrom attributecode import Error\nfrom attributecode import model\nfrom attributecode import util\nfrom attributecode.util import add_unc\nfrom attributecode.util import to_posix\nfrom attributecode.util import UNC_PREFIX_POSIX\n\n\nLOG_FILENAME = 'error.log'\n\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.CRITICAL)\nhandler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\nlogger.addHandler(handler)\nfile_logger = logging.getLogger(__name__ + '_file')\n\n\ndef check_duplicated_columns(location):\n \"\"\"\n Return a list of errors for duplicated column names in a CSV file\n at location.\n \"\"\"\n location = add_unc(location)\n # FIXME: why ignore errors?\n with codecs.open(location, 'rb', encoding='utf-8', errors='ignore') as csvfile:\n reader = csv.reader(csvfile)\n columns = next(reader)\n columns = [col for col in columns]\n\n seen = set()\n dupes = OrderedDict()\n for col in columns:\n c = col.lower()\n if c in seen:\n if c in dupes:\n dupes[c].append(col)\n else:\n dupes[c] = [col]\n seen.add(c.lower())\n\n errors = []\n if dupes:\n dup_msg = []\n for name, names in dupes.items():\n names = u', '.join(names)\n msg = '%(name)s with %(names)s' % locals()\n dup_msg.append(msg)\n dup_msg = u', '.join(dup_msg)\n msg = ('Duplicated column name(s): %(dup_msg)s\\n' % locals() +\n 'Please correct the input and re-run.')\n errors.append(Error(ERROR, msg))\n return errors\n\n\ndef check_duplicated_about_file_path(inventory_dict):\n \"\"\"\n Return a list of errors for duplicated about_file_path in a CSV file at location.\n \"\"\"\n afp_list = []\n errors = []\n for component 
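A usage sketch for the checkers above: a quadratic f(x) = x^T x whose analytic gradient 2x is known in closed form, so the returned error should be roundoff-small. The column-vector shape follows the code's [N, 1] convention, and `check_grad` plus its helpers are assumed to be in scope:

```python
import numpy as np

f = lambda x: float(np.sum(x ** 2))    # f(x) = x^T x
gradf = lambda x: 2 * x                # analytic gradient

x0 = np.array([[1.0], [2.0], [-3.0]])  # shape [N, 1], as the code expects
err = check_grad(f, gradf, x0, verbose=False)
print(err)                             # expect a tiny roundoff-level value
```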
in inventory_dict:\n # Ignore all the empty path\n if component['about_file_path']:\n if component['about_file_path'] in afp_list:\n msg = (\"The input has duplicated values in 'about_file_path' field: \" + component['about_file_path'])\n errors.append(Error(CRITICAL, msg))\n else:\n afp_list.append(component['about_file_path'])\n return errors\n\n\ndef load_inventory(location, base_dir, license_notice_text_location=None,\n use_mapping=False, mapping_file=None):\n \"\"\"\n Load the inventory file at `location` for ABOUT and LICENSE files\n stored in the `base_dir`. Return a list of errors and a list of\n About objects validated against the base_dir.\n Optionally use `license_notice_text_location` as the location of\n license and notice texts.\n Optionally use mappings for field names if `use_mapping` is True\n or a custom mapping_file if provided.\n \"\"\"\n errors = []\n abouts = []\n base_dir = util.to_posix(base_dir)\n if location.endswith('.csv'):\n dup_cols_err = check_duplicated_columns(location)\n if dup_cols_err:\n errors.extend(dup_cols_err)\n return errors, abouts\n inventory = util.load_csv(location, use_mapping, mapping_file)\n else:\n inventory = util.load_json(location, use_mapping, mapping_file)\n\n try:\n dup_about_paths_err = check_duplicated_about_file_path(inventory)\n if dup_about_paths_err:\n errors.extend(dup_about_paths_err)\n return errors, abouts\n except:\n msg = (\n \"The essential field 'about_file_path' is not found.\\n\"\n \"Use the --mapping or --mapping-file option to map the \"\n \"input keys and verify the mapping information are correct.\\n\"\n \"OR correct the column names in the \"\n )\n errors.append(Error(CRITICAL, msg))\n return errors, abouts\n\n for i, fields in enumerate(inventory):\n # check does the input contains the required fields\n required_fields = model.About.required_fields\n\n for f in required_fields:\n if f not in fields:\n msg = (\n \"Required column: %(f)r not found.\\n\"\n \"Use the --mapping or --mapping-file option to map the \"\n \"input keys and verify the mapping information are correct.\\n\"\n \"OR correct the column names in the \"\n ) % locals()\n\n errors.append(Error(ERROR, msg))\n return errors, abouts\n afp = fields.get(model.About.about_file_path_attr)\n\n if not afp or not afp.strip():\n msg = 'Empty column: %(afp)r. Cannot generate .ABOUT file.' % locals()\n errors.append(Error(ERROR, msg))\n continue\n else:\n afp = util.to_posix(afp)\n loc = join(base_dir, afp)\n about = model.About(about_file_path=afp)\n about.location = loc\n running_inventory = False\n ld_errors = about.load_dict(fields, base_dir, running_inventory,\n use_mapping, mapping_file, license_notice_text_location,\n with_empty=False)\n # 'about_resource' field will be generated during the process.\n # No error need to be raise for the missing 'about_resource'.\n for e in ld_errors:\n if e.message == 'Field about_resource is required':\n ld_errors.remove(e)\n for e in ld_errors:\n if not e in errors:\n errors.extend(ld_errors)\n abouts.append(about)\n return errors, abouts\n\n\ndef generate(location, base_dir, license_notice_text_location=None,\n fetch_license=False, policy=None, conf_location=None,\n with_empty=False, with_absent=False, use_mapping=False, mapping_file=None):\n \"\"\"\n Load ABOUT data from a CSV inventory at `location`. Write ABOUT files to\n base_dir using policy flags and configuration file at conf_location.\n Policy defines which action to take for merging or overwriting fields and\n files. 
Return errors and about objects.\n \"\"\"\n not_exist_errors = []\n api_url = ''\n api_key = ''\n gen_license = False\n # Check if the fetch_license contains valid argument\n if fetch_license:\n # Strip the ' and \" for api_url, and api_key from input\n api_url = fetch_license[0].strip(\"'\").strip('\"')\n api_key = fetch_license[1].strip(\"'\").strip('\"')\n gen_license = True\n\n bdir = to_posix(base_dir)\n errors, abouts = load_inventory(\n location=location,\n base_dir=bdir,\n license_notice_text_location=license_notice_text_location,\n use_mapping=use_mapping,\n mapping_file=mapping_file)\n\n if gen_license:\n license_dict, err = model.pre_process_and_fetch_license_dict(abouts, api_url, api_key)\n if err:\n for e in err:\n # Avoid having same error multiple times\n if not e in errors:\n errors.append(e)\n\n for about in abouts:\n if about.about_file_path.startswith('/'):\n about.about_file_path = about.about_file_path.lstrip('/')\n dump_loc = join(bdir, about.about_file_path.lstrip('/'))\n\n # The following code is to check if there is any directory ends with spaces\n split_path = about.about_file_path.split('/')\n dir_endswith_space = False\n for segment in split_path:\n if segment.endswith(' '):\n msg = (u'File path : '\n u'%(dump_loc)s '\n u'contains directory name ends with spaces which is not '\n u'allowed. Generation skipped.' % locals())\n errors.append(Error(ERROR, msg))\n dir_endswith_space = True\n break\n if dir_endswith_space:\n # Continue to work on the next about object\n continue\n\n try:\n # Generate value for 'about_resource' if it does not exist\n if not about.about_resource.value:\n about.about_resource.value = OrderedDict()\n about_resource_value = ''\n if about.about_file_path.endswith('/'):\n about_resource_value = u'.'\n else:\n about_resource_value = basename(about.about_file_path)\n about.about_resource.value[about_resource_value] = None\n about.about_resource.present = True\n # Check for the existence of the 'about_resource'\n # If the input already have the 'about_resource' field, it will\n # be validated when creating the about object\n loc = util.to_posix(dump_loc)\n about_file_loc = loc\n path = join(dirname(util.to_posix(about_file_loc)), about_resource_value)\n if not exists(path):\n path = util.to_posix(path.strip(UNC_PREFIX_POSIX))\n path = normpath(path)\n msg = (u'Field about_resource: '\n u'%(path)s '\n u'does not exist' % locals())\n not_exist_errors.append(msg)\n\n if gen_license:\n # Write generated LICENSE file\n license_key_name_context_url_list = about.dump_lic(dump_loc, license_dict)\n if license_key_name_context_url_list:\n # Do not help user to fill in the license name\n # if not about.license_name.present:\n # about.license_name.value = lic_name\n # about.license_name.present = True\n if not about.license_file.present:\n about.license_file.value = OrderedDict()\n for lic_key, lic_name, lic_context, lic_url in license_key_name_context_url_list:\n gen_license_name = lic_key + u'.LICENSE'\n about.license_file.value[gen_license_name] = lic_context\n about.license_file.present = True\n if not about.license_name.present:\n about.license_name.value.append(lic_name)\n if not about.license_url.present:\n about.license_url.value.append(lic_url)\n if about.license_url.value:\n about.license_url.present = True\n if about.license_name.value:\n about.license_name.present = True\n\n # Write the ABOUT files\n about.dump(dump_loc, use_mapping=use_mapping, mapping_file=mapping_file, with_empty=with_empty, with_absent=with_absent)\n for e in 
not_exist_errors:\n errors.append(Error(INFO, e))\n except Exception as e:\n # only keep the first 100 char of the exception\n emsg = repr(e)[:100]\n msg = (u'Failed to write .ABOUT file at : '\n u'%(dump_loc)s '\n u'with error: %(emsg)s' % locals())\n errors.append(Error(ERROR, msg))\n dedup_errors = deduplicate(errors)\n return dedup_errors, abouts\n\n\ndef deduplicate(sequence):\n \"\"\"\n Return a list of unique items found in sequence. Preserve the original\n sequence order.\n \"\"\"\n deduped = []\n for item in sequence:\n if item not in deduped:\n deduped.append(item)\n return deduped\n","sub_path":"src/attributecode/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":12794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"103463106","text":"import random\n\n\ndef keuzemenu():\n print(\"'\\033[37m---Mastermind game---\")\n while True:\n keuze = (input(\"Kies een van de opties:\\n\"\"1.Raad de code\\n\"\"2.Maak de code\\n\"\n \"3.Maak de code (met makkelijk algoritmen)\\n\"\"'\\r\"))\n if '1' in keuze:\n start_randen_optie()\n break\n elif '2' in keuze:\n print('\\r')\n verwijder_gokken(computer_guess(), alle_mogelijk_gok())\n break\n elif '3' in keuze:\n algoritme_makkelijk(computer_guess(), alle_mogelijk_gok())\n break\n else:\n print('\\033[31m' + \"Voer een geldige keuze in !\\r\" + '\\033[31m')\n\n\ndef gok_vragen(): # for player guess\n kleuren = [\"wit\", \"rood\", \"groen\", \"geel\", \"blauw\", \"zwart\"]\n gok = []\n while len(gok) < 4:\n code_input = (input(\"Guess de kleuren:\")).lower()\n if code_input == \"stop\":\n print('\\33[100m'\"Het spel is gestopt.\"+'\\33[100m')\n exit()\n if code_input not in kleuren:\n print('\\033[91m' + \"Voer een geldig kleur in!\"+'\\033[0m')\n else:\n gok.append(code_input)\n return gok\n\n\ndef start_randen_optie():\n print(\" \\nJe gaat de code raden, dus...\\nKies uit de volgende kleuren: wit, rood, groen, geel, blauw en zwart. \")\n gok = gok_vragen()\n secret_code = random_secret_code()\n klopt_positie, klopt_kleuren = vergelijking(gok, secret_code)\n feedback_printen(klopt_positie, klopt_kleuren)\n\n\ndef random_secret_code():\n kleuren = [\"wit\", \"rood\", \"groen\", \"geel\", \"blauw\", \"zwart\"]\n secret_code_random = random.sample(kleuren, 4)\n return secret_code_random\n\n\ndef vergelijking(gok, secret_code): # Bron: Adam (de twee lijsten methode)\n if gok == secret_code:\n return 4, 0\n else:\n while gok != secret_code:\n klopt_kleuren = 0 # wit\n klopt_positie = 0 # zwart\n code_speler_list = []\n code_list = []\n for kleur_index in range(0, 4): # zwart\n if gok[kleur_index] == secret_code[kleur_index]:\n klopt_positie += 1\n else:\n code_speler_list.append(gok[kleur_index])\n code_list.append(secret_code[kleur_index])\n for items in code_speler_list: # wit\n for items2 in code_list:\n if items == items2:\n klopt_kleuren += 1\n code_list.remove(items2)\n break\n else:\n continue\n return klopt_positie, klopt_kleuren\n\n\ndef feedback_printen(klopt_positie, klopt_kleuren):\n count_aantal_pogingen = 1\n while klopt_positie != 10:\n if count_aantal_pogingen == 10:\n print('\\033[91m' + \"Je hebt de maximale aantal pogingen bereikt. Probeert het opnieuw\" + '\\033[91m')\n exit()\n elif klopt_positie == 4:\n print('\\33[4m' + \"Goed gedaan! 
Je bent een Mastermind!\" + '\\33[4m')\n print(\"Je hebt het binnen {} pogingen in gedaan.\".format(count_aantal_pogingen))\n else:\n print(\"\\r\\033[33mHet aantal zwart pin(s) is {}\"\n \"\\nHet aantal wit pin(s) is {}\\r\\033[33m\".format(klopt_positie, klopt_kleuren))\n gok_vragen()\n count_aantal_pogingen += 1\n\n\ndef computer_guess(): # voor computer guess\n\n print(\"Maak de secret code,\\n\" \n \"Kies uit de volgende kleuren: \\33[34m'wit, rood, groen, geel, blauw en zwart.\\n'\\33[33m\")\n kleuren = [\"wit\", \"rood\", \"groen\", \"geel\", \"blauw\", \"zwart\"]\n secret_code = []\n while len(secret_code) < 4:\n code_input = (input(\"Geef me de kleuren:\")).lower()\n if code_input == \"stop\":\n print('\\33[100m'\"Het spel is gestopt.\"+'\\33[100m')\n exit()\n if code_input not in kleuren:\n print('\\033[91m' + \"Voer een geldig kleur in!\"+'\\033[0m')\n else:\n secret_code.append(code_input)\n print(\"De gekozen secret code is: {} \".format(secret_code))\n return secret_code\n\n\ndef alle_mogelijk_gok():\n kleuren = [\"wit\", \"rood\", \"groen\", \"geel\", \"blauw\", \"zwart\"]\n alle_mogelijkheden = []\n for i in kleuren:\n for k in kleuren:\n for c in kleuren:\n for g in kleuren:\n alle_mogelijkheden.append([i, k, c, g])\n alle_mogelijkheden.sort()\n return alle_mogelijkheden\n\n\ndef verwijder_gokken(secret_code, alle_mogelijkheden): # combinatie van de simple strategy en de expected size,\n teller = 0\n while True: # Bron for using the while loop : Iwan\n if teller == 0:\n gok1 = ['wit', 'wit', 'rood', 'groen'] # vaste gok, het beste gok volgens de Expected Size Strategy\n teller = 1\n else:\n gok1 = alle_mogelijkheden[0]\n lijst_return = []\n print(gok1)\n if secret_code == gok1:\n print(\"De computer heeft je secret code binnen {} keer geraden !\".format(teller))\n break\n feedback1 = vergelijking(gok1, secret_code)\n for item in alle_mogelijkheden:\n a = vergelijking(gok1, item)\n if a == feedback1:\n lijst_return.append(item)\n alle_mogelijkheden = lijst_return\n teller += 1\n\n\ndef algoritme_makkelijk(secret_code, alle_mogelijkheden): # eigen algoritmen\n teller = 0\n while True:\n gok = alle_mogelijkheden[0]\n if gok == secret_code:\n print(gok)\n print(\"De algoritme heeft je secret code binnen {} keer geraden!\".format(teller))\n break\n else:\n alle_mogelijkheden.remove(gok)\n teller += 1\n\n\nkeuzemenu()\n","sub_path":"Mastermind.py","file_name":"Mastermind.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"509728549","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 15:27:04 2019\n\n@author: rawabe\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nx1 = [10,20,30]\ny1 = [20,40,10]\n\nplt.plot(x1, y1, label = \"line 1\")\n\nx2 = [10,20,30]\ny2 = [40,10,30]\n\nplt.plot(x2, y2, label = \"line 2\")\nplt.xlabel('x - axis')\n\nplt.ylabel('y - axis')\n\nplt.title('Two or more lines on same plot with suitable legends ')\n\nplt.legend()\n\nplt.show()","sub_path":"Soluations/week2/day4/Ex13.py","file_name":"Ex13.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"241504559","text":"import requests\nfrom urllib.parse import urlparse, urljoin\nfrom bs4 import BeautifulSoup\nimport colorama\n\ncolorama.init()\n\nBLUE = colorama.Fore.BLUE\nGREEN = colorama.Fore.GREEN\nRED = colorama.Fore.RED\nRESET = colorama.Fore.RESET\nYELLOW = 
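The two-list peg counting in `vergelijking` above (the "twee lijsten methode", i.e. two-lists method) has a compact stdlib equivalent using `collections.Counter`: black pegs are exact position matches, and white pegs are the remaining colour overlap. A standalone sketch:

```python
from collections import Counter

def score(guess, secret):
    black = sum(g == s for g, s in zip(guess, secret))
    # colour overlap ignores position; subtract the exact matches
    overlap = sum((Counter(guess) & Counter(secret)).values())
    return black, overlap - black    # (black pegs, white pegs)

print(score(["rood", "wit", "blauw", "geel"],
            ["rood", "blauw", "wit", "zwart"]))   # (1, 2)
```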
colorama.Fore.YELLOW\n\ninternal_urls = set()\nwiki_source = ''\n\ndef get_links(url, target):\n # domain name of the URL without the protocol\n domain_name = urlparse(url).netloc\n soup = BeautifulSoup(requests.get(url).content, \"html.parser\")\n links = []\n found = None\n for a_tag in soup.findAll(\"a\"):\n href = a_tag.attrs.get(\"href\")\n if href == \"\" or href is None or ':' in href or href.startswith(wiki_source) :\n # href empty tag\n continue\n # join the URL if it's relative (not absolute link)\n href = urljoin(url, href)\n parsed_href = urlparse(href)\n # remove URL GET parameters, URL fragments, etc.\n href = parsed_href.scheme + \"://\" + parsed_href.netloc + parsed_href.path\n if not (bool(urlparse(href).netloc) and bool(urlparse(href).scheme)):\n # not a valid URL\n continue\n if href in internal_urls:\n # already in the set\n continue\n internal_urls.add(href)\n links.append(href)\n if href == target:\n found = href\n break\n\n return (found, links)\n\ndef crawl(url, target, max_urls, query_urls):\n if max_urls == 0:\n return False\n\n (found, links) = get_links(url, target)\n\n if found is not None:\n print(f\"{GREEN}[!] Article connection found! {RESET}\")\n for url in query_urls:\n print(f\"- {url}\")\n print(f\"- {found}\")\n return True\n\n for link in links:\n query_urls.append(link)\n if crawl(link, target, max_urls-1, query_urls):\n return True\n query_urls.pop()\n\n return False\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description=\"Wikipedia crawler that will find a connection between article A and B.\")\n parser.add_argument(\"article_a\", help=\"Article name A.\")\n parser.add_argument(\"article_b\", help=\"Article name B.\")\n parser.add_argument(\"-w\", \"--wiki-source\", help=\"\", default=\"https://de.wikipedia.org/wiki/\", type=str)\n parser.add_argument(\"-m\", \"--max-urls\", help=\"Number of max URLs to crawl.\", default=50, type=int)\n\n args = parser.parse_args()\n wiki_source = args.wiki_source\n article_a = wiki_source + args.article_a\n article_b = wiki_source + args.article_b\n max_urls = args.max_urls\n\n internal_urls.add(article_a)\n\n if not crawl(article_a, article_b, max_urls, [article_a]):\n print(f\"{RED}[!] Articles aren't connected. 
{RESET}\")\n\n","sub_path":"metaedia.py","file_name":"metaedia.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"139848402","text":"#!/usr/bin/env python\n\n# pylint: disable=bad-indentation\n\nfrom setuptools import setup, Extension # pylint: disable=multiple-import\n\nimport glob\nimport os\n\n\ndef scan_package_data(path, pattern, check):\n # We start off in the setup.py directory, but package_data is relative to\n # the pytype/ directory.\n package_dir = 'pytype'\n path = os.path.join(*path)\n full_path = os.path.join(package_dir, path)\n result = []\n for subdir, _, _ in os.walk(full_path):\n full_pattern = os.path.join(subdir, pattern)\n if glob.glob(full_pattern):\n # Once we know that it matches files, we store the pattern itself,\n # stripping off the prepended pytype/\n result.append(os.path.relpath(full_pattern, package_dir))\n assert os.path.join(path, *check) in result\n return result\n\n\ndef get_builtin_files():\n builtins = scan_package_data(['pytd', 'builtins'], '*.py*',\n check=['3', '*.py*'])\n stdlib = scan_package_data(['pytd', 'stdlib'], '*.pytd',\n check=['3', 'asyncio', '*.pytd'])\n typeshed = scan_package_data(['typeshed'], '*.pyi',\n check=['stdlib', '2', '*.pyi'])\n return builtins + stdlib + typeshed\n\n\nparser_ext = Extension(\n 'pytype.pyi.parser_ext',\n sources = [\n 'pytype/pyi/parser_ext.cc',\n 'pytype/pyi/lexer.lex.cc',\n 'pytype/pyi/parser.tab.cc',\n ],\n extra_compile_args=['-std=c++11']\n)\n\n\nsetup(\n name='pytype',\n version='0.2',\n description='Python type inferencer',\n maintainer='Google',\n maintainer_email='pytypedecl-dev@googlegroups.com',\n url='http://github.com/google/pytype',\n packages=['pytype',\n 'pytype/pyc',\n 'pytype/pyi',\n 'pytype/pytd',\n 'pytype/pytd/parse',\n 'pytype/tools',\n 'pytype/tools/analyze_project',\n 'pytype/typegraph',\n ],\n scripts=['scripts/pytype',\n 'scripts/pytd',\n 'scripts/pytype-all',\n ],\n package_data={'pytype': get_builtin_files()},\n requires=['pyyaml (>=3.11)', 'six'],\n install_requires=['pyyaml (>=3.11)', 'six'],\n classifiers=[\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n ext_modules = [parser_ext],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"341922867","text":"################################\n# Week 3.1: k-means clustering #\n################################\n# importing libraries\nimport random # library for random numbers generator\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets.samples_generator import make_blobs\n\n#print('libraries imported')\n\n##############################################\n# 1. 
k-means on a Randomly Generated Dataset #\n##############################################\n\n# 30 different points, belonging to two clusters\n\n# data\nx1 = [-4.9, -3.5, 0, -4.5, -3, -1, -1.2, -4.5, -1.5, -4.5, -1, -2, -2.5, -2, -1.5, 4, 1.8, 2, 2.5, 3, 4, 2.25, 1, 0, 1, 2.5, 5, 2.8, 2, 2]\nx2 = [-3.5, -4, -3.5, -3, -2.9, -3, -2.6, -2.1, 0, -0.5, -0.8, -0.8, -1.5, -1.75, -1.75, 0, 0.8, 0.9, 1, 1, 1, 1.75, 2, 2.5, 2.5, 2.5, 2.5, 3, 6, 6.5]\n\n# checking length to confirm 30 elements\n# print(len(x1))\n# print(len(x2))\n\n# print('Datapoints defined!')\n\n# defining function for assigning clusters\ncolors_map = np.array(['b', 'r'])\ndef assign_members(x1, x2, centers):\n compare_to_first_center = np.sqrt(np.square(np.array(x1) - centers[0][0]) + np.square(np.array(x2) - centers[0][1]))\n compare_to_second_center = np.sqrt(np.square(np.array(x1) - centers[1][0]) + np.square(np.array(x2) - centers[1][1]))\n class_of_points = compare_to_first_center > compare_to_second_center\n colors = colors_map[class_of_points + 1 - 1]\n return colors, class_of_points\n\n#print('assign_members function defined!')\n\n# defining a function that recalculates and reassigns centroids\n# update means\ndef update_centers(x1, x2, class_of_points):\n center1 = [np.mean(np.array(x1)[~class_of_points]), np.mean(np.array(x2)[~class_of_points])]\n center2 = [np.mean(np.array(x1)[class_of_points]), np.mean(np.array(x2)[class_of_points])]\n return [center1, center2]\n\n#print('update_centers function defined!')\n\n# defining a function to plot clusters and centroids\ndef plot_points(centroids=None, colors='g', figure_title=None):\n # plotting the figure\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_subplot(1, 1, 1)\n\n centroid_colors = ['bx', 'rd']\n if centroids:\n for (i, centroid) in enumerate(centroids):\n ax.plot(centroid[0], centroid[1], centroid_colors[i], markeredgewidth=5, markersize=20)\n plt.scatter(x1, x2, s=500, c=colors)\n\n # defining the ticks\n xticks = np.linspace(-6, 8, 15, endpoint=True)\n yticks = np.linspace(-6, 6, 13, endpoint=True)\n\n # fixing the horizontal axis\n ax.set_xticks(xticks)\n ax.set_yticks(yticks)\n\n # add tick labels\n xlabels = xticks\n ax.set_xticklabels(xlabels)\n ylabels = yticks\n ax.set_yticklabels(ylabels)\n\n # style the ticks\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.tick_params('both', length=2, width=1, which='major', labelsize=15)\n\n # add labels to axes\n ax.set_xlabel('x1', fontsize=20)\n ax.set_ylabel('x2', fontsize=20)\n\n # add title to figure\n ax.set_title(figure_title, fontsize=24)\n\n plt.show()\n\n# print('plot_points function defined!')\n\n# initializing k means data points\n#plot_points(figure_title='Scatter Plot of x2 vs x1')\n\n# initializing k means - arbitrarily defining clusters\ncenters = [[-2,2], [2,-2]]\n#plot_points(centers, figure_title='k-means Initialization')\n\n# running 4 iterations\n# number_of_iterations = 4\n# for i in range(number_of_iterations):\n# input('Iteration {} - Press Enter to update the members of each cluster'.format(i + 1))\n# colors, class_of_points = assign_members(x1, x2, centers)\n# title = 'Iteration {} - Cluster Assignment'.format(i + 1)\n# plot_points(centers, colors, figure_title=title)\n# input('Iteration {} - Press Enter to update the centers'.format(i + 1))\n# centers = update_centers(x1, x2, class_of_points)\n# title = 'Iteration {} - Centroid Update'.format(i + 1)\n# plot_points(centers, colors, figure_title=title)\n\n# generating the data\nnp.random.seed(0)\n\n# 
feature matrix\nX, y = make_blobs(n_samples=5000, centers=[[4,4], [-2,-1], [2,-3], [1,1]], cluster_std=0.9)\n\nplt.figure(figsize=(15,10))\nplt.scatter(X[:,0], X[:,1], marker='.')\n# plt.show()\n\n# Setting up k-means\n\n# output parameter\nk_means = KMeans(init=\"k-means++\", n_clusters=4, n_init=12)\n\n# fitting model\nk_means.fit(X)\n\n# labelling each point\nk_means_labels = k_means.labels_\nk_means_labels\n\n# also pulling cluster centers\nk_means_cluster_centers = k_means.cluster_centers_\nk_means_cluster_centers\n\n# Visualizing Cluster Results\n\n# initializing plot, plus dimensions\nfig = plt.figure(figsize=(15,10))\n\n# using colors to differentiate clustering levels\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(set(k_means_labels))))\n\n# create a plot\nax = fig.add_subplot(1, 1, 1)\n\n# looping through, to plot clusters\n# k range is 1 to 3, for 3 clusters\nfor k, col in zip(range(len([[4,4], [-2, -1], [2, -3], [1, 1]])), colors):\n\n # listing datapoints, marking ones in cluster as true\n # and ones outside cluster as false\n my_members = (k_means_labels == k)\n\n # assigning the cluster centroid\n cluster_center = k_means_cluster_centers[k]\n\n # using color maps for clusters\n ax.plot(X[my_members, 0], X[my_members, 1], 'w', markerfacecolor=col, marker='.')\n\n # plotting centroids with a darker outline than cluster points\n ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6)\n\n# titling plot\nax.set_title('Kmeans')\n\n# removing x axis ticks\nax.set_xticks(())\n\n# removing y axis ticks\nax.set_yticks(())\n\n# displaying plot\n# plt.show()\n\n###########################################################\n# 2. Applying K-means clustering to customer segmentation #\n###########################################################\nimport wget\n\n# downloading csv\nurl = \"https://cocl.us/customer_dataset\"\n# wget.download(url, 'customer_segmentation.csv')\n\n# print('data downloaded')\n\n# converting to a dataframe\ncustomers_df = pd.read_csv('customer_segmentation.csv')\ncustomers_df.head()\n\n# Getting ready for pre-processing.\n# clustering only applies to numeric vars\ndf = customers_df.drop('Address', axis=1)\ndf.head()\n\n# normalization to help interpretability across features\nfrom sklearn.preprocessing import StandardScaler\n\nX = df.values[:,1:] # assigning dataframe values\nX = np.nan_to_num(X) # transforming\ncluster_dataset = StandardScaler().fit_transform(X)\nprint(cluster_dataset)\n\n# modeling\nnum_clusters = 3\n\nk_means = KMeans(init=\"k-means++\", n_clusters=num_clusters, n_init=12)\nk_means.fit(cluster_dataset)\nlabels = k_means.labels_\n\nprint(labels)\n\n# insights drawn\n\n# every row / customer is assigned a label / cluster\ndf[\"Labels\"] = labels\nprint(df.head(5))\n\n# averaging cluster features, to check centroid\nprint(df.groupby('Labels').mean())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# in order to display plot within window\n# plt.show()\n","sub_path":"wk3/wk3.1_k_means_clustering.py","file_name":"wk3.1_k_means_clustering.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"14465524","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n\n#Klassisk Gauss Funktion\ndef gaussFunc(x, a, x0, sigma):\n return a*np.exp(-(x - x0)**2 / (2*sigma**2))\n\n# Funktion der blot indlæser counts og channels fra mca-fil\ndef loadData(fileName):\n \n with 
open(fileName) as text:\n \n counts = []\n lister = text.readlines()\n begin = lister.index('<<DATA>>\\n')+1\n end = lister.index('<<END>>\\n')-1\n data = lister[begin:end]\n for i in data:\n counts.append(float(i)) \n channels = range(0,len(data))\n \n return channels, counts\n\n# par_limits er grænserne for fitteparametrene approksimeret fra plottet fra raw_data_plot\ndef gaussFitPlot(fileName, region, par_limits):\n \n channels, counts = loadData(fileName)\n channels = np.array(channels[region[0]:region[1]])\n counts = np.array(counts[region[0]:region[1]])\n\n popt, pcov = curve_fit(gaussFunc, channels, counts, bounds = par_limits)\n plt.figure()\n plt.plot(channels,counts)\n plt.plot(channels, gaussFunc(channels, *popt), 'r-')\n plt.xlabel('Channels')\n plt.ylabel('Counts')\n \n \npar_limits = ([1500,800,5],[1700,1000,50])\nregion = [800,1000]\ngaussFitPlot('180320_data/kali_am_600.mca',region, par_limits)\n\npar_limits = ([40,3700,5],[80,3900,50])\nregion = [3700,3900]\ngaussFitPlot('180320_data/kali_am_600.mca',region, par_limits)\n\n\n","sub_path":"gauss_fit_xrays.py","file_name":"gauss_fit_xrays.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"322345649","text":"\"\"\"\n\n\n\"\"\"\n\n\n\nimport math\n\n\n# Add any extra import statements you may need here\n\n\n# Add any helper functions you may need here\n\n\ndef getBillionUsersDay(growthRates):\n day_counter = 0\n target_number_of_users = 1000000000\n current_sum_of_users = 0\n\n while current_sum_of_users < target_number_of_users:\n day_counter += 1\n current_sum_of_users = 0\n for g in growthRates:\n current_sum_of_users += math.pow(g,day_counter)\n if current_sum_of_users >= target_number_of_users:\n print(day_counter)\n break\n return day_counter\n\n\ndef printInteger(n):\n print('[', n, ']', sep='', end='')\n\n\ntest_case_number = 1\n\n\ndef check(expected, output):\n global test_case_number\n result = False\n if expected == output:\n result = True\n rightTick = '\\u2713'\n wrongTick = '\\u2717'\n if result:\n print(rightTick, 'Test #', test_case_number, sep='')\n else:\n print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='')\n printInteger(expected)\n print(' Your output: ', end='')\n printInteger(output)\n print()\n test_case_number += 1\n\n\nif __name__ == \"__main__\":\n test_1 = [1.1, 1.2, 1.3]\n expected_1 = 79\n output_1 = getBillionUsersDay(test_1)\n check(expected_1, output_1)\n\n test_2 = [1.01, 1.02]\n expected_2 = 1047\n output_2 = getBillionUsersDay(test_2)\n check(expected_2, output_2)\n\n # Add your own test cases here\n","sub_path":"1 billion users.py","file_name":"1 billion users.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"569294129","text":"import subprocess\nimport sys\nimport json\nfrom os.path import expanduser\nimport os\nimport getpass\n\nhome = expanduser(\"~\")\n\ndef start(args, logfile, errfile):\n conf = { \n 'database_host' : args.database_host,\n 'workers' : args.max_threads,\n }\n with open('mojolicious/app.conf', 'w') as f:\n f.write(json.dumps(conf))\n\n try:\n # os.environ[\"MOJO_MODE\"] = \"production\"\n subprocess.Popen(\"hypnotoad ./app.pl\", shell=True, cwd=\"mojolicious\", stderr=errfile, stdout=logfile)\n return 0\n except subprocess.CalledProcessError:\n return 1\n\ndef stop(logfile, errfile):\n try:\n subprocess.call(\"hypnotoad -s ./app.pl\", shell=True, 
cwd=\"mojolicious\", stderr=errfile, stdout=logfile)\n p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n for line in out.splitlines():\n if 'hypnotoad' in line:\n pid = int(line.split(None, 2)[1])\n os.kill(pid, 15)\n return 0\n except subprocess.CalledProcessError:\n return 1\n","sub_path":"mojolicious/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"218798898","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\r\nimport os\r\nimport urllib.parse as urlparse\r\n#url = 'http://foo.appspot.com/abc?def=ghi'\r\n#parsed = urlparse.urlparse(url)\r\n#print(urlparse.parse_qs(parsed.query)['def'])\r\n\r\nclass testHTTPServer_RequestHandler(BaseHTTPRequestHandler):\r\n \r\n\tdef do_GET(self):\r\n\t\tfilename = self.path[1:9]\r\n\t\tif filename == 'telegram':\r\n\t\t\tparsed = urlparse.parse_qs(urlparse.urlparse(self.path).query)\r\n\t\t\tif 'username' in parsed:\r\n\t\t\t\tif 'act' in parsed:\r\n\t\t\t\t\tos.system(\"python bot/bot.py \" + parsed['username'][0] + \" \" + parsed['act'][0])\r\n\t\tself.send_response(200)\r\n\t\tself.send_header('Content-type','text/html')\r\n\t\tself.end_headers()\r\n\t\tmessage = \"Hello world!\"\r\n\t\tself.wfile.write(bytes(message, \"utf8\"))\r\n\t\treturn\r\n \r\ndef run():\r\n\tprint('Starting server')\r\n\r\n\tserver_address = ('127.0.0.1', 8080)\r\n\thttpd = HTTPServer(server_address, testHTTPServer_RequestHandler)\r\n\tprint('Running server')\r\n\thttpd.serve_forever()\r\n\r\nrun()","sub_path":"sz_aloner/bot/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"48989440","text":"import logging\nimport os\n\nfrom airflow import configuration\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils import apply_defaults\n\n\nclass RedshiftTestsOperator(BaseOperator):\n \"\"\"\n Executes sql test commands for basic validations in ETL\n \"\"\"\n ui_color = '#ededed'\n\n @apply_defaults\n def __init__(\n self,\n autocommit=True,\n sql='',\n *args, **kwargs):\n super(RedshiftTestsOperator, self).__init__(*args, **kwargs)\n self.postgres_conn_id = configuration.get(\n \"core\", \"redshift_connection_id\")\n self.autocommit = autocommit\n self.sql = sql\n\n def execute(self, context):\n logging.info('Testing ETL using sql:')\n sql = self.sql\n logging.info(sql)\n self.hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n result = self.hook.get_first(sql)\n if result[0] is False:\n raise AirflowException('Quality check returned false.')\n","sub_path":"plugins/operators/redshift_tests_operator.py","file_name":"redshift_tests_operator.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"140928644","text":"import unittest\nimport numpy as np\nfrom hac.experience_buffer import ExperienceBuffer\n\n\nclass TestExperienceBuffer(unittest.TestCase):\n \"\"\"Tests the methods of the ExperienceBuffer object.\"\"\"\n\n def test_experience_buffer(self):\n # some fixed variables for this test\n batch_size = 2\n max_buffer_size = 12\n state_dim = 5\n goal_dim = 4\n action_dim = 3\n\n # Instantiate the buffer object.\n buff = ExperienceBuffer(max_buffer_size, batch_size)\n\n # Test 
the `add` method.\n for i in range(max_buffer_size-1):\n # add a new (random) element\n state = np.random.rand(state_dim)\n next_state = np.random.rand(state_dim)\n action = np.random.rand(action_dim)\n goal = np.random.rand(goal_dim)\n terminal = False\n reward = 0\n info = {}\n buff.add([state, action, reward, next_state, goal, terminal, info])\n\n # check the size of the buffer after adding each element\n self.assertEqual(buff.size, i+1)\n\n # Test the `add` method one the max buffer size is hit.\n #\n # This is done by adding a new element once the buffer is full, and\n # checking the new size of the buffer once this is done.\n state = np.random.rand(state_dim)\n next_state = np.random.rand(state_dim)\n action = np.random.rand(action_dim)\n goal = np.random.rand(goal_dim)\n terminal = False\n reward = 0\n info = {}\n buff.add([state, action, reward, next_state, goal, terminal, info])\n self.assertEqual(buff.size, int(5/6*max_buffer_size))\n\n # Test the `get_batch` method.\n s, a, r, s_p, g, t = buff.get_batch()\n self.assertTupleEqual(np.array(r).shape, (batch_size,))\n self.assertTupleEqual(np.array(t).shape, (batch_size,))\n self.assertTupleEqual(np.array(s).shape, (batch_size, state_dim))\n self.assertTupleEqual(np.array(a).shape, (batch_size, action_dim))\n self.assertTupleEqual(np.array(g).shape, (batch_size, goal_dim))\n self.assertTupleEqual(np.array(s_p).shape, (batch_size, state_dim))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/fast_tests/test_experience_buffer.py","file_name":"test_experience_buffer.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"305356992","text":"import matplotlib.pyplot as plt\ndef letter_subplots(axes=None, letters=None, xoffset=-0.1, yoffset=1.0, xlabel=None, ylabel=None, **kwargs):\n \"\"\"Add letters to the corners of subplots. By default each axis is given an\n upper-case bold letter label.\n\n axes: list of pyplot ax objects.\n letters: list of strings to use as labels, default [\"A\", \"B\", \"C\", ...]\n xoffset, yoffset: positions of each label relative to plot frame (default\n -0.1,1.0 = upper left margin). 
Can also be a list of offsets, in which\n case it should be the same length as the number of axes.\n xlabel,ylabel: (optional) add label(s) to all the axes\n\n Other arguments will be passed to plt.annotate()\n\n Examples:\n >>> fig, axes = plt.subplots(1,3)\n >>> letter_subplots(axes, letters=['(a)', '(b)', '(c)'], fontweight='normal')\n\n >>> fig, axes = plt.subplots(2,2, sharex=True, sharey=True)\n >>> letter_subplots(fig.axes) # fig.axes is a list when axes is a 2x2 matrix\n \"\"\"\n\n # handle single axes:\n if axes is None:\n axes = plt.gcf().axes\n try:\n iter(axes)\n except TypeError:\n axes = [axes]\n\n # set up letter defaults (and corresponding fontweight):\n fontweight = \"bold\"\n ulets = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[:len(axes)])\n llets = list('abcdefghijklmnopqrstuvwxyz'[:len(axes)])\n if letters is None or letters == \"A\":\n letters = ulets\n elif letters == \"(a)\":\n letters = [ \"({})\".format(lett) for lett in llets ]\n fontweight = \"normal\"\n elif letters == \"(A)\":\n letters = [ \"({})\".format(lett) for lett in ulets ]\n fontweight = \"normal\"\n elif letters == \"lower\" or letters == \"lowercase\" or letters == \"a\":\n letters = llets\n\n # make sure there are x and y offsets for each ax in axes:\n if isinstance(xoffset, (int, float)):\n xoffset = [xoffset]*len(axes)\n else:\n assert len(xoffset) == len(axes)\n if isinstance(yoffset, (int, float)):\n yoffset = [yoffset]*len(axes)\n else:\n assert len(yoffset) == len(axes)\n\n # defaults for annotate (kwargs is second so it can overwrite these defaults):\n my_defaults = dict(fontweight=fontweight, fontsize='large', ha=\"center\",\n va='center', xycoords='axes fraction', annotation_clip=False)\n kwargs = dict( list(my_defaults.items()) + list(kwargs.items()))\n\n list_txts = []\n for ax,lbl,xoff,yoff in zip(axes,letters,xoffset,yoffset):\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n t = ax.annotate(lbl, xy=(xoff,yoff), **kwargs)\n list_txts.append(t)\n return list_txts\n","sub_path":"src/bltools.py","file_name":"bltools.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"145404946","text":"f = open('textdata.txt', 'r')\n# d = f.read()\n# words = d.split()\n# lines = d.splitlines()\n# print(len(words))\n# print(len(lines))\n\ncl = cw = cc = 0\nfor line in f:\n cl += 1\n word = line.split()\n cw += len(word)\n cc += len(line)\nprint('lines:',cl,'\\nwords:',cw,'\\ncharactors:',cc)","sub_path":"FileHandling/CountNumberOfLinesAndWords.py","file_name":"CountNumberOfLinesAndWords.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"169565438","text":"from .yolov5 import Detector\n\nimport numpy as np\n\n__all__ = ['YoloHumanCounter']\n\n\nclass YoloHumanCounter(Detector):\n def __init__(self,\n weights,\n img_size=(640, 640),\n conf_thresh=0.4,\n iou_thresh=0.5,\n agnostic_nms=False,\n device='cpu',\n human_class_id=0):\n super(YoloHumanCounter, self).__init__(weights, img_size, conf_thresh, iou_thresh, agnostic_nms, device)\n self.human_class_id = human_class_id\n\n def count(self, imgs, return_detection=False):\n single_input = isinstance(imgs, np.ndarray)\n if single_input:\n imgs = [imgs]\n dets = self.detect(imgs)\n dets = [det[det[:, -1].eq(self.human_class_id)] for det in dets]\n counts = [det.size(0) for det in dets]\n if single_input:\n counts, dets = counts[0], 
dets[0]\n if return_detection:\n return counts, dets\n return counts\n\n def __call__(self, imgs, return_detection=False):\n return self.count(imgs, return_detection=return_detection)\n","sub_path":"yolo_human_counter/human_counter.py","file_name":"human_counter.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"125800299","text":"class Solution:\n def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:\n target = sum(nums) / k\n used = [False] * len(nums)\n nums.sort()\n \n def backtrack(idx, k, subsum):\n if k == 0:\n return True\n if subsum == target:\n return backtrack(0, k - 1, 0)\n \n for i in range(idx, len(nums)):\n if i - 1 >= 0 and not used[i - 1] and nums[i] == nums[i - 1]:\n continue\n if used[i] or subsum + nums[i] > target:\n continue\n used[i] = True\n if backtrack(i + 1, k, subsum + nums[i]):\n return True\n used[i] = False\n return False\n \n return backtrack(0, k, 0)","sub_path":"698_PartitionToKEqualSumSubsets.py","file_name":"698_PartitionToKEqualSumSubsets.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"146868708","text":"import requests\nimport smtplib\ndef store_emails():\n global emails_dict\n emails_dict = {}\n try:\n emails = open(\"emails.txt\", 'r')\n for line in emails:\n (email, name) = line.split(',')\n emails_dict[email] = name\n emails.close()\n except FileNotFoundError as err:\n print(err)\ndef send_email():\n server = smtplib.SMTP('smtp.gmail.com', '587')\n server.starttls()\n from_email = input(\"Input sender email: \")\n password = input(\"Passord: \")\n server.login(from_email, password)\n for to_email, name in emails_dict.items():\n tmp_message = \"Subject: Yoooooooo! 
Let's check the weather\\n\"\n tmp_message += 'Hi ' + name + '!\\n\\n'\n tmp_message += messages\n server.sendmail(from_email, to_email, tmp_message)\ndef get_weather_info():\n global temp, temp_max, temp_min, temp_desc\n url = \"http://api.openweathermap.org/data/2.5/weather?q=Toronto,on&units=metric&appid=0010475052a54445a3a241857f6d9c1f\"\n request = requests.get(url).json()\n temp = request['main']['temp']\n temp_desc = request['weather'][0]['description']\n temp_max = request['main']['temp_max']\n temp_min = request['main']['temp_min']\ndef set_message():\n global messages\n try:\n messages = 'Current weather is ' + str(int(temp)) + ', ' + str(temp_desc) + '\\n'\n messages += '\\tWith a high of ' + str(int(temp_max))\n messages += '\\n\\tand a low of ' + str(int(temp_min))\n messages += '\\n'\n print(messages)\n except FileNotFoundError as err:\n print(err)\ndef main():\n store_emails()\n get_weather_info()\n set_message()\n send_email()\n\n\nmain()\n","sub_path":"Weather_mailer/emailer.py","file_name":"emailer.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"111366924","text":"from datetime import datetime\n\nimport pyexcel as pe\nfrom _compact import PY2, BytesIO\n\n\nclass TestSheet:\n def init(self):\n from webtest import TestApp\n\n from myproject import main\n\n self.raw_app = main({})\n self.app = TestApp(self.raw_app)\n\n def done(self):\n from myproject import close\n\n close()\n\n def test_single_sheet_file(self):\n array = [[\"id\", \"name\"], [1, \"News\"], [2, \"Sports\"]]\n for upload_file_type in [\"ods\", \"xls\"]:\n self.init()\n print(\"Uploading %s\" % upload_file_type)\n file_name = \"test.%s\" % upload_file_type\n io = pe.save_as(array=array, dest_file_type=upload_file_type)\n if not PY2:\n if isinstance(io, BytesIO):\n content = io.getvalue()\n else:\n content = io.getvalue().encode(\"utf-8\")\n else:\n content = io.getvalue()\n response = self.app.post(\n \"/upload/categories\",\n upload_files=[(\"file\", file_name, content)],\n )\n ret = pe.get_array(file_type=\"xls\", file_content=response.body)\n assert array == ret\n self.done()\n\n\nclass TestBook:\n def setUp(self):\n from webtest import TestApp\n\n from myproject import main\n\n self.raw_app = main({})\n self.app = TestApp(self.raw_app)\n\n def test_book_file(self):\n data = {\n \"category\": [[\"id\", \"name\"], [1, \"News\"], [2, \"Sports\"]],\n \"post\": [\n [\"id\", \"title\", \"body\", \"pub_date\", \"category\"],\n [\n 1,\n \"Title A\",\n \"formal\",\n datetime(2015, 1, 20, 23, 28, 29),\n \"News\",\n ],\n [\n 2,\n \"Title B\",\n \"informal\",\n datetime(2015, 1, 20, 23, 28, 30),\n \"Sports\",\n ],\n ],\n }\n for upload_file_type in [\"xls\"]:\n print(\"Uploading %s\" % upload_file_type)\n file_name = \"test.%s\" % upload_file_type\n io = pe.save_book_as(\n bookdict=data, dest_file_type=upload_file_type\n )\n if not PY2:\n if isinstance(io, BytesIO):\n content = io.getvalue()\n else:\n content = io.getvalue().encode(\"utf-8\")\n else:\n content = io.getvalue()\n response = self.app.post(\n \"/upload/all\", upload_files=[(\"file\", file_name, content)]\n )\n ret = pe.get_book_dict(file_type=\"xls\", file_content=response.body)\n assert data[\"category\"] == ret[\"category\"]\n sheet = pe.Sheet(data[\"post\"], name_columns_by_row=0)\n sheet.column.format(\"pub_date\", lambda d: d.isoformat())\n sheet2 = pe.Sheet(ret[\"post\"], name_columns_by_row=0)\n for key in sheet.colnames:\n if key == 
\"category\":\n continue\n assert sheet.column[key] == sheet2.column[key]\n assert sheet2.column[\"category_id\"] == [1, 2]\n\n def tearDown(self):\n from myproject import close\n\n close()\n","sub_path":"tests/test_database_io.py","file_name":"test_database_io.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"156931766","text":"import numpy as np \r\n\r\n\r\ndef get_available_actions(R, state):\r\n current_state_row = R[state,]\r\n actions_with_max_reward = np.max(current_state_row)\r\n action_indices = np.where(actions_with_max_reward == current_state_row)[1]\r\n return action_indices\r\n\r\ndef update_brain(Q, current_state, action, gamma):\r\n # May be many indices if same value in multiple indices\r\n max_indices = np.where(Q[action,]== np.max(Q[action,]))[1]\r\n # Since each index leads to a different state randomize the selection of state\r\n max_index = int(np.random.choice(max_indices, size=1))\r\n # Q learning formula\r\n Q[current_state,action] = R[current_state,action] + gamma * Q[action, max_index]\r\n\r\n\r\ndef get_random_state(Q):\r\n return np.random.randint(0, int(Q.shape[0]))\r\n\r\ndef get_random_state_excluding_val(Q, vals = []):\r\n result = get_random_state(Q)\r\n while result not in vals:\r\n result = get_random_state(Q)\r\n return result\r\n\r\ndef train(Q, R, learning_rate=0.2, n_epochs = 500):\r\n brain = Q.copy()\r\n gamma = learning_rate\r\n # Training of the algorithm\r\n for _i in range(n_epochs):\r\n current_state = get_random_state(brain)\r\n available_actions = get_available_actions(R, current_state)\r\n action = int(np.random.choice(available_actions, 1))\r\n update_brain(brain, current_state, action, gamma)\r\n\r\n return brain\r\n\r\ndef predict_path(Q, current_state, end_state = 5):\r\n # Maximum assumes we can't have more states than number of iteration\r\n # therefore exit since path not found\r\n max_number_of_iterations = Q.shape[0]\r\n steps = [current_state]\r\n for _ in range(max_number_of_iterations):\r\n if current_state == end_state:\r\n break\r\n possible_next_step_indices = np.where(Q[current_state,]== np.max(Q[current_state,]))[1]\r\n current_state = int(np.random.choice(possible_next_step_indices, size=1))\r\n steps.append(current_state)\r\n\r\n return steps\r\n\r\nR = np.matrix([[-1, -1, -1, -1, 0, -1],\r\n [-1, -1, -1, 0, -1, 100],\r\n [-1, -1, -1, 0, -1, -1],\r\n [-1, 0, 0, -1, 0, -1],\r\n [-1, 0, 0, -1, 0, 100],\r\n [-1, 0, -1, -1, 0, 100],])\r\nQ = np.matrix(np.zeros([6,6]))\r\ntrained_brain = train(Q, R, learning_rate=0.2, n_epochs=150)\r\n# Display the trained Q matrix\r\nprint(\"Original Q matrix\")\r\nprint(Q)\r\nprint(\"Trained Q matrix\")\r\nprint(trained_brain / np.max(trained_brain) * 100)\r\n\r\n# Q Matix is optimized to lead to state 5\r\n# Testing all paths leading to state 5\r\ngoal_state = 5\r\nfor current_state in range(6):\r\n print(\"Starting point: \", current_state)\r\n print(\"Goal: \", goal_state)\r\n steps = predict_path(trained_brain, current_state, end_state=goal_state)\r\n print(\"Selected path: \", steps)","sub_path":"qreinforcement.py","file_name":"qreinforcement.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"500990284","text":"import os\n\ndef clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef get_spoons(cup_qnt: float) -> float:\n return (14 * cup_qnt) / 7\n\ndef get_water(cup_qnt: 
float, cup_size: float) -> float:\n if cup_qnt > 1:\n return (float(14.285714285714) * cup_size / 100) * cup_qnt\n else:\n return cup_size / 7\n\ndef get_milk(cup_qnt: float, cup_size: float) -> float:\n if cup_qnt > 1:\n return (float(85.714285714286) * cup_size / 100) * cup_qnt\n else:\n return cup_size - get_water(cup_qnt, cup_size)\n\ndef show_recipe(cup_qnt: float, cup_size: float) -> None:\n print(f'''\n Ingredientes: \\n\n [+] {get_spoons(cup_qnt):.0f} colheres de pó de capuccino. (Tamanho_da_colher = \"sopa\")\n [+] {get_water(cup_qnt, cup_size):.2f}ml de água.\n [+] {get_milk(cup_qnt, cup_size):.2f}ml de leite.\n ''')\n\ndef show_steps(cup_qnt, cup_size) -> None:\n print(f'''\n Modo de Preparo: \\n\n [!] Esquente {get_water(cup_qnt, cup_size):.2f}ml de água (sem deixar ferver!).\n [!] Adicione {get_spoons(cup_qnt):.0f} colheres (de sopa) de pó de capuccino e misture bem.\n [!] Pegue essa mistura e coloque na geladeira, até que esfrie.\n [!] Após esfriar, coloque no liquidificador e bata junto com {get_milk(cup_qnt, cup_size):.2f}ml de leite.\n [!] E pronto, seu capuccino gelado está pronto para beber!!\n\n OBS:\n [+] O liquidificador deixa mais cremoso\n [+] Use canudo de metal, bambu, madeira e papel. Temos que salvar as tartaruguinhas SZ.\n [!] Receita by: Rafaelvis Presley!\n ''')\ndef main():\n cup_qnt = int(input('Digite a quantidade de copos: '))\n cup_size = float(input('Digite o tamanho dos copos em ml: '))\n clear_screen()\n show_recipe(cup_qnt, cup_size)\n show_steps(cup_qnt, cup_size)\n input('Pressione ENTER para sair...')\n\nif __name__ == '__main__':\n main()","sub_path":"capuccino.py","file_name":"capuccino.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"482726731","text":"import yaml\nimport random\nimport string\nfrom flask import Flask\nfrom flask import session\nfrom flask import request\nfrom flask import jsonify\nfrom flask_cors import CORS, cross_origin\nfrom luis import *\nfrom text_analysis import *\n\nlanguages = [\"de\", \"en\"]\nintents = get_all_intents()\ntext_intent = {\n \"de\": {\n \"not_classified\": \"Konnte nicht klassifiziert werden. Bitte geben Sie eine der untenstehenden Optionen an.\",\n \"illegal_entry\": \"Ungültige Eingabe\",\n \"added_utterance\": \"Zum Modell hinzugefügt\"\n },\n \"it\": {\n \"not_classified\": \"Non è stato possibile classificare. 
Inserire una delle seguenti opzioni.\",\n \"illegal_entry\": \"Ingresso non valido\",\n \"added_utterance\": \"Aggiunto al modello\"\n }\n}\napp = Flask(__name__)\nCORS(app)\n\n@app.route(\"/\")\ndef main():\n return \"Hello World!\"\n\n@app.route(\"/api/send\", methods=[\"GET\", \"POST\"])\n@app.route(\"/api/send/<message>\", methods=[\"GET\", \"POST\"])\ndef handle_message(message=None):\n if message.lower() in intents:\n language = \"de\"\n else:\n language = detect_language(message)\n if language not in text_intent.keys():\n return jsonify({\n \"intent\":\"\",\n \"score\":\"\",\n \"text\": \"I don't speak your language, sorry!\",\n \"accurate\": True\n })\n last_message = request.data.decode(\"utf-8\")\n if last_message != \"\":\n last_message = json.loads(last_message)[\"last_message\"]\n if message not in intents:\n return text_intent[language][\"illegal_entry\"]\n add_utterance(language, message, last_message)\n train_model(language)\n return text_intent[language][\"added_utterance\"]\n else:\n intent = get_intent(message, language)\n if intent[\"score\"] < 0.4:\n not_classified = text_intent[language][\"not_classified\"]\n for i in range(len(intents)):\n not_classified += str(i) + \")\" + intents[i] + \"\\n\"\n return jsonify({\n \"intent\":intent[\"intent\"],\n \"score\":intent[\"score\"],\n \"text\": not_classified,\n \"accurate\": False\n })\n return jsonify({\n \"intent\": intent[\"intent\"],\n \"score\": intent[\"score\"],\n \"text\": \"\",\n \"accurate\": True\n }) \n\n\nif __name__ == \"__main__\":\n app.run(debug=True, threaded=True)\n","sub_path":"Case1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"2013761","text":"#Boa:Dialog:GerenciarCargos\r\n# -*- coding: cp1252 -*-\r\n\r\nimport wx\r\nfrom Controle.Cargo import Cargo\r\nfrom Controle.FuncionarioController import FuncionarioController\r\n\r\ndef create(parent):\r\n return GerenciarCargos(parent)\r\n\r\n[wxID_GERENCIARCARGOS, wxID_GERENCIARCARGOSBUTTONADD, \r\n wxID_GERENCIARCARGOSLISTBOX1, wxID_GERENCIARCARGOSTEXTCTRLADD, \r\n wxID_GERENCIARCARGOSTOOLBAR1, \r\n] = [wx.NewId() for _init_ctrls in range(5)]\r\n\r\n[wxID_GERENCIARCARGOSTOOLBAR1TOOLSCANCELAR, \r\n wxID_GERENCIARCARGOSTOOLBAR1TOOLSSALVAR, \r\n] = [wx.NewId() for _init_coll_toolBar1_Tools in range(2)]\r\n\r\nclass GerenciarCargos(wx.Dialog):\r\n def _init_coll_toolBar1_Tools(self, parent):\r\n # generated method, don't edit\r\n\r\n parent.DoAddTool(bitmap=wx.Bitmap(u\"Img/disquete.gif\",\r\n wx.BITMAP_TYPE_GIF),\r\n bmpDisabled=wx.Bitmap(u\"Img/disquete disabled.gif\",\r\n wx.BITMAP_TYPE_GIF), id=wxID_GERENCIARCARGOSTOOLBAR1TOOLSSALVAR,\r\n kind=wx.ITEM_NORMAL, label=u'Salvar Altera\\xe7\\xf5es',\r\n longHelp=u'Salvar Altera\\xe7\\xf5es',\r\n shortHelp=u'Salvar Altera\\xe7\\xf5es')\r\n parent.DoAddTool(bitmap=wx.Bitmap(u\"Img/cancelar.gif\",\r\n wx.BITMAP_TYPE_GIF),\r\n bmpDisabled=wx.Bitmap(u\"Img/cancelar disabled.gif\",\r\n wx.BITMAP_TYPE_GIF), id=wxID_GERENCIARCARGOSTOOLBAR1TOOLSCANCELAR,\r\n kind=wx.ITEM_NORMAL, label=u'Cancelar Altera\\xe7\\xf5es',\r\n longHelp=u'Cancelar Altera\\xe7\\xf5es', shortHelp=u'Cancelar')\r\n self.Bind(wx.EVT_TOOL, self.OnToolBar1ToolscancelarTool,\r\n id=wxID_GERENCIARCARGOSTOOLBAR1TOOLSCANCELAR)\r\n self.Bind(wx.EVT_TOOL, self.OnToolBar1ToolssalvarTool,\r\n id=wxID_GERENCIARCARGOSTOOLBAR1TOOLSSALVAR)\r\n\r\n parent.Realize()\r\n\r\n def _init_ctrls(self, prnt):\r\n # generated method, don't edit\r\n 
wx.Dialog.__init__(self, id=wxID_GERENCIARCARGOS,\r\n name=u'GerenciarCargos', parent=prnt, pos=wx.Point(394, 140),\r\n size=wx.Size(489, 372), style=wx.DEFAULT_DIALOG_STYLE,\r\n title=u'Gerenciar Cargos')\r\n self.SetClientSize(wx.Size(481, 339))\r\n self.SetBackgroundColour(wx.Colour(255, 255, 255))\r\n\r\n self.toolBar1 = wx.ToolBar(id=wxID_GERENCIARCARGOSTOOLBAR1,\r\n name='toolBar1', parent=self, pos=wx.Point(0, 0),\r\n size=wx.Size(481, 27), style=wx.TB_HORIZONTAL | wx.NO_BORDER)\r\n\r\n self.listBox1 = wx.ListBox(choices=[], id=wxID_GERENCIARCARGOSLISTBOX1,\r\n name='listBox1', parent=self, pos=wx.Point(16, 72),\r\n size=wx.Size(448, 248), style=0)\r\n self.listBox1.Bind(wx.EVT_LISTBOX_DCLICK, self.OnListBox1ListboxDclick,\r\n id=wxID_GERENCIARCARGOSLISTBOX1)\r\n\r\n self.textCtrlAdd = wx.TextCtrl(id=wxID_GERENCIARCARGOSTEXTCTRLADD,\r\n name=u'textCtrlAdd', parent=self, pos=wx.Point(16, 40),\r\n size=wx.Size(360, 24), style=0, value=u'')\r\n self.textCtrlAdd.Bind(wx.EVT_TEXT_ENTER, self.OnTextCtrlAddTextEnter,\r\n id=wxID_GERENCIARCARGOSTEXTCTRLADD)\r\n\r\n self.buttonAdd = wx.Button(id=wxID_GERENCIARCARGOSBUTTONADD,\r\n label=u'Adicionar', name=u'buttonAdd', parent=self,\r\n pos=wx.Point(384, 40), size=wx.Size(75, 23), style=0)\r\n self.buttonAdd.Bind(wx.EVT_BUTTON, self.OnButtonAddButton,\r\n id=wxID_GERENCIARCARGOSBUTTONADD)\r\n\r\n self._init_coll_toolBar1_Tools(self.toolBar1)\r\n\r\n def __init__(self, parent):\r\n self._init_ctrls(parent)\r\n self.__carregarCargosListBox__()\r\n \r\n def __carregarCargosListBox__(self):\r\n controller = FuncionarioController()\r\n self.listaCargos = controller.pesquisarCargos()\r\n for i in self.listaCargos:\r\n self.listBox1.Append(i.getDescricao())\r\n \r\n\r\n def OnToolBar1ToolscancelarTool(self, event):\r\n self.Close()\r\n\r\n#===============================================================================\r\n# Salvar os cargos atuais da listCtrl\r\n#===============================================================================\r\n def OnToolBar1ToolssalvarTool(self, event):\r\n novaListaCargos = [] \r\n for i in self.listBox1.GetItems():\r\n cargo = Cargo(descricao = i)\r\n novaListaCargos.append(cargo)\r\n \r\n controller = FuncionarioController()\r\n cargosSalvos = controller.salvarCargos(novaListaCargos)\r\n if cargosSalvos == True:\r\n wx.MessageBox(\"Cargos Salvos com Sucesso!\",\"Cadastro de cargos\",wx.OK)\r\n self.Close()\r\n elif cargosSalvos == False:\r\n wx.MessageBox(\"Cargos não salvos\",\"Cadastro de cargos\",wx.ICON_ERROR)\r\n else:\r\n wx.MessageBox(cargosSalvos,\"Cadastro de cargos\",wx.ICON_ERROR)\r\n self.Close()\r\n \r\n\r\n#===============================================================================\r\n# Adicionar Cargo a listCtrl de cargos\r\n#===============================================================================\r\n def OnButtonAddButton(self, event):\r\n nomeCargo = self.textCtrlAdd.GetValue()\r\n #Valida o nome do cargo\r\n while \" \" in nomeCargo:\r\n nomeCargo = nomeCargo.replace(\" \",\" \")\r\n if len(nomeCargo) < 2:\r\n wx.MessageBox(\"Favor preencher o campo - CARGO - corretamente\",\"Cadastro de Cargos\",wx.ICON_EXCLAMATION)\r\n self.textCtrlAdd.SetFocus()\r\n self.textCtrlAdd.SetSelection(-1,-1)\r\n return False\r\n nomeAux = nomeCargo.replace(' ','')\r\n if not nomeAux.isalpha():\r\n wx.MessageBox(\"Favor preencher o campo - CARGO - corretamente\",\"Cadastro de Cargos\",wx.ICON_EXCLAMATION)\r\n self.textCtrlAdd.SetFocus()\r\n self.textCtrlAdd.SetSelection(-1,-1)\r\n return False\r\n \r\n 
self.listBox1.Append(nomeCargo)\r\n self.textCtrlAdd.SetFocus()\r\n self.textCtrlAdd.SetSelection(-1,-1)\r\n\r\n def OnTextCtrlAddTextEnter(self, event):\r\n self.OnButtonAddButton(event)\r\n\r\n#===============================================================================\r\n# Remoção de Cargo\r\n#===============================================================================\r\n def OnListBox1ListboxDclick(self, event):\r\n pos = self.listBox1.GetSelection()\r\n cargo = self.listaCargos[pos]\r\n deletarCargo = wx.MessageBox(\"Você tem certeza que deseja excluir o cargo:\\n\" + cargo.getDescricao(),\"Deletar Cargo\" ,wx.YES_NO)\r\n if deletarCargo == wx.YES:\r\n self.listaCargos.remove(cargo)\r\n self.listBox1.Delete(pos)\r\n \r\n \r\n","sub_path":"Src/Apresentacao/Dialog_GerenciarCargos.py","file_name":"Dialog_GerenciarCargos.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"63382215","text":"from fractions import Fraction\nimport cv2\nimport numpy as np\nfrom copy import copy, deepcopy\n\nimg = 'Horizontal_motion_blurred_image.jpg'\nblur_img = cv2.imread(img)\nimg_gray = cv2.cvtColor(blur_img, cv2.COLOR_BGR2GRAY)\nprint('origin_image = \\n', img_gray)\n\nH, W = img_gray.shape\n\nimg_vector = np.zeros((H * W, 1))\nfor i in range(H):\n for j in range(W):\n img_vector[j + i * W] = img_gray[i, j]\nprint('img vector = \\n', img_vector)\n\n\ndef swap(u, r, p): # check element U[i,i]\n temp = np.zeros((1, H * W), dtype=float)\n temp_p = np.zeros((1, H * W), dtype=float)\n counter = r + 1\n\n while True:\n if counter == H*W:\n break\n if u[counter, r] == 0:\n counter += 1\n continue\n else:\n break\n # swap U[i] with U[counter]\n for a in range(H * W):\n temp[0, a] = u[r, a]\n temp_p[0, a] = p[r, a]\n\n u[r, a] = u[counter, a]\n p[r, a] = p[counter, a]\n\n u[counter, a] = temp[0, a]\n p[counter, a] = temp_p[0, a]\n return u\n\n\n# LU decomposition\n\n# starting from gauss elimination for A to get U\n\n# with permutation matrix P indicated the row swapping\nP = np.eye(H * W, dtype=float)\n\n# get motion matrix A\nmotion_matrix = np.zeros((H * W, H * W), dtype=float)\nfor i in range(H * W):\n for j in range(H * W):\n if (j - i == 0) or (j - i == 1) or (i - j == 1):\n motion_matrix[i, j] = 1\nprint('motion_matrix = \\n', motion_matrix)\n\n# gauss elimination in A to get U\nU = deepcopy(motion_matrix)\nfor i in range(H * W):\n for j in range(i + 1, H * W):\n # make sure U[i,i] != 0 and swap row in this case\n if U[i, i] == 0:\n # swap row function here with permutation matrix\n swap(U, i, P)\n if U[j, i] != 0:\n # do the subtraction\n fraction = float(U[j, i] / U[i, i])\n for k in range(H * W):\n # subtract the row\n U[j, k] = float(U[j, k]) - fraction * float(U[i, k])\n\nprint('U matrix = \\n', U)\nprint('P matrix = \\n', P)\nP_times_A = np.dot(P, motion_matrix)\nprint('PA = UL = \\n', P_times_A)\n\n# get matrix L through LU = PA\n# build matrix L\n# FORMULA METHOD\n# get matrix L through formula using the swapped A matrix\nL_FORMULA = np.eye(H * W, dtype=float)\n\n# gauss elimination without swapping and should get the same answer\nU2 = deepcopy(P_times_A)\n\n\ndef get_l(upper, row):\n for r in range(row + 1, H * W):\n L_FORMULA[r, row] = upper[r, row] / upper[row, row]\n return L_FORMULA\n\n\nfor i in range(H * W):\n # Get L matrix here\n get_l(U2, i)\n for j in range(i + 1, H * W):\n # make sure U[i,i] != 0 and swap row in this case\n if U2[j, i] != 0:\n # do the subtraction\n fraction = 
float(U2[j, i] / U2[i, i])\n for k in range(H * W):\n # subtract the row\n U2[j, k] = U2[j, k] - fraction * U2[i, k]\n\nprint('U matrix using swapped PA = \\n', U2)\nprint('L matrix (FORMULA METHOD) = \\n', L_FORMULA)\n\n# get swapped D matrix\n\nprint('result matrix = \\n', img_vector)\nB_swapped = np.dot(P, img_vector)\nprint('swapped result matrix B = \\n', B_swapped)\n\n# we know that L*D = B_swapped\nD = np.zeros((H * W, 1), dtype=float)\n\n\ndef d_sum(row, lower):\n sum_d = 0.0\n for r in range(row):\n sum_d += lower[row, r] * D[r, 0]\n return sum_d\n\n\nfor i in range(H * W):\n D[i, 0] = B_swapped[i, 0] - d_sum(i, L_FORMULA)\n\nprint('matrix D = \\n', D)\n\n# get X matrix sloven\n# using UX = D\n\nX = np.zeros((H * W, 1), dtype=float)\n\n\ndef x_sum(row, upper):\n sum_x = 0.0\n for r in range(H * W - row - 1):\n sum_x += upper[row, H * W - 1 - r] * X[H * W - 1 - r, 0]\n return sum_x\n\n\nfor i in range(H * W - 1, -1, -1):\n X[i, 0] = (D[i, 0] - x_sum(i, U2)) / U2[i, i]\n\nprint('matrix X = \\n', X)\n\n# convert back to matrix\nimg_Deblurred = np.zeros((H, W), dtype=float)\nfor i in range(H):\n for j in range(W):\n img_Deblurred[i, j] = X[W * i + j, 0]\n if img_Deblurred[i, j] > 255:\n img_Deblurred[i, j] = 255\n elif img_Deblurred[i, j] < 0:\n img_Deblurred[i, j] = 0\n\nprint('recovered Horizontal motion image = \\n', img_Deblurred)\n\ncv2.imwrite('Horizontal_motion_Deblurred_image.jpg', img_Deblurred)\n","sub_path":"Project_2/Proj2_python/Horizontal_Deblur.py","file_name":"Horizontal_Deblur.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"565847335","text":"from Token import keyWordIds\nfrom Stack import Stack\nfrom SemanticAnalysis import SemanticAnalysis\nfrom Paths import actionTablePath, auxTablePath\n\n\nclass SyntaxAnalysis():\n\n def __init__(self, lexicalAnalisys):\n \"\"\"\n\n :param lexicalAnalisys:\n :return:\n \"\"\"\n self.la = lexicalAnalisys\n self.stack = Stack()\n self.action = []\n self.len = []\n self.left = []\n\n self.readActionTable()\n self.readAuxTable()\n\n self.sa = SemanticAnalysis()\n\n def check(self):\n \"\"\"\n\n :return:\n \"\"\"\n q = 0\n self.stack.push(0)\n currentToken = self.la.nextToken()\n aTemp, _, _ = currentToken\n a = keyWordIds[aTemp]\n\n while True:\n p = self.action[q][a]\n # print(p, '=', q, a)\n if self.IS_SHIFT(p):\n self.stack.push(p)\n aTemp, _, _ = self.la.nextToken()\n a = keyWordIds[aTemp]\n elif self.IS_REDUCTION(p):\n r = self.RULE(p)\n self.stack.pop(self.len[r])\n self.stack.push(self.action[self.stack.top()][self.left[r]])\n self.sa.semantics(r, currentToken)\n else:\n print('SyntaxError') # Erro de Sintaxe\n raise Exception('SyntaxError')\n q = self.stack.top()\n\n if q == self.FINAL():\n print('Done!')\n break\n\n def readActionTable(self):\n \"\"\"\n\n Note: As there is a comma in the grammar, a CSV file would cause trouble\n We replaced the comma for a character that doesn't exist in the grammar\n such as #\n\n\n :return:\n \"\"\"\n # Read file\n f = open(actionTablePath, 'U')\n actionTemp = []\n for line in f.readlines():\n line = line.strip('\\n')\n actionTemp.append(line.split('#'))\n\n # Loop aux variables\n line0 = actionTemp[0]\n lenX = len(actionTemp)\n lenY = len(line0)\n\n # Change blank values to 0\n for i in range(1, lenX):\n for j in range(1, lenY):\n if actionTemp[i][j] == '':\n actionTemp[i][j] = 0\n\n # Change table to Book\n maxPreviousLine = 0\n maxCurrentLine = 0\n max = 0\n\n for i in range(1, lenX):\n 
maxPreviousLine = maxCurrentLine\n maxCurrentLine = 0\n for j in range(1, lenY):\n if actionTemp[i][j] == 'a':\n max = maxPreviousLine\n elif maxCurrentLine < self.myInt(actionTemp[i][j]):\n maxCurrentLine = self.myInt(actionTemp[i][j])\n\n for i in range(1, lenX):\n for j in range(0, lenY):\n if actionTemp[i][j] == 'a':\n actionTemp[i][j] = 1 + max\n elif self.myInt(actionTemp[i][j]) > max:\n actionTemp[i][j] = 1 + self.myInt(actionTemp[i][j])\n # elif self.myInt(actionTemp[i][j]) < (-1)*max:\n # actionTemp[i][j] = self.myInt(actionTemp[i][j]) - 1\n else:\n actionTemp[i][j] = self.myInt(actionTemp[i][j])\n\n newLine = []\n for i in range(lenY):\n newLine.append(0)\n newLine[0] = max + 1\n\n actionTemp = actionTemp[:max+2] + [newLine] + actionTemp[max+2:]\n\n # Create action table\n newLine = {}\n for i in range(1, lenX+1):\n for j in range(1, lenY):\n newLine[line0[j]] = actionTemp[i][j]\n self.action.append(newLine)\n newLine = {}\n\n # self.printActionTable()\n\n def myInt(self, num):\n try:\n return int(num)\n except ValueError:\n return 0\n\n\n def printActionTable(self):\n print('===== Action table ====')\n for line in self.action:\n print(line)\n print('=======================')\n\n def readAuxTable(self):\n \"\"\"\n\n :return:\n \"\"\"\n # Read file\n f = open(auxTablePath, 'U')\n for line in f.readlines():\n line = line.strip('\\n')\n self.len.append(line.split(',')[1])\n self.left.append(line.split(',')[2])\n\n def IS_SHIFT(self, p):\n return int(p) > 0\n\n def IS_REDUCTION(self, p):\n return int(p) < 0\n\n def RULE(self, p):\n return (-1)*int(p)\n\n def FINAL(self):\n return len(self.len) - 1","sub_path":"SyntaxAnalysis.py","file_name":"SyntaxAnalysis.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"528196989","text":"\n\nfrom xai.brain.wordbase.verbs._retail import _RETAIL\n\n#calss header\nclass _RETAILING(_RETAIL, ):\n\tdef __init__(self,): \n\t\t_RETAIL.__init__(self)\n\t\tself.name = \"RETAILING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"retail\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_retailing.py","file_name":"_retailing.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"226407745","text":"from django.contrib import admin\nfrom django.urls import path ,include\nfrom music import views\n\nurlpatterns = [\n path('',views.homepage,name='homepage'),\n path('mymusic/',views.mymusic,name='mymusic'),\n path('lectures/',views.lecture,name='mymusic'),\n path('channel/',views.favchannel,name='channel'),\n path('signin/',views.ourmember,name='signin'),\n path('college/',views.college,name='college'),\n path('pythonlecture/',views.python,name='pythonlecture'),\n path('lectures/gamedev/',views.game,name='gamedev'),\n path('lectures/discord/',views.discord,name='discord')\n]\n","sub_path":"music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"471338074","text":"from itertools import chain\n\nfrom wagtail.wagtailcore.fields import StreamField\nfrom wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \\\n StreamFieldPanel\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\nfrom wagtail.contrib.table_block.blocks import TableBlock\n\nfrom .base import CFGOVPage\nfrom 
..atomic_elements import molecules, organisms\nfrom ..util import filterable_list, util\n\n\nclass SublandingPage(CFGOVPage):\n header = StreamField([\n ('hero', molecules.Hero()),\n ], blank=True)\n content = StreamField([\n ('text_introduction', molecules.TextIntroduction()),\n ('featured_content', molecules.FeaturedContent()),\n ('image_text_25_75_group', organisms.ImageText2575Group()),\n ('image_text_50_50_group', organisms.ImageText5050Group()),\n ('full_width_text', organisms.FullWidthText()),\n ('half_width_link_blob_group', organisms.HalfWidthLinkBlobGroup()),\n ('post_preview_snapshot', organisms.PostPreviewSnapshot()),\n ('well', organisms.Well()),\n ('table', organisms.Table(editable=False)),\n ('table_block', TableBlock(table_options={'renderer':'html'})),\n ('contact', organisms.MainContactInfo()),\n ('formfield_with_button', molecules.FormFieldWithButton()),\n ('reg_comment', organisms.RegComment()),\n ], blank=True)\n sidebar_breakout = StreamField([\n ('slug', blocks.CharBlock(icon='title')),\n ('heading', blocks.CharBlock(icon='title')),\n ('paragraph', blocks.RichTextBlock(icon='edit')),\n ('breakout_image', blocks.StructBlock([\n ('image', ImageChooserBlock()),\n ('is_round', blocks.BooleanBlock(required=False, default=True,\n label='Round?')),\n ('icon', blocks.CharBlock(help_text='Enter icon class name.')),\n ('heading', blocks.CharBlock(required=False, label='Introduction Heading')),\n ('body', blocks.TextBlock(required=False, label='Introduction Body')),\n ], heading='Breakout Image', icon='image')),\n ('related_posts', organisms.RelatedPosts()),\n ], blank=True)\n\n # General content tab\n content_panels = CFGOVPage.content_panels + [\n StreamFieldPanel('header'),\n StreamFieldPanel('content'),\n ]\n\n sidebar_panels = [\n StreamFieldPanel('sidebar_breakout'),\n ] + CFGOVPage.sidefoot_panels\n\n # Tab handler interface\n edit_handler = TabbedInterface([\n ObjectList(content_panels, heading='General Content'),\n ObjectList(sidebar_panels, heading='Sidebar'),\n ObjectList(CFGOVPage.settings_panels, heading='Configuration'),\n ])\n\n template = 'sublanding-page/index.html'\n\n def get_browsefilterable_posts(self, request, limit):\n filter_pages = [p.specific for p in self.get_appropriate_descendants(request.site.hostname)\n if 'FilterablePage' in p.specific_class.__name__ and 'archive' not in p.title.lower()]\n filtered_controls = {}\n for page in filter_pages:\n id = str(util.get_form_id(page))\n if id not in filtered_controls.keys():\n filtered_controls.update({id: []})\n form_class = page.get_form_class()\n posts = page.get_page_set(form_class(parent=page, hostname=request.site.hostname), request.site.hostname)\n if filtered_controls[id]:\n filtered_controls[id] += posts\n else:\n filtered_controls[id] = posts\n posts_tuple_list = [(id, post) for id, posts in filtered_controls.iteritems() for post in posts]\n posts = sorted(posts_tuple_list, key=lambda p: p[1].date_published, reverse=True)[:limit]\n return posts\n","sub_path":"cfgov/v1/models/sublanding_page.py","file_name":"sublanding_page.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"610962967","text":"import os\nfrom options import options\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom operations import FaceOperations\nfrom database import FaceDatabase\nfrom storage import FaceStorage\nfrom uuid import uuid4\nfrom routes.faces_create import faces_create\nfrom routes.faces_get 
import faces_get\nfrom routes.faces_relation import faces_relation\nfrom routes.faces_identify import faces_identify\nfrom routes.original_get import original_get\nfrom routes.person_get import person_get\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\napp = Flask(__name__, static_url_path='/')\nCORS(app)\n\n\n@app.route('/uploads/<path:path>')\ndef send_files(path):\n return app.send_static_file(os.path.join('uploads', path))\n\n\nserver_options = options.get('server', {})\nface_ops = FaceOperations(**options.get('face', {}))\ndatabase = FaceDatabase(**options.get('database', {}))\nstorage = FaceStorage(**options.get('storage', {}))\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\ndef get_file_extension(file_path):\n split = file_path.rsplit('.', 1)\n return None if len(split) < 2 else split[1].lower()\n\n\n@app.route('/faces/create', methods=['POST'])\ndef faces_create_route():\n image_file = request.files.get('image')\n if image_file is None:\n raise InvalidUsage(\"No image is sent with the request.\")\n\n extension = get_file_extension(image_file.filename)\n if extension not in [\"png\", \"jpeg\", \"jpg\"]:\n raise InvalidUsage(\"Expected to find png, jpeg or jpg file.\")\n\n temp_path = \"/tmp/\" + str(uuid4()) + \".\" + extension\n image_file.save(temp_path)\n result = faces_create(storage, face_ops, database, temp_path)\n\n if result is None:\n raise InvalidUsage(\"An invalid response returned by the server. Probably no face detected.\", 406)\n return jsonify(result)\n\n\n@app.route('/faces/<face_id>', methods=['GET'])\ndef faces_get_route(face_id):\n result = faces_get(storage, database, face_id)\n if result is None:\n raise InvalidUsage(\"Wrong id number.\", 404)\n return jsonify(result)\n\n\n@app.route('/originals/<original_id>', methods=['GET'])\ndef originals_get_route(original_id):\n result = original_get(storage, database, original_id)\n if result is None:\n raise InvalidUsage(\"Wrong id number.\", 404)\n return jsonify(result)\n\n\n@app.route('/person/<person_id>', methods=['GET'])\ndef person_get_route(person_id):\n result = person_get(storage, database, person_id)\n if result is None:\n raise InvalidUsage(\"Wrong id number.\", 404)\n return jsonify(result)\n\n\n@app.route('/faces/relation', methods=['POST'])\ndef faces_relation_route():\n data = request.get_json()\n if data is None or 'faces' not in data:\n raise InvalidUsage(\"This endpoint requires data in json format to be posted. With faces key.\")\n faces = data.get('faces', [])\n person = data.get('person', None)\n\n return jsonify(faces_relation(database, faces, person))\n\n\n@app.route('/faces/identify', methods=['POST'])\ndef faces_identify_route():\n data = request.get_json()\n if data is None or 'faces' not in data:\n raise InvalidUsage(\"This endpoint requires data in json format to be posted.
With faces key.\")\n faces = data.get('faces', [])\n grouping = data.get('grouping', True)\n\n return jsonify(faces_identify(database, face_ops, faces, grouping is not False))\n\n\nif __name__ == '__main__':\n app.run(**server_options)\n","sub_path":"faces.py","file_name":"faces.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"82342828","text":"#TC: O(n)\r\n#SC: O(h), height of tree\r\n#DFS recursive solution\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\nclass Solution:\r\n def largestValues(self, root: Optional[TreeNode]) -> List[int]:\r\n \r\n def helper(node, depth):\r\n if node:\r\n if depth in dic:\r\n dic[depth] = max(dic[depth], node.val)\r\n else:\r\n dic[depth] = node.val\r\n \r\n helper(node.left, depth+1)\r\n helper(node.right,depth+1)\r\n \r\n dic = {}\r\n helper(root,0)\r\n return dic.values()","sub_path":"515. Find Largest Value in Each Tree Row.py","file_name":"515. Find Largest Value in Each Tree Row.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"332168942","text":"from __future__ import division\nfrom __future__ import print_function\nimport os, sys\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=RuntimeWarning)\nwarnings.simplefilter(action='ignore', category=UserWarning)\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n# For replicating the experiments\nSEED = 42\nimport argparse\nimport time\nimport random\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\n\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\nfrom torch import optim\nimport torch.nn.functional as F\nfrom .models import LinTrans, LogReg\nfrom .optimizer import loss_function\nfrom .utils import *\nfrom utils.utils import load_data, preprocess_graph, mask_test_edges\nfrom sklearn.cluster import SpectralClustering, KMeans\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import normalize, MinMaxScaler\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gnnlayers', type=int, default=1, help=\"Number of gnn layers\")\nparser.add_argument('--linlayers', type=int, default=1, help=\"Number of hidden layers\")\nparser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')\nparser.add_argument('--dims', type=int, default=[500], help='Number of units in hidden layer 1.')\nparser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')\nparser.add_argument('--upth_st', type=float, default=0.0011, help='Upper Threshold start.')\nparser.add_argument('--lowth_st', type=float, default=0.1, help='Lower Threshold start.')\nparser.add_argument('--upth_ed', type=float, default=0.001, help='Upper Threshold end.')\nparser.add_argument('--lowth_ed', type=float, default=0.5, help='Lower Threshold end.')\nparser.add_argument('--upd', type=int, default=10, help='Update epoch.')\nparser.add_argument('--bs', type=int, default=10000, help='Batchsize.')\nparser.add_argument('--dataset', type=str, default='wiki', help='type of dataset.')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA training.')\nargs = 
parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nif args.cuda is True:\n print('Using GPU')\n torch.cuda.manual_seed(SEED)\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"5\"\n\ndef clustering(Cluster, feature, true_labels):\n f_adj = np.matmul(feature, np.transpose(feature))\n predict_labels = Cluster.fit_predict(f_adj)\n \n cm = clustering_metrics(true_labels, predict_labels)\n db = -metrics.davies_bouldin_score(f_adj, predict_labels)\n acc, nmi, adj = cm.evaluationClusterModelFromLabel(tqdm)\n\n return db, acc, nmi, adj\n\ndef update_similarity(z, upper_threshold, lower_treshold, pos_num, neg_num):\n f_adj = np.matmul(z, np.transpose(z))\n cosine = f_adj\n cosine = cosine.reshape([-1,])\n pos_num = round(upper_threshold * len(cosine))\n neg_num = round((1-lower_treshold) * len(cosine))\n \n pos_inds = np.argpartition(-cosine, pos_num)[:pos_num]\n neg_inds = np.argpartition(cosine, neg_num)[:neg_num]\n \n return np.array(pos_inds), np.array(neg_inds)\n\ndef update_threshold(upper_threshold, lower_treshold, up_eta, low_eta):\n upth = upper_threshold + up_eta\n lowth = lower_treshold + low_eta\n return upth, lowth\n\n\ndef gae_for(args):\n print(\"Using {} dataset\".format(args.dataset))\n if args.dataset == 'cora':\n n_clusters = 7\n Cluster = SpectralClustering(n_clusters=n_clusters, affinity = 'precomputed', random_state=0)\n elif args.dataset == 'citeseer':\n n_clusters = 6\n Cluster = SpectralClustering(n_clusters=n_clusters, affinity = 'precomputed', random_state=0)\n elif args.dataset == 'pubmed':\n n_clusters = 3\n Cluster = SpectralClustering(n_clusters=n_clusters, affinity = 'precomputed', random_state=0)\n elif args.dataset == 'wiki':\n n_clusters = 17\n Cluster = SpectralClustering(n_clusters=n_clusters, affinity = 'precomputed', random_state=0)\n \n adj, features, true_labels, idx_train, idx_val, idx_test = load_data(args.dataset)\n n_nodes, feat_dim = features.shape\n dims = [feat_dim] + args.dims\n \n layers = args.linlayers\n # Store original adjacency matrix (without diagonal entries) for later\n \n adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n adj_orig = adj\n\n adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj)\n adj = adj_train\n n = adj.shape[0]\n\n adj_norm_s = preprocess_graph(adj, args.gnnlayers, norm='sym', renorm=True)\n sm_fea_s = sp.csr_matrix(features).toarray()\n \n print('Laplacian Smoothing...')\n for a in adj_norm_s:\n sm_fea_s = a.dot(sm_fea_s)\n adj_1st = (adj + sp.eye(n)).toarray()\n\n adj_label = torch.FloatTensor(adj_1st)\n \n model = LinTrans(layers, dims)\n \n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n \n sm_fea_s = torch.FloatTensor(sm_fea_s)\n adj_label = adj_label.reshape([-1,])\n\n if args.cuda:\n model.cuda()\n inx = sm_fea_s.cuda()\n adj_label = adj_label.cuda()\n\n pos_num = len(adj.indices)\n neg_num = n_nodes*n_nodes-pos_num\n\n up_eta = (args.upth_ed - args.upth_st) / (args.epochs/args.upd)\n low_eta = (args.lowth_ed - args.lowth_st) / (args.epochs/args.upd)\n\n pos_inds, neg_inds = update_similarity(normalize(sm_fea_s.numpy()), args.upth_st, args.lowth_st, pos_num, neg_num)\n upth, lowth = update_threshold(args.upth_st, args.lowth_st, up_eta, low_eta)\n\n bs = min(args.bs, len(pos_inds))\n length = len(pos_inds)\n \n pos_inds_cuda = torch.LongTensor(pos_inds).cuda()\n best_lp = 0.\n print('Start Training...')\n for epoch in tqdm(range(args.epochs)):\n \n st, ed = 0, bs\n batch_num = 
0\n model.train()\n length = len(pos_inds)\n \n while ( ed <= length ):\n sampled_neg = torch.LongTensor(np.random.choice(neg_inds, size=ed-st)).cuda()\n sampled_inds = torch.cat((pos_inds_cuda[st:ed], sampled_neg), 0)\n t = time.time()\n optimizer.zero_grad()\n xind = sampled_inds // n_nodes\n yind = sampled_inds % n_nodes\n x = torch.index_select(inx, 0, xind)\n y = torch.index_select(inx, 0, yind)\n zx = model(x)\n zy = model(y)\n batch_label = torch.cat((torch.ones(ed-st), torch.zeros(ed-st))).cuda()\n batch_pred = model.dcs(zx, zy)\n loss = loss_function(adj_preds=batch_pred, adj_labels=batch_label, n_nodes=ed-st)\n \n loss.backward()\n cur_loss = loss.item()\n optimizer.step()\n \n st = ed\n batch_num += 1\n if ed < length and ed + bs >= length:\n ed += length - ed\n else:\n ed += bs\n\n \n if (epoch + 1) % args.upd == 0:\n model.eval()\n mu = model(inx)\n hidden_emb = mu.cpu().data.numpy()\n upth, lowth = update_threshold(upth, lowth, up_eta, low_eta)\n pos_inds, neg_inds = update_similarity(hidden_emb, upth, lowth, pos_num, neg_num)\n bs = min(args.bs, len(pos_inds))\n pos_inds_cuda = torch.LongTensor(pos_inds).cuda()\n val_auc, val_ap = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)\n if val_auc + val_ap >= best_lp:\n best_lp = val_auc + val_ap\n best_emb = hidden_emb\n tqdm.write(\"Epoch: {}, train_loss_gae={:.5f}, time={:.5f}\".format(\n epoch + 1, cur_loss, time.time() - t))\n \n \n \n tqdm.write(\"Optimization Finished!\")\n auc_score, ap_score = get_roc_score(best_emb, adj_orig, test_edges, test_edges_false)\n tqdm.write('Test AUC score: ' + str(auc_score))\n tqdm.write('Test AP score: ' + str(ap_score))\n \n\nif __name__ == '__main__':\n gae_for(args)","sub_path":"algorithms/age/link_pred.py","file_name":"link_pred.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"417662833","text":"from urllib.request import urlopen\nfrom antigate import AntiGate, AntiGateError\n\n_key = open('antigate.txt').read().strip()\n_a = None\n\ndef solve(url, timeout=10):\n try:\n data = urlopen(url, timeout=timeout).read()\n except Exception:\n print('[ERROR] captcha timeout')\n return None\n with open('captcha.png', 'wb') as f:\n f.write(data)\n global _a\n try:\n _a = AntiGate(_key, 'captcha.png')\n except AntiGateError as e:\n print(e)\n return None\n return str(_a)\n","sub_path":"captcha.py","file_name":"captcha.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"59875607","text":"\"\"\"\nhttps://leetcode.com/problems/move-zeroes/\n\nGiven an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\nExample:\n\nInput: [0,1,0,3,12]\nOutput: [1,3,12,0,0]\nNote:\n\nYou must do this in-place without making a copy of the array.\nMinimize the total number of operations.\n\n\"\"\"\n\n\nfrom typing import List\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n p = 0\n for i, e in enumerate(nums):\n if e != 0:\n nums[p] = e\n p += 1\n while p < len(nums):\n nums[p] = 0\n p += 1\n\n\nif __name__ == '__main__':\n nums = [0,1,0,3,12]\n print(nums)\n Solution().moveZeroes(nums)\n 
print(nums)\n","sub_path":"python/283.py","file_name":"283.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"589073559","text":"import os\nimport pickle\nfrom tqdm import tqdm, trange\nimport numpy as np\nimport torch\nimport torchvision as tv\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom config import lr_size, hr_size\nfrom utils import Div2KDataset\nfrom models import Generator, Discriminator\n\nroot_train = \"data/DIV2K_train_HR/\"\nroot_val = \"data/DIV2K_valid_HR/\"\ngen_dir = \"model/mse/generator.pt\"\ndisc_dir = \"model/mse/discriminator.pt\"\ngen_optim_dir = \"model/mse/gen_optim.pt\"\ndisc_optim_dir = \"model/mse/disc_optim.pt\"\ngs_dir = \"summary/mse_gs.pkl\"\nlog_dir = \"summary/\"\n\ngs = 1\nbatch_size = 16\nwriter = SummaryWriter(log_dir=log_dir)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ngen = Generator().to(device)\ndisc = Discriminator().to(device)\ngen_optim = torch.optim.Adam(gen.parameters(), lr=10e-4, betas=(.9, .999))\ndisc_optim = torch.optim.Adam(disc.parameters(), lr=10e-4, betas=(.9, .999))\nmse_criterion = torch.nn.MSELoss()\nbce_criterion = torch.nn.BCELoss()\n\nretrain = False\nif retrain:\n print(\"Resuming training...\")\n if os.path.exists(gs_dir):\n gs = pickle.load(open(gs_dir, \"rb\"))\n else:\n raise FileNotFoundError(\"Global step not found.\")\n if os.path.exists(gen_dir):\n state_dict = torch.load(gen_dir, map_location=device)\n gen.load_state_dict(state_dict)\n else:\n raise FileNotFoundError(\"Generator model not found.\")\n if os.path.exists(disc_dir):\n state_dict = torch.load(disc_dir, map_location=device)\n disc.load_state_dict(state_dict)\n else:\n raise FileNotFoundError(\"Discriminator model not found.\")\n if os.path.exists(gen_optim_dir):\n state_dict = torch.load(gen_optim_dir, map_location=device)\n gen_optim.load_state_dict(state_dict)\n else:\n raise FileNotFoundError(\"Generator optimizer not found.\")\n if os.path.exists(disc_optim_dir):\n state_dict = torch.load(disc_optim_dir, map_location=device)\n disc_optim.load_state_dict(state_dict)\n else:\n raise FileNotFoundError(\"Discriminator optimizer not found.\")\n del state_dict\n\ndef train_disc(lr_img, labels):\n disc_optim.zero_grad()\n sr_img = gen(lr_img)\n pred = disc(sr_img)\n loss = bce_criterion(pred, labels)\n loss.backward()\n disc_optim.step()\n return loss.item()\n\ndef train_gen(lr_img, hr_img, labels):\n gen_optim.zero_grad()\n disc_optim.zero_grad()\n sr_img = gen(lr_img)\n pred = disc(sr_img)\n adv_loss = bce_criterion(pred, labels)\n mse_loss = mse_criterion(sr_img, hr_img)\n loss = mse_loss + 10e-3 * adv_loss\n loss.backward()\n gen_optim.step()\n return loss.item()\n\nprint(\"Generating dataset...\")\nnum_epochs = 100000\nds_train = Div2KDataset(root_train, num_epochs)\ndl_train = torch.utils.data.DataLoader(ds_train, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=0)\nds_val = Div2KDataset(root_val, num_epochs)\ndl_val = torch.utils.data.DataLoader(ds_val, batch_size=4, shuffle=True, drop_last=True, num_workers=0)\n\nprint(\"Training started.\")\nfor lr_img, hr_img in tqdm(dl_train):\n lr_img = lr_img.to(device)\n hr_img = hr_img.to(device)\n # Train discriminator on real data\n disc_optim.zero_grad()\n labels = torch.ones(batch_size, 1).to(device)\n pred = disc(hr_img)\n loss = bce_criterion(pred, labels)\n loss.backward()\n disc_optim.step()\n loss = loss.item()\n 
writer.add_scalar(\"mse_training/loss/disc_loss_real\", loss, gs)\n # Train discriminator on fake data\n labels = torch.zeros(batch_size, 1).to(device)\n loss = train_disc(lr_img, labels)\n writer.add_scalar(\"mse_training/loss/disc_loss_fake\", loss, gs)\n # Train generator\n labels = torch.ones(batch_size, 1).to(device)\n loss = train_gen(lr_img, hr_img, labels)\n writer.add_scalar(\"mse_training/loss/gen_loss\", loss, gs)\n if (gs)%100 == 0:\n # Save state of training\n pickle.dump(gs, open(gs_dir, \"wb\"))\n torch.save(gen.state_dict(), gen_dir)\n torch.save(disc.state_dict(), disc_dir)\n torch.save(gen_optim.state_dict(), gen_optim_dir)\n torch.save(disc_optim.state_dict(), disc_optim_dir)\n # Visualize some generated images\n gen.eval()\n lr_img, hr_img = next(iter(dl_val))\n lr_img, hr_img = lr_img.to(device), hr_img.to(device)\n sr_img = gen(lr_img)\n # Scale the images to [0, 1]\n hr_img = hr_img * .5 + .5\n sr_img = sr_img * .5 + .5\n writer.add_image(\"mse_training/image/original_images\", tv.utils.make_grid(hr_img, nrow=2), 0)\n writer.add_image(\"mse_training/image/superresolved_images\", tv.utils.make_grid(sr_img, nrow=2), 0)\n gen.train()\n writer.flush()\n gs += 1\nwriter.flush()\nwriter.close()","sub_path":"mse_train.py","file_name":"mse_train.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"448907720","text":"#!/usr/bin/env python\n# coding:utf-8\nfrom PyPDF2 import PdfFileMerger, PdfFileReader, PdfFileWriter\nimport os\nimport sys\nimport json\n\n\ndef add_bookmarks(path, file_dir):\n with open(file_dir + '\\\\bookmark.json', 'rb') as f:\n bookmarks = json.load(f)['Data']\n book = PdfFileReader(path)\n pdf = PdfFileWriter()\n pdf.cloneDocumentFromReader(book)\n for bookmark in bookmarks:\n try:\n pdf.addBookmark(bookmark['Title'], bookmark['Page'] - 1)\n except:\n break\n try:\n with open(path[0:path.rfind('.')] + '.bookmark.pdf', 'wb') as fout:\n pdf.write(fout)\n except FileNotFoundError:\n pass\n\n\ndef file_name_walk(file_dir):\n for root, dirs, files in os.walk(file_dir):\n if 'bookmark.json' in files:\n files.remove('bookmark.json')\n files.sort(key=lambda x: int(x[x.rfind('-') + 1:][:-4]))\n file_list = [file_dir + '\\\\' + file for file in files]\n merger = PdfFileMerger(strict=False)\n for pdf in file_list:\n merger.append(pdf)\n path = files[0][:files[0].rfind('-')] + '.pdf'\n merger.write(path)\n add_bookmarks(path, file_dir)\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n file_name_walk(sys.argv[1])\n","sub_path":"merge_pdf.py","file_name":"merge_pdf.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"274933398","text":"from collections import OrderedDict\n\nfrom cinch import db\nfrom cinch.models import STRING_LENGTH\n\n\njob_projects = db.Table(\n 'job_projects',\n db.Column('job_id', db.Integer, db.ForeignKey('jobs.id'),\n primary_key=True),\n db.Column('project_id', db.Integer, db.ForeignKey('projects.id'),\n primary_key=True),\n)\n\n\nclass Job(db.Model):\n __tablename__ = \"jobs\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(STRING_LENGTH), unique=True, nullable=False)\n projects = db.relationship('Project', secondary=job_projects)\n\n def __str__(self):\n return \"{} {}\".format(self.name, self.type_id)\n\n def ordered_projects(self):\n return sorted(list(self.projects), key=lambda p: p.name)\n\n\nbuild_commits = 
db.Table(\n 'build_commits',\n db.Column('build_id', db.Integer, db.ForeignKey('builds.id'),\n primary_key=True),\n db.Column('commit_sha', db.String(40), db.ForeignKey('commits.sha'),\n primary_key=True),\n)\n\n\nclass Build(db.Model):\n __tablename__ = \"builds\"\n\n id = db.Column(db.Integer, primary_key=True)\n build_number = db.Column(db.Integer)\n job_id = db.Column(db.Integer, db.ForeignKey('jobs.id'))\n success = db.Column(db.Boolean, nullable=True)\n status = db.Column(db.Text, nullable=True, default=\"\")\n\n job = db.relationship('Job', backref='builds')\n commits = db.relationship(\n 'Commit', secondary=build_commits, backref='builds')\n\n def __str__(self):\n return \"{}/{}\".format(self.job.name, self.build_number)\n\n def project_commits(self):\n commits = OrderedDict()\n for project in self.job.ordered_projects():\n commits[project.name] = None\n\n for commit in self.commits:\n commits[commit.project.name] = commit.sha\n\n return commits\n\n def matches_pull_request(self, pull_request):\n return (pull_request.head in self.commits)\n","sub_path":"cinch/jenkins/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"649565173","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom .models import Article, Tag, TagAndArticleId\n# Create your views here.\ndef index(request):\n article = Article.objects.order_by('-id')[:150]\n tag = Tag.objects.order_by('-id')[:150]\n return render(request, 'b/index.html', {\n 'article':article,\n 'tag' : tag,\n })\n\ndef detail(request):\n article_id = request.GET['id']\n article = Article.objects.get(id=article_id)\n tag_id_list = TagAndArticleId.objects.filter(article_id=article_id).values_list('tag_id')\n tag = Tag.objects.filter(id__in=tag_id_list)\n return render(request, 'b/detail.html', {\n 'article' : article,\n 'tag' : tag\n })\n\ndef tagList(request):\n tag_id = request.GET['id']\n article_id_list = TagAndArticleId.objects.filter(tag_id=tag_id).values_list('article_id')\n article = Article.objects.filter(id__in=article_id_list).order_by('-id')[:150]\n\n tag = Tag.objects.order_by('-id')[:150]\n return render(request, 'b/index.html', {\n 'article':article,\n 'tag' : tag,\n })","sub_path":"django/mysite/b/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"7908559","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport PIL\nimport scipy.misc\nimport matplotlib.pyplot as plt\n\n\nfile = \"PMA1033319007U.001.gif\"\ndata = scipy.misc.imread(file)\nZ= np.transpose(data)\n\n# Sampling 60 points in both dimensions\nT = np.linspace(np.pi, np.pi * 2 + np.pi, np.size(Z[1,:]))\nR = np.linspace(0, 1.0, np.size(Z[:,1]))\n\n# Create a polar axes\nax = plt.subplot(111, projection='polar')\nc = ax.pcolor(T, R, Z,cmap='cool')\n# activate next line for smaller size\n# with lower quality\n#c.set_rasterized(True)\nax.axis('off')\nplt.title(file)\n\nplt.savefig(\"polarplot3.pdf\")\n","sub_path":"polarplot3.py","file_name":"polarplot3.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"93838977","text":"from channels.routing import route\nfrom collaborative_crm.consumers import *\n\nchannel_routing = [\n route('websocket.connect', updates_connect, path=r'^/updates/$'),\n 
route('websocket.disconnect', updates_disconnect, path=r'^/updates/$'),\n route('notify_user', notify_user),\n route('email_notification', email_notification),\n route('notify_company', notify_company),\n route('execute_scheduled_tasks', execute_scheduled_tasks),\n route('execute_scheduled_task', execute_scheduled_task),\n route('delete_scheduled_task', delete_scheduled_task),\n route('new_available_property', new_available_property),\n route('notify_added_to_event', notify_added_to_event),\n route('notify_removed_from_event', notify_removed_from_event),\n route('notify_event_edition', notify_event_edition),\n route('notify_event_completion', notify_event_completion),\n route('notify_event_cancellation', notify_event_cancellation),\n route('update_mercadolibre_data', update_mercadolibre_data),\n]\n","sub_path":"mussi/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"468410520","text":"'''\nExp1. Test within-speaker variability\n\n2018-07-20\n2018-08-01\n- Feature extraction method was changed to my code\n'''\nimport ipdb as pdb\nimport os\nimport sys\nimport re\nimport textgrid\nimport glob\nimport numpy as np\nimport pandas as pd\nimport librosa\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nfrom speech_features import SpeechFeatures\n\n\nclass hparams:\n # Params\n def __init__(self):\n self.num_mels = 40\n self.num_freq = 1025 # =nfft\n self.sample_rate = 16000\n self.win_size = 0.02\n self.win_step = 0.01\n self.preemphasis = 0.97\n self.ndct = 36 # =numcep, num mfcc coeffs\n self.nfilt = 40\n\n\n# Get parameters\nhp = hparams()\n\n\ndef find_elements(pattern, my_list):\n '''Find elements in a list'''\n elements = []\n index = []\n\n for i, l in enumerate(my_list):\n if re.search(pattern, l):\n elements.append(my_list[i])\n index.append(i)\n return index, elements\n\n\ndef read_phn(phn_file):\n '''Read PHN file\n Returns:\n label: a list of labels\n time: a 2-d numpy array (sample location)\n '''\n with open(phn_file, 'r') as f:\n lines = f.readlines()\n time = np.zeros((len(lines), 2), dtype=np.int32)\n label = []\n for i, l in enumerate(lines):\n _b, _e, _l = l.strip().split()\n time[i] = int(_b), int(_e)\n label.append(_l)\n return label, time\n\n\ndef get_spectrogram(wav_file, phn_file, segment, time='center'):\n '''Extract frame-level features for every instance of the specified segment (eg. 'iy' as string)\n\n Returns:\n logspec_all: np.array of log magnitude spectra\n fftdct_all: np.array of DCT coefficients of the log spectra\n mfcc_all: np.array of MFCCs\n ctx: phone labels (triphone); eg.
['h#_sh_iy', ...]\n '''\n # Initialize SpeechFeatures\n S = SpeechFeatures(wav_file, hp.win_size, hp.win_step, hp.num_freq,\n hp.nfilt, hp.ndct, win_fun=np.hamming, pre_emp=hp.preemphasis)\n # # Load wav file\n # _y, sr = librosa.load(wav_file, sr=hp.sample_rate)\n\n # Load phn file\n labels, samples = read_phn(phn_file)\n\n # Iterate over provided segments\n logspec_all = np.array([], dtype=np.float32).reshape(\n 0, 1 + hp.num_freq // 2) # (,513)\n fftdct_all = np.array([], dtype=np.float32).reshape(\n 0, hp.ndct) # (,36)\n mfcc_all = np.array([], dtype=np.float32).reshape(0, hp.ndct) # (,36)\n idx, _ = find_elements(segment, labels)\n if len(idx) > 0:\n # Get context\n ctx = []\n for i in idx:\n if (i-1) < 0:\n pre = '#'\n else:\n pre = labels[i-1]\n if (i+1) > len(labels):\n post = '#'\n else:\n post = labels[i+1]\n ctx.append('_'.join([pre, labels[i], post]))\n\n # Audio processing\n for i in idx:\n # Extract sample\n begT, endT = samples[i, 0], samples[i, 1]\n sig_part = S.sig[begT:endT]\n # Get magnitude spectrogram (db, =log spectrogram)\n magspec, powspec, logspec = S.get_fft(sig_part)\n # Get dct of logspec\n fftdct = S.apply_dct(logspec)\n # Get MFCCs\n mfcc = S.get_mfcc(powspec)\n # Slice sample (at mid point; TODO: add frame choice)\n if time == 'center':\n logspec = logspec[logspec.shape[0]//2, :]\n fftdct = fftdct[fftdct.shape[0]//2, :]\n mfcc = mfcc[mfcc.shape[0]//2, :]\n elif time == 'all':\n pass\n else:\n raise Exception(f'time={time} is not supported yet')\n # Add mel spectrogram\n logspec_all = np.vstack([logspec_all, logspec])\n fftdct_all = np.vstack([fftdct_all, fftdct])\n mfcc_all = np.vstack([mfcc_all, mfcc])\n else:\n return None\n return (logspec_all, fftdct_all, mfcc_all, ctx)\n\n\nif __name__ == '__main__':\n # Get directories\n if sys.platform == 'darwin':\n TMT_DIR = '/Volumes/Transcend/_DataArchive/TMT'\n else:\n TMT_DIR = '../data/TMT'\n TRAIN_DIR = os.path.join(TMT_DIR, 'TRAIN')\n TEST_DIR = os.path.join(TMT_DIR, 'TEST')\n SPKR_INFO = '../data/spkr_info.txt'\n S = pd.read_table(SPKR_INFO, sep=',', na_filter=False)\n\n # Get files\n if sys.platform == 'darwin':\n wav_ext = '*.wav'\n elif sys.platform == 'linux':\n wav_ext = '*.WAV'\n else:\n raise Exception(\n f'OS should be either darwin of linux, not {sys.platform}')\n wavs = sorted(glob.glob(\n os.path.join(TMT_DIR, '**', '**', '**', wav_ext)))\n assert len(wavs) > 0\n\n # Speaker list\n spkrs = S.ID.unique().tolist() # eg. JMI0\n # phones = ['iy', 'ae', 'aa', 'uh',\n # 'b', 'd', 'g', 'p', 't', 'k',\n # 's', 'z', 'sh', 'dh', 'f', 'v',\n # 'jh', 'ch']\n phones = ['iy', 'aa', 'uh', 's', 'f']\n\n # Make speaker dictionary\n init_spec = np.array([], dtype=np.float32).reshape(0, 1 + hp.num_freq//2)\n init_dct = np.array([], dtype=np.float32).reshape(0, hp.ndct)\n # logspec dictionary\n logspec_d = {s: {v: init_spec for v in phones} for s in spkrs}\n # fftdct dictionary\n fftdct_d = {s: {v: init_dct for v in phones} for s in spkrs}\n # mfcc dictionary\n mfcc_d = {s: {v: init_dct for v in phones} for s in spkrs}\n # context dictionary\n cdict = {s: {v: [] for v in phones} for s in spkrs}\n\n for i, wav in enumerate(tqdm(wavs)):\n # eg. 
[FM] + JMI0\n spkr_id = re.search('DR[0-9]/(\\w+\\d)/', wav).group(1)\n phn = re.sub('wav|WAV', 'PHN', wav)\n for v in phones:\n out = get_spectrogram(wav, phn, v, time='all')\n if out is not None:\n logspec, fftdct, mfcc, ctx = out\n # logspec dictionary\n _data = logspec_d[spkr_id[1:]][v]\n logspec_d[spkr_id[1:]][v] = np.vstack([_data, logspec])\n # fftdct dictionary\n _data = fftdct_d[spkr_id[1:]][v]\n fftdct_d[spkr_id[1:]][v] = np.vstack([_data, fftdct])\n # mfcc dictionary\n _data = mfcc_d[spkr_id[1:]][v]\n mfcc_d[spkr_id[1:]][v] = np.vstack([_data, mfcc])\n # context dictionary\n cdict[spkr_id[1:]][v] += ctx\n if (i+1) % 100 == 0:\n print(f'{i+1}/{len(wavs)}')\n\n # Save\n np.save('spkr_sdict_logspec_all.npy', logspec_d)\n np.save('spkr_sdict_fftdct_all.npy', fftdct_d)\n np.save('spkr_sdict_mfcc_all.npy', mfcc_d)\n np.save('spkr_cdict_all.npy', cdict)\n print('Finished')\n","sub_path":"exp_vowels/make_spkr_dict_mfcc.py","file_name":"make_spkr_dict_mfcc.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"126600231","text":"\n\nfrom xai.brain.wordbase.nouns._bangle import _BANGLE\n\n#calss header\nclass _BANGLES(_BANGLE, ):\n\tdef __init__(self,): \n\t\t_BANGLE.__init__(self)\n\t\tself.name = \"BANGLES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bangle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_bangles.py","file_name":"_bangles.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"521833986","text":"from matplotlib.pylab import *\nimport h5py\n\nfid = h5py.File(\"04a_ejemplo_reticulado_guardar.h5\", \"r\")\n\n\n# for i in fid[\"cargas_val\"]:\n# \tprint (i)\n\n\n# valor = fid[\"cargas_val\"][0]\n\n# print (double(valor))\n\n# for i in range(10):\n# \tvalor_r = double(fid[\"restricciones_val\"][i])\n# \tprint (valor_r)\ncont_2=0\nfor i in fid[\"restricciones\"]:\n valor = double(fid[\"restricciones_val\"][cont_2])\n # ret.agregar_fuerza(i[0],i[1],valor)\n print (i[0],i[1],valor)\n cont_2 += 1\n\nfor i in fid[\"xyz\"]:\n #ret.agregar_nodo(i[0],i[1],i[2])\n print(i[0],i[1],i[2])\n\n\n\n\n\n\n# A = fid[\"barras\"][:, :]\n\n#print(A)","sub_path":"ejemplo_hdf5_2.py","file_name":"ejemplo_hdf5_2.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"187072623","text":"#!/usr/bin/python3 -B\n# product.py\n\n\ndef create(conn, cursor, name, seller, image_filename):\n cursor.execute('''\nINSERT INTO product (\n name,\n seller,\n image\n )\n VALUES (\n %s,\n %s,\n %s\n );\n''', (name, seller, image_filename))\n conn.commit()\n\n\ndef get_image(conn, cursor, id):\n cursor.execute('''\nSELECT image\n FROM product\n WHERE id = %s;\n''', (id,))\n return cursor.fetchall()[0][0]\n\n\ndef search(conn, cursor, query):\n pass\n\n\ndef delete(conn, cursor, id):\n cursor.execute('''\nDELETE FROM product\n WHERE id = %s;\n''', (id,))\n conn.commit()\n","sub_path":"backend/src/util/db/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"180810211","text":"import numpy as np\nfrom skimage import io, filter #@UnresolvedImport\nimport pandas as pandas\nimport os as os\nfrom skimage.filter import canny #@UnresolvedImport\nfrom scipy import ndimage\nimport matplotlib.pyplot as 
plt\n\n\ndef findCenters(labeled_plants, stackSum):\n center = {}\n for i in set(labeled_plants.flat):\n blob = np.where(labeled_plants==i)\n center[i] = [blob[0][np.argmax(stackSum[blob])], blob[1][np.argmax(stackSum[blob])]]\n center = pandas.Series(center)\n return center\n\ndef getCircle(center, radius=5):\n points = []\n for i in range(center[0]-radius, center[0]+radius+1):\n for j in range(center[1]-radius, center[1]+radius+1):\n if (i < 0) or (j < 0) or (i >= 240) or (j >=360): continue\n if np.sqrt(sum(pow(np.array(center[1]) - (i,j),2))):\n points.append([i,j])\n return np.array(points)\n\ndef detect_peaks(image):\n \"\"\"\n http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array\n\n Takes an image and detect the peaks using the local maximum filter.\n Returns a boolean mask of the peaks (i.e. 1 when\n the pixel's value is the neighborhood maximum, 0 otherwise)\n \"\"\"\n from scipy.ndimage.filters import maximum_filter\n from scipy.ndimage.morphology import generate_binary_structure, binary_erosion\n\n # define an 8-connected neighborhood\n neighborhood = generate_binary_structure(2,2)\n\n #apply the local maximum filter; all pixel of maximal value \n #in their neighborhood are set to 1\n local_max = maximum_filter(image, footprint=neighborhood)==image\n background = (image==0)\n\n #a little technicality: we must erode the background in order to \n #successfully subtract it form local_max, otherwise a line will \n #appear along the background border (artifact of the local maximum filter)\n eroded_background = binary_erosion(background, structure=neighborhood, \n border_value=1)\n detected_peaks = local_max - eroded_background\n peaks = np.array(np.where(detected_peaks)).T\n\n return peaks\n\ndef findPlantsCanny(stackVar, stackSum, showImages=True):\n edges = canny(stackVar)\n fill_stack = ndimage.binary_fill_holes(edges)\n label_objects, nb_labels = ndimage.label(fill_stack)\n sizes = np.bincount(label_objects.ravel())\n mask_sizes = sizes > 25\n \n for label in range(len(mask_sizes)):\n '''\n Get rid of lines in addition to the straight size threshold.\n '''\n pts = np.where(label_objects == label)\n xRange = (max(pts[0]) - min(pts[0]))\n yRange = (max(pts[1]) - min(pts[1]))\n areaCovered = float(len(pts[0])) / (xRange*yRange)\n if (areaCovered < .33) or (xRange < 3) or (yRange < 3):\n mask_sizes[label] = False\n\n mask_sizes[0] = 0\n plants_cleaned = mask_sizes[label_objects]\n labeled_plants, numPlants = ndimage.label(plants_cleaned)\n center = findCenters(labeled_plants, stackSum)\n \n if showImages:\n fig, axs = plt.subplots(1,3, figsize=(14,4), sharey=True)\n axs[0].imshow(stackVar)\n axs[1].imshow(stackVar, cmap=plt.cm.jet, interpolation='nearest') #@UndefinedVariable\n axs[1].contour(plants_cleaned, [0.5], linewidths=1.2, colors='y')\n axs[2].imshow(labeled_plants, cmap=plt.cm.spectral, interpolation='nearest') #@UndefinedVariable\n axs[2].scatter(np.array(center.tolist())[:,1], np.array(center.tolist())[:,0], \n color='grey')\n for ax in axs: ax.axis('off')\n fig.subplots_adjust(wspace=.01)\n \n return labeled_plants, center\n \ndef getFeatures(outDir, sizeThreshold=25, showImages=True):\n fList = os.listdir(outDir)\n fList = np.array(fList).take(np.argsort(map(lambda s: int(s.split('.')[0]), \n fList)))\n s = np.array([io.imread(outDir+f, as_grey=False, plugin=None, flatten=None) \n for f in fList])\n stackSum = np.sum(s, axis=0)\n stackVar = np.var(s, axis=0)\n stackVar = filter.tv_denoise(stackVar, weight=300, eps=1e-5)\n stackSum = 
filter.tv_denoise(stackSum, weight=300, eps=1e-5)\n\n labeled_plants, center = findPlantsCanny(stackVar, stackSum)\n circles = center.map(getCircle)\n maxTrace = pandas.DataFrame([np.max(s[:,circles[i][:,0],circles[i][:,1]],axis=1) \n for i in set(labeled_plants.flat)]).T\n wholeRegion = pandas.DataFrame([np.max(s[:,np.where(labeled_plants==i)[0],\n np.where(labeled_plants==i)[1]],axis=1) \n for i in set(labeled_plants.flat)]).T\n traces = pandas.DataFrame([np.mean(s[:,circles[i][:,0],circles[i][:,1]],axis=1) \n for i in set(labeled_plants.flat)]).T\n \n return labeled_plants, center, traces, maxTrace, wholeRegion\n\n","sub_path":"AutoLuc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"97798045","text":"import unreal\nimport random\n\n\nACTORS_COUNT = (\n 3,\n 3,\n 3\n)\n\nMESH_PATH = ''\nANIMATION_TIME = 10\nCHANNELS_TO_KEY = [\n 'Location.X',\n 'Location.Y',\n 'Location.Z'\n]\n\nlog\n\n\ndef hello():\n unreal.log(\"hello animations script\")\n\n\ndef main():\n unreal.log(\"main def\")\n\n\ndef add_object_to_animation(target_animation, actor):\n fps = target_animation.get_display_rate()\n actor_location = actor.get_actor_location()\n actor_location_vector3 = [actor_location.x, actor_location.y, actor_location.z]\n animation = target_animation.add_possessable(actor)\n\n\ndef create_actor(location):\n cube=unreal.EditorLevelLibrary.spawn_actor_from_class(\n unreal.StaticMeshActor,\n location=location\n )\n mesh = unreal.load_object(None, MESH_PATH)","sub_path":"animations.py","file_name":"animations.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"28434628","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 9 11:49:35 2019\n\n@author: Hugo\n\"\"\"\n\ndef exception_str(f):\n \n try:\n f()\n except Exception as ex:\n return str(ex)\n else: \n return \"No exception was raised\"\n \nprint(exception_str(lambda: 1/0))","sub_path":"1st_Year/1st_Semestre/Fpro/RE's/RE13/exception_str.py","file_name":"exception_str.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"630201479","text":"import pandas as pd\nimport datetime\nclass MShape:\n\tdef __init__(self):\n\t\tpass\n\n\tdef get(self, df: pd.DataFrame, colName, applyOnGains=True, window=14, maxFirstLegPeriod=10, maxSecondLegPeriod=10, maxThirdLegPeriod=10 \\\n\t\t\t, minleg2to1ratio=0.1, maxleg2to1ratio=0.5, minleg3to2ratio=0.25, leg4AboveDataPoints=3):\n\n\n\t\tlength = df.shape[0]\n\t\trv = pd.DataFrame(index=df.index.copy())\n\t\trv['shapebuy'] = False\n\t\trv['L1Length'] = 0.0\n\t\trv['L2To1Ratio'] = 0.0\n\t\trv['L3To2Ratio'] = 0.0\n\t\trv['L4To2Ratio'] = 0.0\n\t\trv['L1Start'] = datetime.datetime.utcnow()\n\t\trv['L1End'] = datetime.datetime.utcnow()\n\t\trv['L2End'] = datetime.datetime.utcnow()\n\t\trv['L3End'] = datetime.datetime.utcnow()\n\n\n\t\tinput = df[colName]\n\t\trv['gain'] = (df[colName] / df[colName].shift()) - 1\n\t\tif applyOnGains:\n\t\t\tinput = rv['gain']\n\n\t\ti = 0\n\t\tstats = {0: 0, 1: 0, 2: 0, 3:0, 4:0, 5: 0}\n\t\twhile i < length:\n\n\t\t\tif i == 0 or i % 200 == 0:\n\t\t\t\tprint(\"Computing wShape for\", i)\n\t\t\tstate = 0\n\t\t\tj = i+1\n\t\t\tleg1LowValue = input[i]\n\t\t\tleg2HighValue = 0\n\t\t\tleg1HighPoint = i\n\t\t\tleg1HighPoint = i\n\t\t\tleg2LowPoint = 
i\n\t\t\tleg3HighPoint = i\n\n\t\t\tif state == 0:\n\t\t\t\twhile(j < length and input.iloc[j] > input.iloc[j-1]):\n\t\t\t\t\tj = j+1\n\n\t\t\t\tj = j-1\n\t\t\t\tif j < length and 0 < (j - i) <= maxFirstLegPeriod:\n\t\t\t\t\tstate = 1\n\t\t\t\t\tleg1Length = input[j] - input[i]\n\t\t\t\t\trv.iat[j, 1] = leg1Length\n\t\t\t\t\tleg1HighPoint = j\n\n\t\t\tj = j + 1\n\t\t\tif state == 1:\n\t\t\t\twhile (j < length and input.iloc[j] <= input.iloc[j - 1]):\n\t\t\t\t\tj = j + 1\n\n\t\t\t\tj = j-1\n\t\t\t\tif j < length and 0 < (j - leg1HighPoint):\n\t\t\t\t\tleg2Length = input[leg1HighPoint]-input[j]\n\t\t\t\t\trv.iat[j, 2] = leg2Length/leg1Length\n\n\t\t\t\t\tleg2HighValue = input[j]\n\t\t\t\t\tif 0 < (j - i) <= maxSecondLegPeriod and minleg2to1ratio <= leg2Length/leg1Length <= maxleg2to1ratio:\n\t\t\t\t\t\tstate = 2\n\t\t\t\t\t\tleg2LowPoint = j\n\n\n\t\t\tj = j+1\n\t\t\tif state == 2:\n\t\t\t\twhile (j < length and input.iloc[j] >= input.iloc[j - 1]):\n\t\t\t\t\tj = j + 1\n\n\t\t\t\tj = j-1\n\t\t\t\tif j < length and 0 < (j - leg2LowPoint):\n\t\t\t\t\tleg3Length = input[j]-input[leg2LowPoint]\n\t\t\t\t\trv.iat[j, 3] = leg3Length / leg2Length\n\t\t\t\t\tif 0 < (j - i) <= maxThirdLegPeriod and minleg3to2ratio <= leg3Length / leg2Length and leg3Length <= leg1Length:\n\t\t\t\t\t\tstate = 3\n\t\t\t\t\t\tleg3HighPoint = j\n\n\t\t\tj = j+1\n\t\t\tfoundBuyPoint = False\n\t\t\tif state == 3:\n\t\t\t\twhile (j < length and input.iloc[j] <= input.iloc[j - 1]):\n\t\t\t\t\t#if it ever crosses leg2HighVaues we are go, our strike point\n\t\t\t\t\tstrikePoint = leg2HighValue\n\t\t\t\t\tif (j-leg3HighPoint) > 0 and input[j] < strikePoint:\n\t\t\t\t\t\trv.iat[j, 4] = (leg3HighPoint-strikePoint) / leg2Length\n\t\t\t\t\t\tstate = 4\n\t\t\t\t\t\tfoundBuyPoint = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\tj = j + 1\n\n\t\t\tstats[state] = stats[state] + 1\n\t\t\tif foundBuyPoint and j < length: # TODO fix it and (j-i) <= window:\n\t\t\t\t#verify next three are high\n\t\t\t\trv.iat[j, 0] = True\n\t\t\t\trv.at[rv.index.values[j], 'L1Start'] = rv.index.values[i]\n\t\t\t\trv.at[rv.index.values[j], 'L1End'] = rv.index.values[leg1HighPoint]\n\t\t\t\trv.at[rv.index.values[j], 'L2End'] = rv.index.values[leg2LowPoint]\n\t\t\t\trv.at[rv.index.values[j], 'L3End'] = rv.index.values[leg3HighPoint]\n\t\t\t\ti = j + 1\n\t\t\telse: # TODO : optimization- might be incremented to a higher value\n\t\t\t\ti = i + 1\n\n\t\tprint('MShape found for rows:', rv[rv['shapebuy'].isin([True])].shape)\n\t\treturn rv\n\n\n\n\n\n\n\n\n","sub_path":"indicators/mShape.py","file_name":"mShape.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"72238937","text":"from rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\n\n\nclass WithTotalPagesCountPagination(PageNumberPagination):\n page_size_query_param = 'limit'\n\n def get_paginated_response(self, data):\n return Response({\n 'links': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link()\n },\n 'count': getattr(self.page.paginator, 'count', 0),\n 'total_pages': self.page.paginator.num_pages,\n 'results': data\n })","sub_path":"apps/restful/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"588184544","text":"# import modules\nimport pandas as pd\nimport numpy as np\nimport csv\nfrom DBAccess import 
DBConnection\nfrom sklearn.externals import joblib\n\n#read metrics file\nmodelMetrics = pd.read_csv(\"src/prediction/apparel/csv/metricsData1.csv\")\nprint(modelMetrics)\n\n#Read data from the DB\ndbConnection = DBConnection()\nreadDataSQL=\"SELECT `ID`,`Name`,`Career Growth`,`JoinedYear`,`Tenure`,`Age`,`Maritial Status`,`Total Salary`,`Promotions`,`Training`,`Gender`,`Working Hours`,`Experience`,`Performance Rating`,`No.of Leaves`,`Participation of Activities` FROM `emppredict` \"\nprint(\"Apparel Predict dataset\")\napperalDataPredict = dbConnection.readDataSet(readDataSQL)\nprint(apperalDataPredict);\n\nc = csv.writer(open(\"src/prediction/apparel/csv/ApperalPredict.csv\",\"w\",newline=''))\n\nc.writerow([\"ID\", \"Name\", \"Career Growth\", \"JoinedYear\", \"Tenure\", \"Age\", \"Maritial Status\", \"Total Salary\", \"Promotions\",\"Training\", \"Gender\" ,\"Working Hours\", \"Experience\", \"Performance Rating\", \"No.of Leaves\",\"Participation of Activities\"])\n\nfor x in apperalDataPredict:\n c.writerow([x[\"ID\"], \n x[\"Name\"], \n x[\"Career Growth\"], \n x[\"JoinedYear\"],\n x[\"Tenure\"],\n x[\"Age\"],\n x[\"Maritial Status\"],\n x[\"Total Salary\"],\n x[\"Promotions\"],\n x[\"Training\"],\n x[\"Gender\"],\n x[\"Working Hours\"],\n x[\"Experience\"],\n x[\"Performance Rating\"],\n x[\"No.of Leaves\"],\n x[\"Participation of Activities\"],])\n\napperalDataToPredict= pd.read_csv(\"src/prediction/apparel/csv/ApperalPredict.csv\")\n#apperalData = pd.read_csv(\"src/prediction/apparel/csv/ApperalDataSet.csv\")\n\ncolumns = apperalDataToPredict.columns.tolist()\nprint(columns)\n# have to use only numeric values to the model\ncolumns = [c for c in columns if\n c not in [\"ID\", \"Name\"]]\nprint(columns)\n\n#print(modelMetrics.columns)\n#print(modelMetrics)\n#accuracy=modelMetrics[\"Accuracy\"]\n#print(modelMetrics[\"Accuracy\"])\ni=modelMetrics[\"Accuracy\"].argmax()\nmodel=modelMetrics[\"Unnamed: 0\"][i]\n\nmodelLogisticRegressionFile='LogisticRegression.joblib.pkl'\nmodelDesicionTreeFile='DesicionTree.joblib.pkl'\nmodelSVMFile='SVM.joblib.pkl'\nmodelRandomForestClassifierFile='RandomForestClassifier.joblib.pkl'\nmodelKNeighborsClassifierFile='KNeighbors.joblib.pkl'\n\nprint(apperalDataToPredict[columns])\n\n#Accessing corresponding model\nif(model==\"LogisticReg\"):\n logisticRegressionModel = joblib.load(modelLogisticRegressionFile)\n predictions=logisticRegressionModel.predict(apperalDataToPredict[columns])\nelif(model==\"SVMC\"):\n svmModel = joblib.load(modelSVMFile)\n predictions=svmModel.predict(apperalDataToPredict[columns])\nelif (model == \"DecisionTree\"):\n desicionTreeModel = joblib.load(modelDesicionTreeFile)\n predictions=desicionTreeModel.predict(apperalDataToPredict[columns])\nelif (model == \"RandomForest\"):\n randomForestModel = joblib.load(modelRandomForestClassifierFile)\n predictions=randomForestModel.predict(apperalDataToPredict[columns])\nelif (model == \"kNN9\"):\n knnModel = joblib.load(modelKNeighborsClassifierFile)\n predictions=knnModel.predict(apperalDataToPredict[columns])\n\n #df1['e'] = Series(np.random.randn(sLength), index=df1.index)\n#predictions['EmployeeName']= pd.Series(apperalDataToPredict[\"Name\"], index=predictions.index)\n#print(predictions)\n\nIDs = np.array(apperalDataToPredict.ID)\nnames= np.array(apperalDataToPredict.Name)\nprint(IDs)\nprint(predictions)\nDAT = np.column_stack((IDs, names, predictions))\nnp.savetxt('src/prediction/apparel/csv/predictedchurn.csv',DAT, delimiter=\" \", 
fmt=\"%s\")\n\n\n","sub_path":"build/classes/prediction/apparel/pythoncode/trainBestModel.py","file_name":"trainBestModel.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"609886856","text":"from sklearn import tree\n\n# Bumpy skin is [0], smooth skin is [1]\nfeatures = [\n [140, 1],\n [130, 1],\n [150, 0],\n [170, 0]\n]\n\n# Apple is [0], orange is [1]\nlabels = [0, 0, 1, 1]\n\n# Build the classifier -> [the DecisionTreeClassifier function]\nclf = tree.DecisionTreeClassifier()\n\n# fit is a function synonymous with 'finding patterns in the data'\nclf = clf.fit(features, labels)\n\n# Here the mass is 150 g and the skin is bumpy [0].\nprint(clf.predict([[150, 0]]))\n\n# If the output is [0] it is an apple; if it is [1], an orange.","sub_path":"hello-world.py","file_name":"hello-world.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"516065863","text":"import unittest\r\nfrom selenium import webdriver\r\nimport time\r\n\r\n\r\nclass TestRegistration(unittest.TestCase):\r\n def reg_form(self, link_url):\r\n browser = webdriver.Chrome()\r\n browser.get(link_url)\r\n\r\n # Your code that fills in the required fields\r\n placeholders = ['Input your first name', 'Input your last name',\r\n 'Input your email']\r\n data = ['Mike', 'Smith', \"mike@test.com\"]\r\n for i in range(len(placeholders)):\r\n input_field = browser.find_element_by_xpath(\"//input[@placeholder=\" + \"'\" + placeholders[i] + \"'\" + \"]\")\r\n input_field.send_keys(data[i])\r\n # Submit the completed form\r\n button = browser.find_element_by_css_selector(\"button.btn\")\r\n button.click()\r\n\r\n # Check that we managed to register\r\n # wait for the page to load\r\n time.sleep(1)\r\n\r\n # find the element that contains the text\r\n welcome_text_elt = browser.find_element_by_tag_name(\"h1\")\r\n # store the text from the welcome_text_elt element in the welcome_text variable\r\n welcome_text = welcome_text_elt.text\r\n\r\n # check that the expected text matches the text on the site page\r\n form_text = \"Congratulations!
You have successfully registered!\"\n self.assertEqual(welcome_text, form_text,\n f\"{welcome_text} is another than {form_text}\")\n browser.quit()\n\n def test_abs1(self):\n self.reg_form(\"http://suninjuly.github.io/registration1.html\")\n\n def test_registration2(self):\n self.reg_form(\"http://suninjuly.github.io/registration2.html\")\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"Задачи_stepik/test_init_test.py","file_name":"test_init_test.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"15389839","text":"from math import ceil\nfrom typing import Tuple\nfrom unittest import TestCase\n\nfrom numpy import mean\n\n\nclass Reaction:\n def __init__(self, output_chemical: Tuple[str, int], input_chemicals: Tuple):\n self.input_chemicals = input_chemicals\n self.output_chemical = output_chemical\n\n @staticmethod\n def create(description: str) -> 'Reaction':\n input_chemicals, output_chemical = description.split('=>')\n return Reaction(\n Reaction.parse_single_term(output_chemical),\n tuple(\n Reaction.parse_single_term(input_chemical)\n for input_chemical in input_chemicals.split(',')\n )\n )\n\n @staticmethod\n def parse_single_term(single_term: str) -> Tuple[str, int]:\n parts = single_term.strip('\\n ').split(' ')\n return parts[1], int(parts[0])\n\n def __repr__(self):\n return 'Reaction(output_chemical={}, input_chemicals={})'.format(\n self.output_chemical,\n self.input_chemicals\n )\n\n\ndef read_rules(filename='data.txt'):\n with open(filename) as file:\n reactions = {\n reaction.output_chemical[0]: reaction\n for reaction in [\n Reaction.create(line)\n for line in file\n ]\n }\n return reactions\n\n\ndef needed_chemicals(needed):\n return [\n chemical\n for chemical, needed_quantity in needed.items()\n if needed_quantity != 0 and chemical != 'ORE'\n ]\n\n\ndef solve_silver(reactions, fuel_goal=1):\n needed = {component: 0 for component in reactions.keys()}\n needed['FUEL'] = fuel_goal\n needed['ORE'] = 0\n extra = {component: 0 for component in reactions.keys()}\n while len(needed_chemicals(needed)) != 0:\n chemical = needed_chemicals(needed)[0]\n needed_quantity = needed[chemical]\n needed[chemical] = 0\n if needed_quantity <= extra[chemical]:\n extra[chemical] -= needed_quantity\n continue\n reaction = reactions[chemical]\n generated_quantity = reaction.output_chemical[1]\n reaction_quantity = ceil((needed_quantity - extra[chemical]) / generated_quantity)\n extra[chemical] += reaction_quantity * generated_quantity - needed_quantity\n for input_chemical in reaction.input_chemicals:\n needed[input_chemical[0]] += input_chemical[1]*reaction_quantity\n\n print(extra)\n print(needed)\n\n return needed\n\n\nclass TestSilver(TestCase):\n def test_example0(self):\n reactions = read_rules('example0.txt')\n needed = solve_silver(reactions)\n self.assertEqual(\n needed['ORE'],\n 31\n )\n\n def test_example1(self):\n reactions = read_rules('example1.txt')\n needed = solve_silver(reactions)\n self.assertEqual(\n needed['ORE'],\n 165\n )\n\n # 1786 too low\n def test_assignement(self):\n reactions = read_rules('data.txt')\n needed = solve_silver(reactions)\n self.assertEqual(\n needed['ORE'],\n 1037742\n )\n\n\nclass TestGold(TestCase):\n def test_assignement(self):\n reactions = read_rules('data.txt')\n needed = solve_silver(reactions, int(1_000_000_000_000/1_037_742))\n self.assertEqual(\n needed['ORE'],\n 612_856_043_188\n )\n needed = solve_silver(reactions, 
int(1_000_000_000_000/1_000_000))\n self.assertEqual(\n needed['ORE'],\n 635_987_164_066\n )\n needed = solve_silver(reactions, int(1_000_000_000_000/500_000))\n self.assertEqual(\n needed['ORE'],\n 1_271_973_764_454\n )\n lower_bound = int(1_000_000_000_000/1_000_000)\n upper_bound = int(1_000_000_000_000/500_000)\n bisect = int(mean([lower_bound, upper_bound]))\n while bisect != lower_bound:\n if solve_silver(reactions, fuel_goal=bisect)['ORE'] > 1_000_000_000_000:\n upper_bound = bisect\n else:\n lower_bound = bisect\n bisect = int(mean([lower_bound, upper_bound]))\n self.assertEqual(\n bisect,\n 1572358\n )\n","sub_path":"day14/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"633317243","text":"\"\"\"This file is a GovUK_sitemap spider created on top of the ATSSpider\nscrapy crawl govuk_sitemap -a mining_job_id=9999 -a iteration=1 -a url=\"https://www.civilservicejobs.service.gov.uk/sitemap.xml\"\n\nseed url:\n https://www.civilservicejobs.service.gov.uk/sitemap.xml\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urlparse\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, NormalizedJoin, Prefix, Replace\n\npattern = {\n 'ref_number': compile(r'jcode=(\\d+)')\n}\n\n\nclass GovUK_sitemap(ATSSpider):\n\n name = 'govuk_sitemap'\n\n def parse(self, response):\n # parse xml response\n # call GET method to each job urls\n sel = Selector(response)\n sel.remove_namespaces()\n urls = sel.xpath(\"//loc/text()\").extract()\n for url in urls:\n if 'jcode' in urlparse(url).query:\n yield Request(\n url,\n callback=self.parse_job_callback()\n )\n\n def parse_job(self, response):\n # Extract all required information\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@class=\"vac_display_title_block\"]/div/h1/text()'\n )\n loader.add_xpath(\n 'location',\n [\n '//div/h3[contains(text(), \"Town/City\")]/following-sibling::div[1]/text()',\n '//div/h3[contains(text(), \"Region\")]/following-sibling::div[1]/text()',\n ],\n NormalizedJoin(', ')\n )\n loader.add_xpath(\n 'company',\n '//div[@class=\"vac_display_title_block\"]/p[@class=\"csr-page-subtitle\"]/text()'\n )\n loader.add_xpath(\n 'expiration_date',\n '//div/p[@class=\"vac_display_closing_date\"]/text()',\n Replace('Closing date: '),\n ConvertDateString('%d %b %Y')\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % self.name),\n re=pattern['ref_number']\n )\n loader.add_xpath(\n 'baseSalary',\n '//div/h3[contains(text(), \"Salary\")]/following-sibling::div[1]/text()'\n )\n loader.add_xpath(\n 'jobtype',\n '//div/h3[contains(text(), \"Post type\")]/following-sibling::div[1]/text()'\n )\n loader.add_xpath(\n 'workhours',\n '//div/h3[contains(text(), \"Hours\")]/following-sibling::div[1]/text()'\n )\n loader.add_xpath(\n 'description',\n '//div/h3[contains(text(), \"Job description\")]/following-sibling::div[1]'\n )\n loader.add_xpath(\n 'benefits',\n '//div/h3[contains(text(), \"Benefits\")]/following-sibling::div[1]'\n )\n\n yield 
loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/govuk_sitemap.py","file_name":"govuk_sitemap.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"575919741","text":"\"\"\"\ntellopy sample using joystick and video palyer\n\n - you can use PS3/PS4/XONE joystick to controll DJI Tello with tellopy module\n - you must install mplayer to replay the video\n - Xbox One Controllers were only tested on Mac OS with the 360Controller Driver.\n get it here -> https://github.com/360Controller/360Controller'''\n\"\"\"\n\nimport time\nimport sys\nimport tellopy\nimport pygame\nimport pygame.locals\nfrom subprocess import Popen, PIPE\n\n\nclass JoystickPS4:\n # d-pad\n UP = 1 # UP\n DOWN = -1 # DOWN\n ROTATE_LEFT = -10 # LEFT\n ROTATE_RIGHT = 10 # RIGHT\n\n # buttons\n LEFT = 0 # SQUARE\n BACKWARD = 1 # CROSS\n RIGHT = 2 # CIRCLE\n FORWARD = 3 # TRIANGLE\n TAKEOFF = 4 # L1\n LAND = 5 # R1\n # UNUSED = 6 #L2\n # UNUSED = 7 #R2\n RECORD = 8 #SHARE\n TAKE_PICTURE = 9 #OPTIONS\n # UNUSED = 10 #LEFT STICK\n # UNUSED = 11 #RIGHT STICK\n FLIP = 12 # PS\n PALM_LAND = 13 # TOUCHPAD\n\n # axis\n LEFT_X = 0 # LEFT STICK X\n LEFT_Y = 1 # LEFT STICK X\n RIGHT_X = 2 # RIGHT STICK X\n RIGHT_Y = 3 # RIGHT STICK Y\n SPEED_UP = 4 # L2\n SPEED_DOWN = 5 # R2\n\n # axis value\n LEFT_X_REVERSE = 1.0\n LEFT_Y_REVERSE = -1.0\n RIGHT_X_REVERSE = 1.0\n RIGHT_Y_REVERSE = -1.0\n\n # others\n NO_INPUT = 0\n ORI_VALUE = 0.0\n DEADZONE = 0.1\n IF_FLIP = 0\n\n\nprev_flight_data = None\nvideo_player = None\nbuttons = None\nori_speed = 30\nspeed = ori_speed\nthrottle = 0.0\nyaw = 0.0\npitch = 0.0\nroll = 0.0\n\n\ndef handler(event, sender, data, **args):\n global prev_flight_data\n global video_player\n drone = sender\n if event is drone.EVENT_FLIGHT_DATA:\n if prev_flight_data != str(data):\n print(data)\n prev_flight_data = str(data)\n elif event is drone.EVENT_VIDEO_FRAME:\n if video_player is None:\n video_player = Popen(['mplayer', '-fps', '35', '-'], stdin=PIPE)\n try:\n video_player.stdin.write(data)\n except IOError as err:\n print(err)\n video_player = None\n else:\n print('event=\"%s\" data=%s' % (event.getname(), str(data)))\n\n\ndef update(old, new, max_delta=0.3):\n if abs(old - new) <= max_delta:\n res = new\n else:\n res = 0.0\n return res\n\n\ndef handle_input_event(drone, e):\n global speed\n global ori_speed\n global throttle\n global yaw\n global pitch\n global roll\n\n if e.type == pygame.locals.JOYAXISMOTION:\n if -buttons.DEADZONE > e.value or e.value > buttons.DEADZONE:\n print(e)\n else:\n print(e)\n\n if e.type == pygame.locals.JOYAXISMOTION:\n # ignore small input values (Deadzone)\n if -buttons.DEADZONE <= e.value and e.value <= buttons.DEADZONE:\n e.value = buttons.ORI_VALUE\n if e.axis == buttons.LEFT_Y:\n throttle = update(throttle, e.value * buttons.LEFT_Y_REVERSE)\n drone.set_throttle(throttle)\n if e.axis == buttons.LEFT_X:\n yaw = update(yaw, e.value * buttons.LEFT_X_REVERSE)\n drone.set_yaw(yaw)\n if e.axis == buttons.RIGHT_Y:\n pitch = update(pitch, e.value * buttons.RIGHT_Y_REVERSE)\n drone.set_pitch(pitch)\n if e.axis == buttons.RIGHT_X:\n roll = update(roll, e.value * buttons.RIGHT_X_REVERSE)\n drone.set_roll(roll)\n if e.axis == buttons.SPEED_UP:\n speedvalue = (e.value + 1) / 2 * 70 + ori_speed\n if speed < speedvalue:\n speed = speedvalue\n print('speed=', speed)\n if e.axis == buttons.SPEED_DOWN:\n speedvalue = 100 - (e.value + 1) / 2 * 70\n if speed > speedvalue:\n speed = speedvalue\n 
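# editor's worked example (hedged): update() above damps stick jumps —\n            # update(0.0, 0.25) -> 0.25 (|delta| <= max_delta 0.3, accepted), while\n            # update(0.0, 0.90) -> 0.0 (sudden jump rejected to zero), so axis noise\n            # and controller spikes don't become flight commands.\n            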
print('speed=', speed)\n elif e.type == pygame.locals.JOYHATMOTION:\n hatvalue = e.value[0] * 10 + e.value[1]\n if hatvalue == buttons.ROTATE_LEFT:\n drone.counter_clockwise(speed)\n if hatvalue == buttons.ROTATE_RIGHT:\n drone.clockwise(speed)\n if hatvalue == buttons.UP:\n drone.up(speed)\n if hatvalue == buttons.DOWN:\n drone.down(speed)\n if hatvalue == buttons.NO_INPUT:\n drone.clockwise(buttons.ORI_VALUE)\n drone.up(buttons.ORI_VALUE)\n elif e.type == pygame.locals.JOYBUTTONDOWN:\n if e.button == buttons.LAND:\n drone.land()\n elif e.button == buttons.PALM_LAND:\n drone.palm_land()\n elif e.button == buttons.FLIP:\n buttons.IF_FLIP = 1\n elif e.button == buttons.FORWARD:\n if buttons.IF_FLIP == 1:\n drone.flip_forward()\n else:\n drone.forward(speed)\n elif e.button == buttons.BACKWARD:\n if buttons.IF_FLIP == 1:\n drone.flip_back()\n else:\n drone.backward(speed)\n elif e.button == buttons.RIGHT:\n if buttons.IF_FLIP == 1:\n drone.flip_right()\n else:\n drone.right(speed)\n elif e.button == buttons.LEFT:\n if buttons.IF_FLIP == 1:\n drone.flip_left()\n else:\n drone.left(speed)\n\n elif e.type == pygame.locals.JOYBUTTONUP:\n if e.button == buttons.TAKEOFF:\n if throttle != buttons.ORI_VALUE:\n print('###')\n print('### throttle != 0.0 (This may hinder the drone from taking off)')\n print('###')\n drone.takeoff()\n elif e.button == buttons.FLIP:\n buttons.IF_FLIP = 0\n elif e.button == buttons.FORWARD:\n drone.forward(buttons.ORI_VALUE)\n elif e.button == buttons.BACKWARD:\n drone.backward(buttons.ORI_VALUE)\n elif e.button == buttons.RIGHT:\n drone.right(buttons.ORI_VALUE)\n elif e.button == buttons.LEFT:\n drone.left(buttons.ORI_VALUE)\ndef main():\n global buttons\n pygame.init()\n pygame.joystick.init()\n\n try:\n js = pygame.joystick.Joystick(0)\n js.init()\n js_name = js.get_name()\n print('Joystick name: ' + js_name)\n buttons = JoystickPS4\n except pygame.error:\n pass\n\n drone = tellopy.Tello()\n drone.connect()\n drone.start_video()\n drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)\n drone.subscribe(drone.EVENT_VIDEO_FRAME, handler)\n\n try:\n while 1:\n # loop with pygame.event.get() is too much tight w/o some sleep\n time.sleep(0.01)\n for e in pygame.event.get():\n handle_input_event(drone, e)\n except KeyboardInterrupt as e:\n print(e)\n except Exception as e:\n print(e)\n\n drone.quit()\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tellopy/examples/joystick_and_video.py","file_name":"joystick_and_video.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"92979911","text":"import string\n\ndef unique_letter (user_input):\n uniques_list = []\n for char in user_input:\n if char not in uniques_list:\n if char not in string.punctuation:\n uniques_list.extend(char.strip())\n return uniques_list\n\n# Main starts here\nsentence = input(\"Input a sentence: \")\nunique_letters = unique_letter(sentence)\n# Call the function here\nprint(\"Unique letters:\", unique_letters)\n","sub_path":"Assignment/Assignment 10/unique_letters.py","file_name":"unique_letters.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"127926244","text":"orders_total = 0 #amount of orders\r\n\r\nclass Order:\r\n def __init__(self,product, pack, name='o'):\r\n self.product = product #product object\r\n self.pack = pack #product to be delivered\r\n\r\n #Name of the order, the default name is: 
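(see below)\r\n        #\r\n        # editor's hedged usage sketch — Point is a hypothetical stand-in with .x/.y:\r\n        #     o = Order(Point(0, 0), Point(3, 4))   # o.name == \"order1\" for the first order\r\n        #     o.status()                            # -> \"order1, (0,0), (3,4)\"\r\n        #     Order(p, q, name=\"rush\").name         # -> \"rush\" (explicit name kept)\r\n        # default names: 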
\"order1\",\"order2\",...,\"orderN\"\r\n global orders_total\r\n orders_total+=1\r\n\r\n if name == 'o':\r\n self.name = \"order\" + str(orders_total)\r\n else:\r\n self.name = name\r\n\r\n #A informative string of the current order\r\n def status(self):\r\n origin = \"(\" + str(self.product.x) + \",\" + str(self.product.y) + \")\"\r\n destiny = \"(\" + str(self.pack.x) + \",\" + str(self.pack.y) + \")\"\r\n return self.name + \", \" + origin + \", \" + destiny\r\n\r\n def __eq__(self, other):\r\n return self.name == other.name\r\n","sub_path":"Delivery Robots/Order.py","file_name":"Order.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"406265419","text":"from flask import Flask, redirect, url_for\nimport os\n\ndef create_app(config):\n app = Flask(__name__)\n\n if config['env'] != 'local':\n app.debug = False\n app.testing = False\n else:\n app.debug = True\n app.testing = True\n\n from .api import mainApp\n app.register_blueprint(mainApp, url_prefix='/')\n\n # Add a default root route.\n @app.route(\"/\")\n def index():\n print('This is standard output')\n return redirect(url_for('mainApp.index'))\n return app\n\n @app.route('/')\n def fallback(dummy):\n return redirect(url_for('mainApp.index'))","sub_path":"api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"362223462","text":"import logging\n\nimport pytest\nfrom unittest.mock import Mock\n\nfrom ophyd.sim import make_fake_device\n\nfrom pcdsdevices.pim import PIM, PIMMotor, PPM, XPIM\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope='function')\ndef fake_pim():\n FakePIM = make_fake_device(PIMMotor)\n pim = FakePIM('Test:Yag', name='test')\n pim.state.sim_put(0)\n pim.state.sim_set_enum_strs(['Unknown'] + PIMMotor.states_list)\n pim.motor.error_severity.sim_put(0)\n pim.motor.bit_status.sim_put(0)\n pim.motor.motor_spg.sim_put(2)\n return pim\n\n\n@pytest.mark.timeout(5)\ndef test_pim_stage(fake_pim):\n logger.debug('test_pim_stage')\n pim = fake_pim\n # Should return to original position on unstage\n pim.move('OUT', wait=True)\n assert pim.removed\n pim.stage()\n pim.move('IN', wait=True)\n assert pim.inserted\n pim.unstage()\n assert pim.removed\n pim.move('IN', wait=True)\n assert pim.inserted\n pim.stage()\n pim.move('OUT', wait=True)\n assert pim.removed\n pim.unstage()\n assert pim.inserted\n\n\n@pytest.mark.timeout(5)\ndef test_pim_det():\n logger.debug('test_pim_det')\n FakePIM = make_fake_device(PIM)\n FakePIM('Test:Yag', name='test', prefix_det='potato')\n FakePIM('Test:Yag', name='test')\n\n\n@pytest.mark.timeout(5)\ndef test_pim_subscription(fake_pim):\n logger.debug('test_pim_subscription')\n pim = fake_pim\n cb = Mock()\n pim.subscribe(cb, event_type=pim.SUB_STATE, run=False)\n pim.state.sim_put(2)\n assert cb.called\n\n\n@pytest.mark.timeout(5)\ndef test_pim_disconnected():\n PIM('TST:YAG', name='tst', prefix_det='tstst')\n\n\n@pytest.mark.timeout(5)\ndef test_ppm_disconnected():\n PPM('IM7S7:PPM', name='im7s7')\n\n\n@pytest.mark.timeout(5)\ndef test_xpim_disconnected():\n XPIM('IM7S7:PPM', name='im7s7')\n","sub_path":"tests/test_pim.py","file_name":"test_pim.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"160293573","text":"\"\"\"\n Given a non-empty string s and an 
abbreviation abbr, return whether the string matches with the given abbreviation.\n\nA string such as \"word\" contains only the following valid abbreviations:\n\n[\"word\", \"1ord\", \"w1rd\", \"wo1d\", \"wor1\", \"2rd\", \"w2d\", \"wo2\", \"1o1d\", \"1or1\", \"w1r1\", \"1o2\", \"2r1\", \"3d\", \"w3\", \"4\"]\n\nNotice that only the above abbreviations are valid abbreviations of the string \"word\". Any other string is not a valid abbreviation of \"word\".\n\nNote:\nAssume s contains only lowercase letters and abbr contains only lowercase letters and digits.\n\nExample 1:\n\nGiven s = \"internationalization\", abbr = \"i12iz4n\":\n\nReturn true.\n\nExample 2:\n\nGiven s = \"apple\", abbr = \"a2e\":\n\nReturn false.\n\"\"\"\n\ndef validWordAbbreviation(word, abbr):\n\ti, j = 0, 0\n\twhile i < len(word) and j < len(abbr):\n\t\tif abbr[j].isdigit():\n\t\t\tif abbr[j] == \"0\":  # a count may not start with a leading zero\n\t\t\t\treturn False\n\t\t\tk = j\n\t\t\twhile k < len(abbr) and abbr[k].isdigit():\n\t\t\t\tk += 1\n\t\t\ti += int(abbr[j:k])\n\t\t\tj = k\n\t\telif word[i] == abbr[j]:\n\t\t\ti += 1\n\t\t\tj += 1\n\t\telse:\n\t\t\treturn False\n\treturn i == len(word) and j == len(abbr)\n (motion_ts > threshold)\n    outlier_1  outlier_2\n    0    0    0\n    1    1    0\n    2    0    0\n    3    0    1\n    4    0    0\n\n    2.) a df with counts of outlier and non-outlier trs\n    outlier  n_tr\n    0  False  3\n    1  True   2\n\n    note, from Ciric et al., \"the conversion of FD to RMS displacement is approximately 2:1\"...\n    -> here we are using FD for spike thr, so a value of 0.5 is ~ to the 0.25mm RMS spike thr of 36P method\n\n    \"\"\"\n    df = pd.DataFrame({\"motion\": motion_ts})\n    df.fillna(value=0, inplace=True)  # first value is nan\n    df[\"outlier\"] = df[\"motion\"] > threshold\n    outlier_stats = df.groupby(\"outlier\").count().reset_index().rename(columns={\"motion\": \"n_tr\"})\n\n    df[\"outliers_num\"] = 0\n    df.loc[df.outlier, \"outliers_num\"] = range(1, df.outlier.sum() + 1)\n    outliers = pd.get_dummies(df.outliers_num, dtype=int, drop_first=True, prefix=\"outlier\")\n\n    return outliers, outlier_stats\n\n\ndef get_confounds(confounds_file, kind=\"36P\", spikereg_threshold=None):\n    \"\"\"\n    takes a fmriprep confounds file and creates data frame with regressors.\n    kind == \"36P\" returns Satterthwaite's 36P confound regressors\n    kind == \"9P\" returns CSF, WM, Global signal + 6 motion parameters (used in \n    Ng et al., 2016)\n    kind == \"aCompCor\"* returns model no. 11 from Parkes\n    kind == \"24aCompCor\"* returns model no. 7 from Parkes\n    kind == \"24aCompCorGsr\"* returns model no. 9 from Parkes\n\n    * fmriprep only provides 5 components overall... not sure if it's exactly\n    the same as in the paper, as they mention 5 from wm, 5 from csf\n\n    if spikereg_threshold=None, no spike regression is performed\n\n    Satterthwaite, T. D., Elliott, M. A., Gerraty, R. T., Ruparel, K., \n    Loughead, J., Calkins, M. E., et al. (2013). An improved framework for \n    confound regression and filtering for control of motion artifact in the \n    preprocessing of resting-state functional connectivity data. NeuroImage, \n    64, 240-256. http://doi.org/10.1016/j.neuroimage.2012.08.052\n\n    Parkes, L., Fulcher, B., Yücel, M., & Fornito, A. (2018). An evaluation\n    of the efficacy, reliability, and sensitivity of motion correction\n    strategies for resting-state functional MRI. NeuroImage, 171, 415-436.\n\n    Ng et al. (2016). 
http://doi.org/10.1016/j.neuroimage.2016.03.029\n \"\"\"\n if kind not in [\"36P\", \"9P\", \"6P\", \"aCompCor\", \"24aCompCor\", \"24aCompCorGsr\"]:\n raise Exception(\"Confound type unknown {}\".format(kind))\n\n df = pd.read_csv(confounds_file, sep=\"\\t\")\n\n # extract nusiance regressors for movement + signal\n p6 = df[['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']]\n p9 = df[['csf', 'white_matter', 'global_signal', 'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']]\n \n # 6Pder\n p6_der = p6.diff().fillna(0)\n p6_der.columns = [c + \"_der\" for c in p6_der.columns]\n \n # 9Pder\n p9_der = p9.diff().fillna(0)\n p9_der.columns = [c + \"_der\" for c in p9_der.columns]\n \n # 12P\n p12 = pd.concat((p6, p6_der), axis=1)\n p12_2 = p12 ** 2\n p12_2.columns = [c + \"_2\" for c in p12_2.columns]\n \n # 18P + 18P^2\n p18 = pd.concat((p9, p9_der), axis=1)\n p18_2 = p18 ** 2\n p18_2.columns = [c + \"_2\" for c in p18_2.columns]\n \n # 36P\n p36 = pd.concat((p18, p18_2), axis=1)\n\n # GSR4\n gsr = df['global_signal']\n gsr_der = gsr.diff().fillna(0)\n gsr_der2 = gsr_der ** 2\n gsr4 = pd.concat((gsr, gsr_der, gsr_der2), axis=1)\n gsr4['sqrterm'] = np.power(range(1, gsr.shape[0]+1), 2)\n\n # get compcor nuisance regressors and combine with 12P\n aCompC = df.filter(regex='a_comp_cor_')\n p12aCompC = pd.concat((p12, aCompC), axis=1)\n p24aCompC = pd.concat((p12, p12_2, aCompC), axis=1)\n\n if kind == \"36P\":\n confounds = p36\n elif kind == \"9P\":\n confounds = p9\n elif kind == \"6P\":\n confounds = p6\n elif kind == \"aCompCor\":\n confounds = p12aCompC\n elif kind == \"24aCompCor\":\n confounds = p24aCompC\n elif kind == \"24aCompCorGsr\":\n confounds = pd.concat((p24aCompC, gsr4), axis=1)\n else:\n # it will never get here, but assign confounds so my linter doesn't complain\n confounds = ''\n exit(1)\n\n if spikereg_threshold:\n threshold = spikereg_threshold\n else:\n # if no spike regression still call get_spikereg_confounds to get count\n # of available trs\n threshold = 99999\n outliers, outlier_stats = get_spikereg_confounds(df[\"framewise_displacement\"].values, threshold)\n\n if spikereg_threshold:\n confounds = pd.concat([confounds, outliers], axis=1)\n\n return confounds, outlier_stats\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='nusiance regression')\n parser.add_argument('fmri', type=str, help='input fmri to be denoised')\n parser.add_argument('mask', type=str, help='input mask in same space as fmri')\n parser.add_argument('confounds', type=str, help='input confounds file (from fmriprep)')\n parser.add_argument('-tr', type=float, help='tr of image (for bandpass filtering)', default=0)\n parser.add_argument('-strategy', type=str, help='confound strategy',\n choices=[\"36P\", \"9P\", \"6P\", \"aCompCor\", \"24aCompCor\", \"24aCompCorGsr\"],\n default='36P')\n parser.add_argument('-spikethr', type=float, help='spike threshold value',\n default=0.5)\n parser.add_argument('-fwhm', type=float, help='smoothing fwhm',\n default=6.0)\n parser.add_argument('-out', type=str, help='ouput base name',\n default='output')\n\n # parse\n args = parser.parse_args()\n\n # print the args\n print(\"\\nARGS: \")\n for arg in vars(args):\n print(\"{} {}\".format(str(arg), str(getattr(args, arg))))\n print(\"END ARGS\\n\")\n\n # read in the data\n inputImg = nib.load(args.fmri)\n inputMask = nib.load(args.mask)\n\n # call nuisance regress, get a nib Nifti1Image\n nrImg, outldf, outdfstat = nuisance_regress(inputImg, inputMask, args.confounds, inputtr=args.tr, 
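\n        # editor's worked example (hedged): with framewise displacement\n        # [0.1, 0.7, 0.2, 0.6, 0.1] and spikethr 0.5, get_spikereg_confounds one-hot\n        # encodes TRs 1 and 3 as the outlier_1/outlier_2 columns shown in its docstring,\n        # and get_confounds concatenates those columns onto the chosen confound set —\n        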
conftype=args.strategy, spikethr=args.spikethr, smoothkern=args.fwhm)\n\n    # write it\n    nib.save(nrImg, ''.join([args.out, '_nuisance.nii.gz']))\n    outldf.to_csv(''.join([args.out, '_outlierdf.csv']))\n    outdfstat.to_csv(''.join([args.out, '_outlierstat.csv']))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/regress.py","file_name":"regress.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"346860083","text":"import pandas as pd\n\ndef category_filter(json_file_path):\n    title_uk = 'title==\"イギリス\"'\n    df = pd.read_json(json_file_path, lines=True)\n    uk = df.query(title_uk)['text'].values[0]\n    lines = uk.split('\\n')\n    return list(filter(lambda x: '[Category:' in x, lines))\n\nif __name__ == \"__main__\":\n    file = './ch03/jawiki-country.json'\n    ans = category_filter(file)\n    for a in ans:\n        print(a)\n","sub_path":"ch03/ans21.py","file_name":"ans21.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"539294137","text":"#!/usr/bin/python3\nimport sqlite3\nres = 0\nconnection = sqlite3.connect(\"bots.db\")\n\ncursor = connection.cursor()\n\ncursor.execute(\"SELECT * FROM bot\")\nresult = cursor.fetchall()\nfor r in result:\n    res += 1\n    #print(r[0])\n    print(r)\n    print(res)\n\n# delete \n#cursor.execute(\"\"\"DROP TABLE employee;\"\"\")\n\n#sql_command = \"\"\"INSERT INTO PLAYER (name, password)\n    #VALUES (\"admin\", \"admin\");\"\"\"\n#cursor.execute(sql_command)\n\n# never forget this, if you want the changes to be saved:\n#connection.commit()\n\nconnection.close()\n#print(\"1\")","sub_path":"sql_print_bots.py","file_name":"sql_print_bots.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"426127675","text":"# Mr. vega is a regular customer of the Miss Pizza shop.\n# He has started living frugally this month, so he wants to order the pizza\n# whose calories per dollar are highest among all pizzas the shop can make.\n# Call such a pizza the \"best pizza\" (there may be more than one).\n# Miss Pizza puts any subset of its N toppings on a dough; the same topping\n# cannot be used twice, and a pizza with no toppings may also be ordered.\n# The dough costs A dollars and every topping costs B dollars, so a pizza\n# with k toppings (0 <= k <= N) costs A + k*B dollars in total.\n# A pizza's calories are the dough's calories plus those of its toppings.\n# Given the prices and the calorie values of the dough and each topping,\n# print the calories per dollar of the \"best pizza\", rounded down.\n\nn = int(input())\na, b = map(int, input().split())\ndoughCal = int(input())\ntoppingCal = []\n\nfor i in range(n):\n    toppingCal.append(int(input()))\n\ntoppingCal.sort(reverse=True)\n\nbestPizzaCal = int(doughCal // a)\ntoppingCalSum = 0\nfor i in range(n):\n    toppingCalSum += toppingCal[i]\n    cal = (doughCal + toppingCalSum) // (a+(b*(i+1)))\n    if (bestPizzaCal < cal):\n        bestPizzaCal = cal\n\nprint(bestPizzaCal)\n ","sub_path":"Python/CodeUp/3321_최고의_피자.py","file_name":"3321_최고의_피자.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
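# --- editor's hedged sketch (names illustrative, not from the record above) ---
# Why the pizza greedy above is safe: for a fixed topping count k the price
# A + k*B is fixed, so calories-per-dollar is maximized by the k highest-calorie
# toppings; scanning every prefix of the descending sort covers all candidates.
def best_pizza(a, b, dough_cal, toppings):
    best, total = dough_cal // a, dough_cal
    for k, cal in enumerate(sorted(toppings, reverse=True), start=1):
        total += cal
        best = max(best, total // (a + k * b))
    return best

assert best_pizza(10, 5, 100, [200, 50]) == 20  # one topping: 300 cal / 15 dollars
# --- end sketch ---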
\"\"\"\n Returns the upper bound of the interval.\n \"\"\"\n\n return self._rbound\n\n def contains(self, x):\n \"\"\"\n Returns True if self contains the point x and False otherwise.\n \"\"\"\n\n if self._lbound < x and self._rbound > x:\n return True\n return False\n\n def intersects(self, other):\n \"\"\"\n Returns True if self intersects other and False othewise.\n \"\"\"\n\n if self._lbound >= other._lbound and self._lbound <= other._rbound:\n return True\n if self._rbound >= other._lbound and self._rbound <= other._rbound:\n return True\n return False\n\n def __str__(self):\n \"\"\"\n Returns a string representation of self.\n \"\"\"\n\n return '[' + str(self._lbound) + ', ' + str(self._rbound) + ']'\n\n\n# Test client [DO NOT EDIT]. Reads a float x from the command line and\n# writes to standard output: all of the intervals from standard input\n# (each defined by a pair of floats) that contain x; and all pairs\n# of intervals from standard input that intersect one another.\ndef _main():\n x = float(sys.argv[1])\n intervals = []\n while not stdio.isEmpty():\n lbound = stdio.readFloat()\n rbound = stdio.readFloat()\n intervals += [Interval(lbound, rbound)]\n for i in range(len(intervals)):\n if intervals[i].contains(x):\n stdio.writef('%s contains %f\\n', intervals[i], x)\n for i in range(len(intervals)):\n for j in range(i + 1, len(intervals)):\n if intervals[i].intersects(intervals[j]):\n stdio.writef('%s intersects %s\\n', intervals[i], intervals[j])\n\nif __name__ == '__main__':\n _main()\n","sub_path":"homework9/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"525553095","text":"import logging\nimport sys\n\nimport click\n\nfrom snuba import settings\n\n\n@click.command()\n@click.option('--clickhouse-server', multiple=True,\n help='Clickhouse server to optimize.')\n@click.option('--database', default='default',\n help='Name of the database to target.')\n@click.option('--table', default=settings.DEFAULT_LOCAL_TABLE,\n help='Name of the table to target.')\n@click.option('--timeout', default=10000, type=int,\n help='Clickhouse connection send/receive timeout, must be long enough for OPTIMIZE to complete.')\n@click.option('--log-level', default=settings.LOG_LEVEL, help='Logging level to use.')\ndef optimize(clickhouse_server, database, table, timeout, log_level):\n from datetime import datetime\n from snuba.clickhouse import ClickhousePool\n from snuba.optimize import run_optimize, logger\n\n logging.basicConfig(level=getattr(logging, log_level.upper()), format='%(asctime)s %(message)s')\n\n if not clickhouse_server:\n logger.error(\"Must provide at least one Clickhouse server.\")\n sys.exit(1)\n\n today = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)\n for server in clickhouse_server:\n clickhouse = ClickhousePool(\n server.split(':')[0], port=int(server.split(':')[1]), send_receive_timeout=timeout\n )\n num_dropped = run_optimize(clickhouse, database, table, before=today)\n logger.info(\"Optimized %s partitions on %s\" % (num_dropped, server))\n","sub_path":"snuba/cli/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"649436360","text":"from __future__ import absolute_import\n\nimport requests\nimport re\nfrom io import BytesIO\nfrom pathlib import Path\nfrom urllib.parse import quote\n\nfrom 
.exceptions import (\n StorageAuthenticationException,\n StorageNetworkException,\n StorageServerException,\n StorageCountryNotSupportedException,\n)\nfrom .models import (\n HttpOptions,\n HttpRecordRead,\n HttpRecordWrite,\n HttpRecordBatchWrite,\n HttpRecordFind,\n HttpAttachmentMeta,\n)\nfrom .validation import validate_http_response\nfrom .retry import retry_on_server_exception\nfrom .__version__ import __version__\nfrom .countries_cache import CountriesCache\nfrom .token_clients.token_client import TokenClient\nfrom .http_request import http_request\n\n\nclass HttpClient:\n DEFAULT_COUNTRY = \"us\"\n DEFAULT_ENDPOINT_MASK = \"-mt-01.api.incountry.io\"\n AUTH_TOTAL_RETRIES = 1\n\n DEFAULT_AUTH_REGION = \"default\"\n\n def __init__(\n self,\n env_id: str,\n token_client: TokenClient,\n endpoint: str = None,\n debug: bool = False,\n endpoint_mask: str = None,\n countries_cache: CountriesCache = None,\n server_side_encryption=False,\n options: HttpOptions = None,\n ):\n self.token_client = token_client\n self.endpoint = endpoint\n self.env_id = env_id\n self.debug = debug\n self.endpoint_mask = endpoint_mask\n self.server_side_encryption = server_side_encryption\n\n if options is None:\n self.options = HttpOptions()\n else:\n self.options = options if isinstance(options, HttpOptions) else HttpOptions(**options)\n\n self.countries_cache = countries_cache if countries_cache is not None else CountriesCache()\n\n if self.endpoint is None:\n self.log(\n f\"Connecting to default endpoint: \"\n f\"https://.{self.endpoint_mask or HttpClient.DEFAULT_ENDPOINT_MASK}. \"\n f\"Connection timeout {self.options.timeout}s\"\n )\n else:\n self.log(f\"Connecting to custom endpoint: {self.endpoint}. Connection timeout {self.options.timeout}s\")\n\n @validate_http_response(HttpRecordWrite)\n def write(self, country, data, request_options={}):\n (res, http_response) = self.request(country, method=\"POST\", data=data, request_options=request_options)\n return (res, http_response)\n\n @validate_http_response(HttpRecordBatchWrite)\n def batch_write(self, country, data, request_options={}):\n (res, http_response) = self.request(\n country, path=\"/batchWrite\", method=\"POST\", data=data, request_options=request_options\n )\n return (res, http_response)\n\n @validate_http_response(HttpRecordRead)\n def read(self, country, record_key, request_options={}):\n return self.request(country, path=f\"/{quote(record_key)}\", request_options=request_options)\n\n @validate_http_response(HttpRecordFind)\n def find(self, country, data, request_options={}):\n return self.request(country, path=\"/find\", method=\"POST\", data=data, request_options=request_options)\n\n def delete(self, country, record_key, request_options={}):\n return self.request(\n country,\n path=f\"/{quote(record_key)}\",\n method=\"DELETE\",\n request_options=request_options,\n )\n\n def batch_delete(self, country, data, request_options={}):\n return self.request(country, path=\"/batchDelete\", method=\"POST\", data=data, request_options=request_options)\n\n def health_check(self, country, request_options={}):\n http_response = None\n try:\n (_, http_response) = self.request(\n country,\n path=f\"/healthcheck\",\n method=\"GET\",\n request_options=request_options,\n use_records_path=False,\n )\n return (http_response.status_code == 200, http_response)\n except StorageCountryNotSupportedException:\n raise\n except StorageAuthenticationException:\n raise\n except StorageNetworkException:\n raise\n except StorageServerException as e:\n if e.scope != 
\"storage server request\":\n raise\n return (False, http_response)\n\n @validate_http_response(HttpAttachmentMeta)\n def add_attachment(self, country, record_key, file, upsert=False, mime_type=None, request_options={}):\n filename = Path(getattr(file, \"name\", \"file\")).name\n files = {\"file\": file}\n\n if mime_type is not None:\n files[\"file\"] = (filename, file, mime_type)\n\n return self.request(\n country,\n path=f\"/{quote(record_key)}/attachments\",\n method=\"PUT\" if upsert else \"POST\",\n files=files,\n request_options=request_options,\n )\n\n def delete_attachment(self, country, record_key, file_id, request_options={}):\n return self.request(\n country,\n path=f\"/{quote(record_key)}/attachments/{quote(file_id)}\",\n method=\"DELETE\",\n request_options=request_options,\n )\n\n def get_attachment_file(self, country, record_key, file_id, request_options={}):\n (_, http_response) = self.request(\n country,\n path=f\"/{quote(record_key)}/attachments/{quote(file_id)}\",\n method=\"GET\",\n request_options=request_options,\n )\n return (\n {\n \"filename\": self.get_filename_from_headers(http_response.headers),\n \"file\": BytesIO(http_response.content),\n },\n http_response,\n )\n\n @validate_http_response(HttpAttachmentMeta)\n def get_attachment_meta(self, country, record_key, file_id, request_options={}):\n return self.request(\n country,\n path=f\"/{quote(record_key)}/attachments/{quote(file_id)}/meta\",\n method=\"GET\",\n request_options=request_options,\n )\n\n @validate_http_response(HttpAttachmentMeta)\n def update_attachment_meta(self, country, record_key, file_id, meta, request_options={}):\n return self.request(\n country,\n path=f\"/{quote(record_key)}/attachments/{quote(file_id)}/meta\",\n method=\"PATCH\",\n data=meta,\n request_options=request_options,\n )\n\n @retry_on_server_exception(\n status_code=429,\n retry_base_delay=lambda instance: instance.options.retry_base_delay,\n retry_max_delay=lambda instance: instance.options.retry_max_delay,\n )\n def request(\n self,\n country,\n path=\"\",\n method=\"GET\",\n data=None,\n request_options={},\n retries=AUTH_TOTAL_RETRIES,\n files=None,\n use_records_path=True,\n ):\n try:\n (endpoint, audience, region) = self.get_request_pop_details(country)\n\n url = (\n self.get_request_url(endpoint, \"/v2/storage/records/\", country, path)\n if use_records_path\n else self.get_request_url(endpoint, path)\n )\n auth_token = self.token_client.get_token(\n audience=audience, region=region, refetch=retries < HttpClient.AUTH_TOTAL_RETRIES\n )\n\n params = {\n \"method\": method,\n \"url\": url,\n \"headers\": self.get_headers(auth_token=auth_token, **request_options.get(\"http_headers\", {})),\n \"timeout\": self.options.timeout,\n }\n\n if data is not None:\n params[\"json\"] = data\n if files is not None:\n params[\"files\"] = files\n\n return http_request(params, scope=\"storage server request\", debug=self.debug)\n except StorageAuthenticationException as e:\n if e.status_code == 401 and self.token_client.can_refetch and retries > 0:\n return self.request(\n country=country,\n path=path,\n method=method,\n data=data,\n request_options=request_options,\n retries=retries - 1,\n files=files,\n use_records_path=use_records_path,\n )\n else:\n raise e from None\n\n def get_request_pop_details(self, country):\n if self.endpoint and self.endpoint_mask is None:\n return (self.endpoint, self.endpoint, HttpClient.DEFAULT_AUTH_REGION)\n\n endpoint_mask_to_use = self.endpoint_mask or HttpClient.DEFAULT_ENDPOINT_MASK\n\n region = 
HttpClient.DEFAULT_AUTH_REGION\n endpoint = HttpClient.get_pop_url(HttpClient.DEFAULT_COUNTRY, HttpClient.DEFAULT_ENDPOINT_MASK)\n country_endpoint = HttpClient.get_pop_url(country, endpoint_mask_to_use)\n audience = endpoint\n\n if self.endpoint:\n endpoint = self.endpoint\n audience = endpoint if endpoint == country_endpoint else f\"{endpoint} {country_endpoint}\"\n else:\n country_details = self.countries_cache.get_country_details(country=country)\n\n if country_details[\"is_midpop\"]:\n endpoint = country_endpoint\n audience = endpoint\n region = country_details[\"region\"]\n else:\n endpoint = HttpClient.get_pop_url(HttpClient.DEFAULT_COUNTRY, endpoint_mask_to_use)\n audience = f\"{endpoint} {country_endpoint}\"\n\n return (endpoint, audience, region)\n\n def get_request_url(self, host, *parts):\n res_url = host.rstrip(\"/\")\n for part in parts:\n res_url += \"/\" + part.strip(\"/\")\n return res_url.strip(\"/\")\n\n def get_headers(self, auth_token, **additional_headers):\n return {\n \"Authorization\": \"Bearer \" + auth_token,\n \"x-env-id\": self.env_id,\n \"User-Agent\": \"SDK-Python/\" + __version__,\n **({\"X-Encrypted-Storage\": \"true\"} if self.server_side_encryption else {}),\n **additional_headers,\n }\n\n def get_filename_from_headers(self, headers):\n content_disposition = headers.get(\"content-disposition\", None)\n if content_disposition is None:\n return \"file\"\n filename_re_from_header = re.findall(\"filename\\\\*=UTF-8''([^;]*)\", headers[\"content-disposition\"])\n if len(filename_re_from_header) == 0:\n return \"file\"\n return requests.utils.unquote(filename_re_from_header[0].strip('\"'))\n\n def log(self, *args):\n if self.debug:\n print(\"[incountry] \", args)\n\n @staticmethod\n def get_pop_url(country, endpoint_mask=DEFAULT_ENDPOINT_MASK):\n endpoint_mask = endpoint_mask or HttpClient.DEFAULT_ENDPOINT_MASK\n return f\"https://{country}{endpoint_mask}\"\n","sub_path":"incountry/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":10857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"336849972","text":"from __future__ import print_function\n\nimport flask\nimport json\nfrom flask import jsonify, request\n\n\napp = flask.Flask(__name__)\n\n@app.route('/ping', methods=['GET'])\ndef ping():\n # health = ScoringService.get_model() is not None # You can insert a health check here\n health = True\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')\n\n@app.route('/invocations', methods=['POST'])\ndef transformation():\n if flask.request.content_type == 'application/json':\n result = json.dumps({\n 'first_name': request.json[\"first_name\"],\n 'last_name': request.json[\"last_name\"]\n })\n return flask.Response(response=result, status=200, mimetype='application/json')\n else:\n return flask.Response(response='This predictor only supports CSV data', status=415, mimetype='text/plain')","sub_path":"SageMaker/モデルのデプロイ/独自推論コードのデプロイ/example/estimator/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"287126400","text":"from lib import *\n\nclass AWSInterface():\n\n\tdef __init__(self):\n\t\tparser = SafeConfigParser()\n\t\tparser.read('device.conf')\n\t\tself.host = parser.get('device','host')\n\t\tself.port = int(parser.get('device','port'))\n\t\tself.clientId = 
parser.get('device','clientId')\n\t\tself.userId = parser.get('device','userId')\n\t\tself.topic = parser.get('device','topic')\n\t\tself.rootCAPath = parser.get('device','rootCAPath')\n\t\tself.privateKeyPath = parser.get('device','privateKeyPath')\n\t\tself.certificatePath = parser.get('device','certificatePath')\n\t\tself.growId = parser.get('grow','growId')\n\t\tparser.read('plant.conf')\n\t\tself.growStartDate = parser.get('PlantInfo','plantingDate')\n\t\tself.growStartDate = strtoDate(self.growStartDate)\n\t\tself.myAWSIoTMQTTClient = AWSIoTMQTTClient(self.clientId)\n\t\tself.myAWSIoTMQTTClient.configureEndpoint(self.host, self.port)\n\t\tself.myAWSIoTMQTTClient.configureCredentials(self.rootCAPath, self.privateKeyPath, self.certificatePath)\n\t\tself.myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)\n\t\tself.myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\n\t\tself.myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz\n\t\tself.myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec\n\t\tself.myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec\n\t\tif(self.myAWSIoTMQTTClient.connect()):\n\t\t\tprint(\"Connected successfully\")\n\t\telse:\n\t\t\tprint(\"Not Connected\")\n\n\tdef receiveData(self,topic,func):\n\t\tself.myAWSIoTMQTTClient.subscribe(topic,1,func)\n\n\tdef sendData(self,data):\n\t\tpacket = self.makePacket(data)\n\t\ttry:\n\t\t\tself.myAWSIoTMQTTClient.publish(self.topic, packet, 1)\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\traise(e)\n\t\t\treturn False\n\t\t\n\tdef makePacket(self,data):\n\t\tpacket = {}\n\t\tpacket['device_id'] = self.clientId\n\t\tpacket['user_id'] = self.userId\n\t\tpacket['time_stamp'] = str(datetime.datetime.now())\n\t\tpacket['sensor_data'] = data['sensor']\n\t\tpacket['grow_id'] = self.growId\n\t\tpacket['time_from_start'] = str(datetime.date.today()-self.growStartDate)\n\t\tpacket['actuator_data'] = data['actuator']\n\t\tiotPacket = json.dumps(packet)\n\t\treturn iotPacket\n\n\tdef sendCameraData(self,data):\n\t\tresponse = requests.post(\"https://aws.savetos3_api\",data=data)\n\t\treturn response","sub_path":"awsInterface.py","file_name":"awsInterface.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"21622741","text":"\"\"\"\nRelational join in MapReduce\n\"\"\"\nimport sys\n\nimport MapReduce\n\nmr = MapReduce.MapReduce()\n\ndef mapper(record):\n \"\"\"Mapper for relational join problem\"\"\"\n mr.emit_intermediate(record[1], record)\n\ndef reducer(order_id, order_items): #pylint: disable=I0011,W0613\n \"\"\"Reducer for relational join problem\"\"\"\n is_order = lambda x: x[0] == 'order'\n order = next(order_item for order_item in order_items if is_order(order_item))\n for line_item in order_items:\n if not is_order(line_item):\n order_line_item = order + line_item\n mr.emit(order_line_item)\n\nif __name__ == '__main__':\n with open(sys.argv[1]) as input_data:\n mr.execute(input_data, mapper, reducer)\n","sub_path":"assignment3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"614834079","text":"from __future__ import division\nimport numpy as np\nimport membership\n\ndef intersection(fuzzyInput, fuzzySet):\n\tif len(fuzzyInput) != len(fuzzySet):\n\t\traise ValueError(\"Argument arrays must have the same 
size\")\t\n\n\tminimum = []\n\n\tfor i in range(0, len(fuzzyInput)):\n\t\tminimum.append(min(fuzzyInput[i], fuzzySet[i]))\n\n\tresult = max(minimum)\n\n\treturn result","sub_path":"Experimentos/Blood/mamdani.py","file_name":"mamdani.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"640378798","text":"import igraph\nimport math as m\nimport random as r\nimport NetworkComponents as networks\n\n\nclass GraphTopology:\n\n def __init__(self, k_val, n_p, n_mb, max_vm_size, phys_host_cap=None):\n self.k = k_val\n self.n_pairs = n_p\n self.n_middleboxes = n_mb\n self.max_vm_size = max_vm_size\n\n self.hosts, self.edge_switches, self.agg_switches, self.core_switches = [], [], [], []\n self.middleboxes = []\n self.master_graph = None\n\n self.phys_host_cap = phys_host_cap\n\n def create_topology(self):\n fat_tree = igraph.Graph()\n k = self.k\n\n num_hosts = num_core_switches = int(m.pow(k / 2, 2))\n num_agg_switches = num_edge_switches = num_connections = k / 2\n\n # Make all the hosts, 2.0 needed to make expression float and thus roundable up to the nearest integer\n if self.phys_host_cap is None:\n host_capacity = int(m.ceil((self.n_pairs * 2.0 * self.max_vm_size) / (num_hosts * k)))\n else:\n host_capacity = self.phys_host_cap\n for i in range(0, num_hosts * k):\n host_name = \"host_{}\".format(i + 1)\n host = networks.PhysicalMachine(host_capacity, host_name)\n\n fat_tree.add_vertex(name=host_name)\n fat_tree.vs.select(name=host_name)[\"host\"] = host\n self.hosts.append(host)\n\n # Make all the edge switches\n pod_number, pod_index = 0, 0\n vertical_number = 1\n for i in range(0, int(num_edge_switches * k)):\n edge_switch_name = \"edge_switch_\" + str(i + 1)\n edge_switch = networks.PhysicalSwitch(edge_switch_name)\n edge_switch.set_p(pod_number)\n edge_switch.set_v(vertical_number)\n edge_switch.set_h(2)\n pod_index += 1\n vertical_number += 1\n\n if pod_index == k / 2:\n pod_index = 0\n pod_number += 1\n vertical_number = 0\n\n fat_tree.add_vertex(name=edge_switch_name)\n fat_tree.vs.select(name=edge_switch_name)[\"edge_switch\"] = edge_switch\n self.edge_switches.append(edge_switch)\n\n # Make all the aggregate switches\n pod_number, pod_index = 0, 0\n vertical_number = 0\n for i in range(0, int(num_agg_switches * k)):\n agg_switch_name = \"agg_switch_\" + str(i + 1)\n agg_switch = networks.PhysicalSwitch(agg_switch_name)\n agg_switch.set_p(pod_number)\n agg_switch.set_v(vertical_number)\n agg_switch.set_h(1)\n pod_index += 1\n vertical_number += 1\n\n if pod_index == k/2:\n pod_index = 0\n pod_number += 1\n vertical_number = 0\n\n fat_tree.add_vertex(name=agg_switch_name)\n fat_tree.vs.select(name=agg_switch_name)[\"agg_switch\"] = agg_switch\n self.agg_switches.append(agg_switch)\n\n # Make all the core switches\n for i in range(0, num_core_switches):\n core_switch_name = \"core_switch_\" + str(i + 1)\n core_switch = networks.PhysicalSwitch(core_switch_name)\n\n fat_tree.add_vertex(name=core_switch_name)\n fat_tree.vs.select(name=core_switch_name)[\"core_switch\"] = core_switch\n self.core_switches.append(core_switch)\n\n # Link Hosts to Edge Switches\n n_host = 0\n for edge_switch in self.edge_switches:\n for i in range(0, int(num_connections)):\n fat_tree.add_edge(source=edge_switch.get_name(), target=self.hosts[n_host].get_name())\n self.hosts[n_host].set_edge_switch(edge_switch)\n n_host += 1\n\n # Link Aggregation Switches to Edge Switches\n num_edge, n_edge = 0, 0\n for agg_switch in 
self.agg_switches:\n start_value = num_edge\n for i in range(0, int(num_edge_switches)):\n fat_tree.add_edge(source=agg_switch.get_name(), target=self.edge_switches[num_edge].get_name())\n num_edge += 1\n num_edge = start_value\n\n n_edge += 1\n if n_edge >= int(num_edge_switches):\n num_edge += num_edge_switches\n n_edge = 0\n\n # Link Core Switches to Aggregation Switches\n num_edge, agg_switch_index = 0, 0\n for core_switch in self.core_switches:\n start_value = agg_switch_index\n for i in range(0, k):\n fat_tree.add_edge(source=core_switch.get_name(), target=self.agg_switches[agg_switch_index].get_name())\n agg_switch_index += num_connections\n agg_switch_index = start_value\n num_edge += 1\n if num_edge >= int(num_edge_switches):\n agg_switch_index += 1\n num_edge = 0\n\n self.master_graph = fat_tree\n\n def create_middleboxes(self):\n middlebox_host = []\n for i in range(1, self.n_middleboxes + 1):\n new_middlebox = networks.MiddleBox(\"MiddleBox_\" + str(i))\n random_parent_switch = self.get_random_switch()\n\n while random_parent_switch.get_name() in middlebox_host:\n random_parent_switch = self.get_random_switch()\n\n new_middlebox.set_parent_switch(random_parent_switch)\n middlebox_host.append(random_parent_switch.get_name())\n self.middleboxes.append(new_middlebox)\n\n def get_random_switch(self):\n switches = self.agg_switches + self.edge_switches\n return switches[r.randint(0, len(switches) - 1)]\n\n def get_hosts(self):\n return self.hosts\n\n def get_topology_graph(self):\n return self.master_graph\n\n def get_middleboxes(self):\n return self.middleboxes\n\n def set_middleboxes(self, mbs):\n self.middleboxes = mbs\n\n def reset_physical_hosts(self):\n map(lambda phys_mach: phys_mach.clear_physical_machine(), self.hosts)\n\n def get_switches(self):\n return self.agg_switches + self.core_switches + self.edge_switches\n\n @staticmethod\n def get_distance(node1, node2):\n p1, v1, h1 = node1.get_p(), node1.get_v(), node1.get_h()\n p2, v2, h2 = node2.get_p(), node2.get_v(), node2.get_h()\n\n if p1 == p2:\n if v1 == v2:\n return 1\n else:\n if h1 == h2:\n return 2\n else:\n return 1\n else:\n if v1 == v2:\n if h1 == h2:\n return 2 * h1\n else:\n return 3\n else:\n if h1 == h2:\n return 4\n else:\n return 3","sub_path":"GraphTopology.py","file_name":"GraphTopology.py","file_ext":"py","file_size_in_byte":6681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"386685021","text":"import numpy as np\nfrom tqdm import tqdm\n\nfrom dataloader import ImageData, ImageLoader\nfrom utils import image_quality\nfrom utils.prepare_images import *\n\nDCSCN_12 = \"model_check_points/DCSCN/DCSCN_model_387epos_L12_noise_1.pt\"\nmodel_dcscn = torch.load(DCSCN_12)\n\nmodel_upconv7 = UpConv_7()\nmodel_upconv7.load_pre_train_weights(\"model_check_points/Upconv_7/anime/noise0_scale2.0x_model.json\")\n\nimg_dataset = ImageData(img_folder='demo/demo_imgs/',\n max_patch_per_img=1000,\n patch_size=96,\n shrink_size=2,\n noise_level=1,\n down_sample_method=Image.BICUBIC,\n color_mod='RGB')\n\nimg_data = ImageLoader(img_dataset,\n up_sample=None,\n batch_size=1,\n shuffle=True,\n pad_img=model_upconv7.offset) # DCSCN must set pad_img = 0\nssim_score = []\npsnr_score = []\nfor img in tqdm(img_data, ascii=True):\n lr, hr = img\n out = model_upconv7.forward_checkpoint(lr)\n psnr_score.append(image_quality.calc_psnr(out, hr))\n ssim_score.append(image_quality.msssim(out, hr))\n\nprint(\"Averge PSNR score: {:.4f}\".format(np.mean(psnr_score)))\nprint(\"Average 
MS-SSIM score: {:.4f}\".format(np.mean(ssim_score)))\n","sub_path":"examples/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"351728345","text":"import csv\nimport random\nimport math\nimport sklearn\nfrom sklearn import datasets\nfrom sklearn import svm\n\ndef loadCsv(filename):\n\tlines = csv.reader(open(filename, \"rb\"))\n\tdataset = list(lines)\n\tfor i in range(len(dataset)):\n\t\tdataset[i] = [float(x) for x in dataset[i]]\n\treturn dataset\n\ndef splitDataset(dataset, splitRatio):\n\ttrainSize = int(len(dataset) * splitRatio)\n\ttrainSet = []\n\tcopy = list(dataset)\n\twhile len(trainSet) < trainSize:\n\t\tindex = random.randrange(len(copy))\n\t\ttrainSet.append(copy.pop(index))\n\treturn [trainSet, copy]\n\ndef getAccuracy(testSet, predictions):\n\tcorrect = 0\n\tfor x in range(len(testSet)):\n\t\tif testSet[x][-1] == predictions[x]:\n\t\t\tcorrect += 1\n\treturn (correct/float(len(testSet))) * 100.0\n\t\ndef main ():\n\tfilename = 'spambase.data.csv'\n\tsplitRatio = 0.7\n\tdataset = loadCsv(filename)\n\ttrainSet,testSet = splitDataset(dataset, splitRatio)\n\ttrainLabel = [row[57] for row in trainSet]\n\tprint('Split {0} rows into train={1} and test={2} rows').format(len(dataset), len(trainSet), len(testSet))\n\tclf = svm.SVC(probability = True)\n\tclf.fit(trainSet, trainLabel)\n\tprobs =clf.predict_proba(testSet)\n\tans = clf.predict(testSet)\n\taccuracy = getAccuracy(testSet, ans)\n\tprint('Accuracy: ' + repr(accuracy) + '%')\n\nimport time\nstart_time = time.time();\nmain()\nprint(\"%s seconds\" % (time.time() - start_time))","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"377899620","text":"from datasets import SSTDataset, PhraseDataset\nfrom configs import parser, tre_config\nimport ipdb as pdb\nfrom ipdb import slaunch_ipdb_on_exception\nfrom typing import Dict\nimport json\nfrom tre import evals2 as tre_eval\nimport torch.nn as nn\nimport constants\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom nltk import Tree\n\nsns.set(font_scale=1.5)\nsns.set_style(\"ticks\", {'font.family': 'serif'})\nimport os\n\n\ndef ranks(arr):\n return np.array(arr).argsort().argsort()\n\n\ndef main(kwargs: Dict):\n pdb.set_trace()\n\n composition_fn = constants.composition_fn_dict[kwargs['composition_fn']]\n dataset = constants.dataset_dict[kwargs['dataset_name']](**kwargs)\n # dataset.load_data()\n\n prefix = '_'.join([kwargs['dataset_name'], kwargs['encoder_name'], ''])\n with open(prefix + kwargs['output_embeddings_file'], 'r') as f:\n output = json.load(f)\n # metadata = output['metadata']\n\n # Assumes the expressions are the keys and the representations are the\n # values.\n if 'embedding_dict' in output:\n embedding_dict = output['embedding_dict']\n else:\n embedding_dict = output\n reps, keys, exprs, offset = dataset.prepare_tre_input(embedding_dict)\n\n # Assumes the compositional expressions and representations are the last\n # n items.\n errs = tre_eval.evaluate(\n np.array(reps), exprs, composition_fn(), tre_eval.CosDist(),\n zero_init=False)\n\n if isinstance(exprs[0], Tree):\n exprs = [\" \".join(e.leaves()) for e in exprs]\n ecomp = exprs[-offset:]\n errs = errs[-offset:]\n\n pdb.set_trace()\n scores = 
dataset.get_gt_tre_scores(keys[-offset:])\n\n r_errs = ranks(errs)\n r_scores = ranks(scores)\n\n data = pd.DataFrame({'err': r_errs, 'score': r_scores})\n sns.lmplot(x='err', y='score', data=data)\n plt.title('Encoder: {encoder_name}; Dataset: {dataset_name}'.format(\n encoder_name=kwargs['encoder_name'], dataset_name=kwargs['dataset_name']))\n plt.xlabel('TRE (rank)')\n plt.ylabel('compositionality (rank)')\n # plt.ylim(0, 5)\n plt.savefig('%s_correl_plot_full.png' % prefix, format='png')\n # plt.show()\n print(scipy.stats.spearmanr(errs, scores))\n\n comb = zip(scores, errs, ecomp)\n comb = sorted(comb, key=lambda x: x[1])\n df = pd.DataFrame(comb, columns=[\"human score\", \"model err\", \"words\"])\n df['model_ranks'] = ranks(df['model err'].values)\n df['human_ranks'] = ranks(df['human score'].values)\n df['rank_correl'] = scipy.stats.spearmanr(df['model err'].values, df['human score'].values).correlation\n df.to_csv('%s_compositional_scores_full.csv' % prefix, index=True)\n\n pdb.set_trace()\n print(\"compositional:\")\n print(\"%20s %20s %40s\" % (\"human score\", \"model err\", \"words\"))\n for c in comb[:5]:\n if isinstance(c[2], Tree):\n text = \" \".join(c[2].leaves())\n print(\"%20.2f %20.2f %40s\" % (c[0], c[1], text))\n else:\n print(\"%20.2f %20.2f %40s\" % c)\n print()\n print(\"non-compositional:\")\n print(\"%20s %20s %40s\" % (\"human score\", \"model err\", \"words\"))\n for c in comb[-5:]:\n if isinstance(c[2], Tree):\n text = \" \".join(c[2].leaves())\n print(\"%20.2f %20.2f %40s\" % (c[0], c[1], text))\n else:\n print(\"%20.2f %20.2f %40s\" % c)\n\n\nif __name__ == \"__main__\":\n parser = tre_config(parser)\n args = parser.parse_args()\n\n if not args.debug:\n pdb.set_trace = lambda: None\n\n with slaunch_ipdb_on_exception():\n main(vars(args))\n","sub_path":"project/run_tre.py","file_name":"run_tre.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"469571801","text":"#!/usr/bin/python\n\"\"\"\ncompute the PCA for each region.\n\"\"\"\nimport sys,os\nfrom glob import glob\nsys.path.append('./library')\nfrom mrjob.job import MRJob,MRStep\nimport mrjob.protocol\nimport pandas as pd\nimport numpy as np\nimport Statistics2 as Stat\nimport pickle\n#from ECatch import ECatch\nfrom time import time\nV_len=365\n\nlog=sys.stderr\nlog.write('**starting MRpca**\\n\\n')\nlog.flush()\nCounter=0\n\nclass MRpca(MRJob):\n\n # When using PickleProtocol no extra work is needed, but the pickling is much slower\n # increasing from 0.06sec to 0.25 second. 
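A quick check of that claim (editor's\n    # hedged sketch): compare timeit.timeit(lambda: pickle.dumps(stats), number=1000)\n    # against json.dumps of Stat.VecStat.to_lists() — plain lists skip the class\n    # machinery, which is why to_lists()/from_lists() ferry data between mapper and\n    # reducer below.\n    # 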
And the serialization is already the slowest part.\n # INTERNAL_PROTOCOL = mrjob.protocol.PickleProtocol\n\n INTERNAL_PROTOCOL = mrjob.protocol.JSONProtocol\n OUTPUT_PROTOCOL = mrjob.protocol.JSONProtocol\n \n global Counter\n #@ECatch\n def mapper_init(self):\n \"read the station partitioning table\"\n log.write('current working directory='+os.getcwd() + \"\\n\")\n filenames=glob('*')\n log.write(str(filenames)+'\\n')\n log.flush()\n os.system('ls -l')\n file=open('Partition_Tree.pkl','rb')\n Ptree=pickle.load(file)\n file.close()\n T=Ptree['Partitioned_Stations']\n self.Partition=T[['block','Node','weight']]\n self.Counter=0\n self.t_end=time()\n\n #def mapper(self,_,line):\n # self.mapper_step(line)\n\n #@ECatch\n def mapper(self,_, line):\n self.t0=time()\n elements=line.split(',')\n if elements[1]=='TMAX': \n station=elements[0]\n vec=np.zeros(V_len)\n nulls=0\n for i in range(3,len(elements)):\n if elements[i]=='':\n vec[i-3]=np.nan\n nulls+=1\n else:\n vec[i-3]=int(elements[i])\n if nulls<=65:\n #log.write('mapping line: '+str(self.Counter)+'\\n')\n self.Counter+=1\n S=Stat.VecStat(V_len)\n S.accum(vec)\n P_row=self.Partition.loc[station]\n key=P_row['block']\n self.t8=time()\n log.write('Mapper times='+str([self.t0-self.t_end,self.t8-self.t0])+'\\n')\n self.t_end=self.t8\n yield(key,S.to_lists())\n\n def reducer_init(self):\n \"initialize VecStat\"\n self.VS=Stat.VecStat(V_len)\n self.Counter=0\n \n #@ECatch\n def reducer(self,key,val):\n log.write('start reduce on '+str(key)+'\\n')\n try:\n while True:\n U_aslists = next(val)\n U=Stat.VecStat(V_len)\n U.from_lists(U_aslists)\n #U=next(val)\n log.write('reducing Iteration: '+str(self.Counter)+'\\n')\n self.Counter+=1\n self.VS.add(U)\n except StopIteration:\n log.write('end reduce on '+str(key)+'\\n')\n yield (key,self.VS.to_lists())\n #yield (key,self.VS)\n\n def steps(self):\n return[\n MRStep(mapper_init=self.mapper_init,\n mapper=self.mapper,\n reducer_init=self.reducer_init,\n reducer=self.reducer,\n combiner_init=self.reducer_init,\n combiner=self.reducer)\n ]\n\nif __name__ == '__main__':\n #print 'yey'\n MRpca.run()","sub_path":"notebooks/weather.mapreduce/mr_pca.py","file_name":"mr_pca.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"530845919","text":"#!/usr/bin/python\nimport jieba\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nif len(sys.argv) != 3:\n print(\"Usage: cut.py [input file] [output file]\")\n exit()\n\ninfile = sys.argv[1]\noutfile = sys.argv[2]\noutstream = open(outfile, 'w')\nfor line in open(infile):\n #print line\n seg_list = jieba.cut(line, cut_all=False, HMM=True)\n outstream.write(\" \".join(seg_list))\n #print \" \".join(seg_list)\noutstream.close()\n\n","sub_path":"python/titleVector/cut.py","file_name":"cut.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"567041182","text":"import common\nimport datetime\nimport models\nimport qmongo\ndef get_permission(args):\n user_role = qmongo.models.auth_user_info.aggregate.project(username = 1, role_code = 1)\\\n .match(\"username == {0}\", args['user'].username).get_item()\n ret = common.get_collection(\"AD_Roles\").aggregate([\n {\"$match\": {\n \"role_code\": user_role.get('role_code', None)\n }},\n {\"$unwind\":{\n \"path\":\"$permission\",\n \"preserveNullAndEmptyArrays\":False\n }},\n {\"$match\":{\n 
\"permission.function_id\":args['data']['function_id']\n }},\n {\"$project\":{\n \"_id\":0,\n \"function_id\": \"$permission.function_id\",\n \"read\": \"$permission.read\",\n \"create\": \"$permission.create\",\n \"write\": \"$permission.write\",\n \"delete\": \"$permission.delete\",\n \"export\": \"$permission.export\",\n \"import\": \"$permission.import\",\n \"copy\": \"$permission.copy\",\n \"attach\": \"$permission.attach\",\n \"download\": \"$permission.download\",\n \"print\": \"$permission.print\",\n \"action\": \"$permission.action\"\n }}\n ])\n\n rs = list(ret)\n\n return (lambda x: x[0] if len(x) > 0 else {\n \"function_id\": args['data']['function_id'],\n \"read\": False,\n \"create\": False,\n \"write\": False,\n \"delete\": False,\n \"export\": False,\n \"import\": False,\n \"copy\": False,\n \"attach\": False,\n \"download\": False,\n \"print\": False,\n \"action\": False\n })(rs)\n","sub_path":"apps/lms/api/authenticate_permission.py","file_name":"authenticate_permission.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"473531554","text":"import csv\nimport maidenhead as mh\n\ngrid_list = []\n\nwith open('contacts_latlon.txt') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n lat = float(row[1])\n lon = float(row[2])\n level = 4\n grid = mh.toMaiden(lat,lon)[0:4]\n if grid not in grid_list:\n grid_list.append(grid)\n\nprint(grid_list)\nwith open('seen_grids.txt', 'w') as f:\n for item in grid_list:\n f.write(\"%s\\n\" % item)\n","sub_path":"get_grid_squares.py","file_name":"get_grid_squares.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"581695151","text":"__author__ = 'twlhgs'\n\nclass FindMaxCount:\n\n def __init__(self):\n pass\n\n def find_match_by_max_count(self, num):\n dic = self._make_dic(num)\n max_count = self._find_max_value(dic)\n match_lst = []\n for key in dic.keys():\n if dic[key] == max_count:\n match_lst.append(key)\n match_lst.sort()\n return list(map(int, match_lst)), max_count\n\n def _find_max_value(self, dic):\n max_cun = max(dic.values())\n return max_cun\n\n def _make_dic(self, num):\n count_dict = {}\n full_lst = list(str(num))\n set1 = set(full_lst)\n for i in set1:\n count_dict.update({i: full_lst.count(i)})\n return count_dict\n\ninput_num = input('Input N: ')\nfind_max = FindMaxCount()\nmatch_list = list(map(str, find_max.find_match_by_max_count(input_num)[0]))\nmax_count = find_max.find_match_by_max_count(input_num)[1]\nprint(','.join(match_list), ' ({})'.format(max_count))\n\n","sub_path":"python101/homework/util/FindMaxCount.py","file_name":"FindMaxCount.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"377748020","text":"import pygame\r\nimport os\r\nimport Objects\r\nimport ScreenEngine\r\nimport Logic\r\nimport Service\r\n\r\n\r\n# SCREEN_DIM = (1400, 1050)\r\n\r\npygame.init()\r\n\r\nvideo_info = pygame.display.Info()\r\nSCREEN_DIM = (video_info.current_w, video_info.current_h)\r\n\r\ngameDisplay = pygame.display.set_mode(SCREEN_DIM)\r\npygame.display.set_caption(\"MyRPG\")\r\nKEYBOARD_CONTROL = True\r\n\r\nif not KEYBOARD_CONTROL:\r\n import numpy as np\r\n answer = np.zeros(4, dtype=float)\r\n\r\nbase_stats = {\r\n \"strength\": 20,\r\n \"endurance\": 20,\r\n \"intelligence\": 5,\r\n \"luck\": 
5\r\n}\r\n\r\n\r\ndef create_game(sprite_size, is_new):\r\n    global hero, engine, drawer, iteration\r\n    if is_new:\r\n        hero = Objects.Hero(base_stats,\r\n                            Service.create_sprite(\r\n                                os.path.join(\"texture\", \"Hero.png\"),\r\n                                sprite_size))\r\n        engine = Logic.GameEngine()\r\n        Service.service_init(sprite_size)\r\n        Service.reload_game(engine, hero)\r\n\r\n        engine.screen_dim = SCREEN_DIM\r\n        engine.info_width = int(SCREEN_DIM[0] * 0.15)\r\n        engine.info_height = SCREEN_DIM[1]\r\n        engine.progress_height = int(SCREEN_DIM[1] * 0.1)\r\n        engine.progress_width = SCREEN_DIM[0] - engine.info_width\r\n        engine.map_height = SCREEN_DIM[1] - engine.progress_height\r\n        engine.map_width = SCREEN_DIM[0] - engine.info_width\r\n        engine.help_height = 500\r\n        engine.help_width = 700\r\n\r\n        drawer = ScreenEngine.GameSurface(\r\n            (engine.map_width, engine.map_height),\r\n            pygame.SRCALPHA, (0, 0),\r\n            ScreenEngine.ProgressBar(\r\n                (engine.progress_width, engine.progress_height),\r\n                (engine.progress_width, 0),\r\n                ScreenEngine.InfoWindow(\r\n                    (engine.info_width, engine.info_height),\r\n                    ((engine.map_width - engine.help_width) // 2,\r\n                     (engine.map_height - engine.help_height) // 2),\r\n                    ScreenEngine.HelpWindow(\r\n                        (engine.help_width, engine.help_height),\r\n                        pygame.SRCALPHA,\r\n                        ((engine.map_width - engine.help_width) // 2,\r\n                         (engine.map_height -\r\n                          engine.help_height) // 2),\r\n                        ScreenEngine.GameOver(\r\n                            (engine.help_width, engine.help_height),\r\n                            pygame.SRCALPHA, (0, 0),\r\n                            ScreenEngine.ScreenHandle((0, 0))\r\n                        )\r\n                    )\r\n                )\r\n            )\r\n        )\r\n\r\n    else:\r\n        sh = drawer\r\n        while not isinstance(sh, ScreenEngine.ScreenHandle):\r\n            sh = sh.successor\r\n        sh.fill(ScreenEngine.colors['black'])\r\n\r\n    engine.sprite_size = sprite_size\r\n    hero.sprite = Service.create_sprite(\r\n        os.path.join(\"texture\", \"Hero.png\"),\r\n        sprite_size\r\n    )\r\n    Service.service_init(sprite_size, False)\r\n\r\n    Logic.GameEngine.sprite_size = sprite_size\r\n\r\n    drawer.connect_engine(engine)\r\n\r\n    iteration = 0\r\n\r\n\r\nsize = 60\r\ncreate_game(size, True)\r\n\r\nwhile engine.working:\r\n\r\n    if KEYBOARD_CONTROL:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                engine.working = False\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_h:\r\n                    engine.show_help = not engine.show_help\r\n                if event.key == pygame.K_KP_PLUS:\r\n                    size = size + 1\r\n                    create_game(size, False)\r\n                if event.key == pygame.K_KP_MINUS:\r\n                    size = size - 1\r\n                    create_game(size, False)\r\n                if event.key == pygame.K_r:\r\n                    create_game(size, True)\r\n                if event.key == pygame.K_ESCAPE:\r\n                    engine.working = False\r\n                if engine.game_over:\r\n                    continue\r\n                if engine.game_process:\r\n                    if event.key == pygame.K_UP:\r\n                        engine.move_up()\r\n                        iteration += 1\r\n                    elif event.key == pygame.K_DOWN:\r\n                        engine.move_down()\r\n                        iteration += 1\r\n                    elif event.key == pygame.K_LEFT:\r\n                        engine.move_left()\r\n                        iteration += 1\r\n                    elif event.key == pygame.K_RIGHT:\r\n                        engine.move_right()\r\n                        iteration += 1\r\n                else:\r\n                    if event.key == pygame.K_RETURN:\r\n                        create_game(size, True)\r\n    else:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                engine.working = False\r\n        if engine.game_process:\r\n            actions = [\r\n                engine.move_right,\r\n                engine.move_left,\r\n                engine.move_up,\r\n                engine.move_down,\r\n            ]\r\n            answer = np.random.randint(0, 100, 4)\r\n            prev_score = engine.score\r\n            move = actions[np.argmax(answer)]()\r\n            state = pygame.surfarray.array3d(gameDisplay)\r\n            reward = engine.score - prev_score\r\n            print(reward)\r\n        else:\r\n
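            # No human player is attached: start a fresh game once this one has ended.\r\n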
            create_game(size, True)\r\n\r\n    gameDisplay.blit(drawer, (0, 0))\r\n    drawer.draw(gameDisplay)\r\n\r\n    pygame.display.update()\r\n\r\npygame.display.quit()\r\npygame.quit()\r\nexit(0)\r\n","sub_path":"python/oop-patterns-python/week5/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"217601383","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport requests,os,sys,time\n\ndef main():\n    try:\n        with open(\"uploadpath.ini\",'r+',encoding=\"gbk\") as fr:\n            dic = eval(fr.read())\n            SerAddress,SaveDir=dic[\"SerAddress\"],dic[\"SaveDir\"]\n            filepath=dic[\"filepath\"].split(\";\")\n\n        with open(\"upload.log\",\"a+\",encoding=\"gbk\") as res:\n            res.write(\"######\\n\")\n            for file in filepath:\n                c_time=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n                with open(file,\"rb\") as f:\n                    response = requests.post(SerAddress,data={'dirField':SaveDir,'fnField':os.path.basename(file)},files={'fileField':(\"tmp\",f)})\n                    r_txt=response.text.strip()\n                    if r_txt==\"Done\":\n                        res.write(\"##\".join([c_time,file,\"upload succeeded\\n\"]))\n                    else:\n                        res.write(\"##\".join([c_time,file,\"upload failed: \"+r_txt+\"\\n\"]))\n    except FileNotFoundError:\n        print(\"FileNotFoundError\")\n\nif __name__ == '__main__':\n    main()\n","sub_path":"cgi-bin/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"417240377","text":"import requests\nfrom django.http import JsonResponse\nfrom rest_framework import viewsets\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.generics import ListCreateAPIView, get_object_or_404\nfrom rest_framework.response import Response\n\nfrom adeva.settings import ICE_AND_FIRE_API_URL\nfrom api.serializers import BookSerializer\nfrom api.models import Book\n\n\ndef external_books(request):\n    try:\n        name = request.GET.get('name')\n        resp = requests.get(ICE_AND_FIRE_API_URL + 'books?name=%s' % name)\n        if resp.status_code != 200:\n            response = {\n                'status_code': 200,\n                'status': 'success',\n                'data': []\n            }\n            return JsonResponse(response, status=200)\n        else:\n            response = {\n                'status_code': 200,\n                'status': 'success',\n                'data': resp.json()\n            }\n            return JsonResponse(response, status=200)\n    except:\n        response = {\n            'status_code': 200,\n            'status': 'success',\n            'data': []\n        }\n        return JsonResponse(response, status=200)\n\n\nclass BooksViewSet(viewsets.ModelViewSet):\n    queryset = Book.objects.all()\n    serializer_class = BookSerializer\n    filter_backends = (SearchFilter,)\n    search_fields = ('name', 'country', 'publisher', 'release_date')\n\n    def list(self, request):\n        books = self.get_queryset()\n        serializer = BookSerializer(books, many=True)\n        response = {\n            'status_code': 200,\n            'status': 'success',\n            'data': serializer.data\n        }\n        return Response(response)\n\n    def retrieve(self, request, pk=None):\n        queryset = Book.objects.all()\n        user = get_object_or_404(queryset, pk=pk)\n        serializer = BookSerializer(user, many=False)\n        response = {\n            'status_code': 201,\n            'status': 'success',\n            'data': serializer.data\n        }\n        return Response(response)\n\n    def create(self, request, *args, **kwargs):\n        serializer = self.serializer_class(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        validated_data = serializer.validated_data\n        book = Book.objects.create(**validated_data)\n        response = {\n            'status_code': 200,\n            'status': 'success',\n            'data': [\n                {'book': 
BookSerializer(book).data}\n            ]\n        }\n        return Response(response)\n\n    def destroy(self, request, *args, **kwargs):\n        book = self.get_object()\n        self.perform_destroy(book)\n        response = {\n            'status_code': 204,\n            'status': 'success',\n            'message': \"The book %s was deleted successfully\" % book.name,\n            'data': []\n        }\n        return Response(response)\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"642233673","text":"# %load q05_runs/build.py\n# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n\n# Your Solution\ndef BC_runs(data):\n    ing=data.get('innings')\n    firsting=ing[0]\n    fingdict=firsting.get('1st innings')\n    deliveries=fingdict.get('deliveries')\n    runs=0\n    for ball in deliveries:\n        bat=list(ball.values())\n        if bat[0].get('batsman')=='BB McCullum':\n            run=bat[0].get('runs').get('batsman')\n            runs=runs+run\n\n    # Write your code here\n\n\n    return(runs)\n\nBC_runs(data)\n\n","sub_path":"q05_runs/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"583170896","text":"from django.test import TestCase\nfrom django.test import Client\n\nfrom .models import Message\n\n# Create your tests here.\n\nclass MessageTestCase(TestCase):\n    def setUp(self):\n        m1 = Message.objects.create(\n            name = \"Jan Kołodziej\",\n            email = \"jan@kołodziej.com\",\n            priority = 4,\n            category = \"question\",\n            subject = \"Kto to?\",\n            body = \"Puste\",\n        )\n        m2 = Message.objects.create(\n            name = \"Ewa Kowal\",\n            email = \"ewa@kowal.com\",\n            priority = 9,\n            category = \"other\",\n            subject = \"Co to?\",\n            body = \"Bla bla\",\n        )\n        m3 = Message.objects.create(\n            name = \"Adam Bartnik\",\n            email = \"adam@bartnik.com\",\n            priority = 43,\n            category = \"question\",\n            subject = \"Gdzie?\",\n            body = \"Lorem ipsum lorem\",\n        )\n\n    ### Testing the model stored on the server\n\n    def test_create_object(self):\n        length = len(Message.objects.all())\n        self.assertEqual(length, 3)\n\n    def test_valid_message(self):\n        m = Message.objects.get(id=1)\n        self.assertTrue(m.is_valid_message())\n\n    def test_invalid_message(self):\n        m = Message.objects.filter(name=\"Adam Bartnik\").first()\n        self.assertFalse(m.is_valid_message())\n\n    def test_increase_priority(self):\n        m = Message.objects.get(id=2)\n        p = m.priority\n        m.increase_priority()\n        self.assertEqual(p+1, m.priority)\n\n    def test_set_priority(self):\n        m = Message.objects.get(id=1)\n        m.set_priority(7)\n        self.assertEqual(m.priority, 7)\n\n    ### Testing the server's responses\n\n    def test_messages_response(self):\n        c = Client()\n\n        response = c.get(\"/crud/message-list\")\n        self.assertEqual(response.status_code, 200)\n\n        context = response.context['object_list']\n        self.assertEqual(len(context), 3)\n\n    def test_messages_invalid_response(self):\n        c = Client()\n        response = c.get(\"/crud/message-list/5\")\n        self.assertEqual(response.status_code, 404)\n\n    def test_messages_detail_response(self):\n        c = Client()\n        response = c.get(\"/crud/message-detail/2\")\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(response.context['object'].name, \"Ewa Kowal\")\n\n    def test_message_form(self):\n        c = Client()\n        response = c.post('/crud/message-create', {\n            'name': \"Katarzyna Bartnik\",\n            \"email\": \"katarzyna@bartnik.com\",\n            'priority': 6,\n            'category': 
\"question\",\n 'subject': \"Pytanie\",\n 'body': \"Treść pytania\",\n })\n self.assertEqual(response.status_code, 302)\n m = Message.objects.all()\n self.assertEqual(m.count(), 4)","sub_path":"django/django_project2/crud/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"543187517","text":"from array import array\n\nfrom unittest.mock import patch, Mock\nfrom unittest import TestCase\n\nfrom webbluetooth.echo_gatt_server import (\n UserDescriptionDescriptor,\n DeviceNameChar,\n Characteristic,\n)\n\n\ndef nil_constructor(*args):\n pass\n\n\nclass GattServerTest(TestCase):\n @patch.object(UserDescriptionDescriptor,\n '__init__',\n new=nil_constructor)\n @patch('webbluetooth.echo_gatt_server.current_bt_device')\n def test_user_description_fetches_bt_name(self, current_bt):\n current_bt.return_value = 'huawei'\n descriptor = UserDescriptionDescriptor()\n\n device_name = descriptor.ReadValue({})\n\n assert device_name == array('B', [104, 117, 97, 119, 101, 105])\n\n @patch.object(Characteristic, '__init__', new=nil_constructor)\n @patch('webbluetooth.echo_gatt_server.UserDescriptionDescriptor')\n def test_device_name_characteristic_adds_descriptor(self, user_info):\n add_descriptor = Mock()\n DeviceNameChar.add_descriptor = add_descriptor\n DeviceNameChar('bus', 1, None)\n\n assert add_descriptor.call_count == 1\n","sub_path":"webbluetooth/test_echo_gatt_server.py","file_name":"test_echo_gatt_server.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"456703555","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nfrom .jstypes import Hook, Interfaces\n\n\nOBSOLETE_EXTENSION_MANAGER = {\n 'on_get': 'This interface is part of the obsolete extension manager '\n 'interface, which is not available in any remotely modern '\n 'version of Firefox. It should not be referenced in any '\n 'code.'}\n\nInterfaces.hook({\n 'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,\n 'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,\n 'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,\n 'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,\n 'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,\n})\n\n# nsIJSON\n\nNSIJSON_DEPRECATED = {\n 'err_id': ('testcases_javascript_calldefinitions', 'nsIJSON', 'deprec'),\n 'warning': 'Deprecated nsIJSON methods in use.',\n 'description':\n 'The `encode` and `decode` methods in nsIJSON have been '\n 'deprecated since Gecko 7. You should use the methods in the '\n 'global JSON object instead. See '\n 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference'\n '/Global_Objects/JSON for more information.'}\n\n\n@Interfaces.hook\nclass nsIJSON(Hook):\n encode = {'on_call': NSIJSON_DEPRECATED}\n decode = {'on_call': NSIJSON_DEPRECATED}\n\n\n# nsIWebBrowserPersist\n\nWEBBROWSERPERSIST_DEPRECATED = {\n 'err_id': ('testcases_javascript_call_definititions',\n 'webbrowserpersist'),\n 'warning': 'nsIWebBrowserPersist should no longer be used',\n 'description':\n 'Most nsIWebBrowserPersist methods have been '\n 'superseded by simpler methods in Downloads.jsm, namely '\n '`Downloads.fetch` and `Downloads.createDownload`. 
See '\n 'http://mzl.la/downloads-jsm for more information.',\n}\n\n\n@Interfaces.hook\nclass nsIWebBrowserPersist(Hook):\n saveChannel = {'on_call': WEBBROWSERPERSIST_DEPRECATED}\n savePrivacyAwareURI = {'on_call': WEBBROWSERPERSIST_DEPRECATED}\n\n @Hook.on_call\n def saveURI(this, args, callee):\n \"\"\"nsIWebBrowserPersist.saveURI requires a valid privacy context as\n of Firefox 19.\"\"\"\n if len(args) >= 7:\n load_context = args[6]\n if load_context.as_primitive() is None:\n this.traverser.warning(\n err_id=('testcases_javascript_call_definititions',\n 'webbrowserpersist_saveuri'),\n warning=('saveURI should not be called with a null load '\n 'context'),\n description=(\n 'While nsIWebBrowserPersist.saveURI accepts null '\n 'in place of a privacy context, this usage is '\n 'acceptable only when no appropriate load '\n 'context exists.'))\n\n return WEBBROWSERPERSIST_DEPRECATED\n\n\n# nsITransferable\n\n@Interfaces.hook\nclass nsITransferable(Hook):\n @Hook.on_call\n def init(this, args, callee):\n if args and not args[0].as_primitive():\n this.traverser.warning(\n err_id=('js_entity_values', 'nsITransferable', 'init'),\n warning=(\n '`nsITransferable.init` should not be called with `null` '\n 'as its first argument'),\n description=(\n 'Calling `nsITransferable.init()` with a null first '\n 'argument has the potential to leak data across '\n 'private browsing mode sessions. `null` is '\n 'appropriate only when reading data or writing data '\n 'which is not associated with a particular window.'))\n","sub_path":"validator/testcases/javascript/obsolete.py","file_name":"obsolete.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"532743525","text":"import json\nimport sys\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nimport os\n\n\nfilename = \"\"\nme_dict = {}\ncalls = []\nbounces = []\n\nbuy_list = []\ntarget_symbols = set([])\n\n\n\n\n\ndef plot_stuff():\n j = 0\n\n for key in me_dict:\n #call = calls[num]\n me_list = me_dict[key]\n\n line_max = max(me_list)\n\n chart_info = bounces[j]\n chart_json = json.loads(chart_info)\n\n option = chart_json['option']\n downbs = chart_json['down_bounces']\n upbs = chart_json['up_bounces']\n\n linestyle = 'dotted'\n color = 'black'\n j += 1\n\n\n if (downbs > 5):\n linestyle = 'solid'\n color = 'blue'\n if (downbs > 20):\n linestyle = 'solid'\n color = 'purple'\n if (downbs > 30):\n linestyle = 'solid'\n color = 'green'\n\n if (upbs > 5):\n linestyle = 'solid'\n color = 'yellow'\n if (upbs > 20):\n linestyle = 'solid'\n color = 'orange'\n if (upbs > 30):\n linestyle = 'solid'\n color = 'red'\n\n\n #print(\"hmm this one to plot had downs: \" + str(downbs) + \", ups: \" + str(upbs) + \" and color: \" + color)\n #print(\"would plot: \" + str(me_list))\n #plt.plot(me_list)\n\n if line_max < 0.1:\n plt.plot(me_list, label=option, linestyle=linestyle, color=color)\n\n plt.xlabel(\"Time\")\n plt.ylabel(\"Call price\")\n plt.legend(loc='upper right', prop={'size': 8})\n plt.show()\n\n\n# Make a dictionary of each option where the keys are\n# 'SYMBOL-STRIKE_PRICE' and the value is the list of prices\n# it was throughout the time we were logging the option price\n#\n\ndef form_option_dict():\n with open(filename, \"r\") as a_file:\n\n for line in a_file:\n\n stripped_line = line.strip()\n\n if \"price update\" in stripped_line:\n print(\"some important line: \" + stripped_line)\n\n #parts = stripped_line.split(\" \")\n symbol_parts = 
stripped_line.split(\"symbol\\\": \\\"\")[1]\n strike_parts = stripped_line.split(\"strike\\\": \\\"\")[1]\n\n symbol = symbol_parts.split(\"\\\"\")[0]\n strike = strike_parts.split(\"\\\"\")[0]\n\n print(\"the calls symbol: \" + symbol)\n print(\"the calls strike: \" + strike)\n\n call = symbol + \"-\" + strike\n\n if call not in me_dict:\n\n me_dict[call] = []\n #calls.append(call)\n\n\n# Second, loop over lines and add price to price lists\n\ndef parse_option_prices(add_flats=False):\n with open(filename, \"r\") as a_file:\n\n for line in a_file:\n\n stripped_line = line.strip()\n\n if \"price update\" in stripped_line:\n\n try:\n\n print(\"\\n\\nwhole line for json: \" + stripped_line)\n\n new_option = stripped_line.split(\"to: \")[1]\n opt_json = json.loads(new_option)\n\n symbol = opt_json['symbol']\n strike = opt_json['strike']\n price = opt_json['price']\n\n diff_parts = stripped_line.split(\"diff: \")[1]\n diff = diff_parts.split(\",\")[0]\n\n print(\"symbol: \" + str(symbol) + \", strike: \" + str(strike) + \", price: \" + str(price) + \", diff \" + diff)\n\n price_fl = round(float(price), 2)\n diff_fl = round(float(diff), 2)\n\n\n print(\"the calls symbol: \" + symbol)\n print(\"the calls strike: \" + strike)\n\n call = symbol + \"-\" + strike\n price_len = len(me_dict[call])\n\n if add_flats:\n me_dict[call].append(price_fl)\n else:\n print(\"not adding flats so diff is: \" + str(diff_fl))\n if diff_fl != 0.0 or price_len == 0:\n me_dict[call].append(price_fl)\n\n except:\n e = sys.exc_info()[0]\n v = sys.exc_info()[1]\n print(\"uh oh something went wrong parsing the lines \" + str(e) + \", val:\" + str(v))\n\n\n\n\n\ndef check_for_bounces(prices, symbol):\n\n print(\"checking this list for bounces: \" + str(prices))\n print(\"lenght of price list: \" + str(len(prices)))\n\n\n most_price = max(prices, key=prices.count)\n print(\"max guy: \" + str(most_price))\n after_price = False\n\n up_bounces = 0\n down_bounces = 0\n\n floors = set([])\n ceilings = set([])\n\n for price in prices:\n\n if after_price and price != most_price:\n down_bounce = price < most_price\n #print(\"price right after max one: \" + str(price) + \", down bounce: \" + str(down_bounce))\n after_price = False\n\n if down_bounce:\n down_bounces += 1\n #print(\"floor on bounce: \" + str(price))\n floors.add(price)\n else:\n up_bounces += 1\n ceilings.add(price)\n\n if price == most_price:\n after_price = True\n\n\n print(\"ceiling is: \" + str(most_price) + \", with possible floors: \" + str(floors))\n #return (up_bounces, down_bounces)\n chart_info = {\n 'option': symbol,\n 'up_bounces': up_bounces,\n 'down_bounces': down_bounces,\n 'mode_price' : most_price,\n 'floors': list(floors),\n 'ceilings': list(ceilings)\n }\n\n # convert into JSON:\n chart_json = json.dumps(chart_info)\n\n return chart_json\n\n\n\ndef make_choices():\n\n num_bounces = 5\n\n for bounce in bounces:\n chart_json = json.loads(bounce)\n if (chart_json['down_bounces'] > num_bounces):\n\n print(\"definitely should look at buying: \" + str(bounce))\n symbol = chart_json['option'].split(\"-\")[0]\n strike = chart_json['option'].split(\"-\")[1]\n\n target_buy = {\n 'symbol': symbol,\n 'strike': strike,\n 'buy_price': chart_json['floors'],\n 'sell_price': chart_json['mode_price'],\n }\n\n buy_list.append(target_buy)\n target_symbols.add(symbol)\n\n\n\n if (chart_json['up_bounces'] > num_bounces):\n print(\"definitely should look at buying: \" + str(bounce))\n symbol = chart_json['option'].split(\"-\")[0]\n strike = 
chart_json['option'].split(\"-\")[1]\n\n target_buy = {\n 'symbol': symbol,\n 'strike': strike,\n 'buy_price': chart_json['mode_price'],\n 'sell_price': chart_json['ceilings'],\n }\n\n buy_list.append(target_buy)\n target_symbols.add(symbol)\n\n\ndef write_to_buy_targets():\n\n symbol_string = \"\"\n for symbol in target_symbols:\n symbol_string += symbol + \"^^\"\n\n buy_targets = \"\\nwould check the status of buy list >> \" + str(json.dumps(buy_list))\n buy_targets += \"\\nby checking option status for >> \" + symbol_string\n buy_targets += \"\\n\"\n\n\n # get the date piece from the passed in scan data file\n # so we know when the targets were taken from\n #\n fileparts = filename.split(\"/\")\n file_end = fileparts[2]\n chart_date = file_end.split(\"_\")[0]\n\n now = datetime.now()\n dt_string = now.strftime(\"%m-%d-%y-%H:%M:%S\")\n day_string = now.strftime(\"%m-%d-%y\")\n\n os.system(\"mkdir -p targets/\" + day_string)\n fpath = \"targets/\" + day_string + \"/target_list_\" + chart_date + \".txt\"\n\n print(\"writing target file to: \" + fpath)\n\n f = open(fpath, \"a\")\n f.write(buy_targets)\n f.close()\n\n\n\n\n\n\n## Main script stuff\ndef main():\n\n form_option_dict()\n parse_option_prices(True)\n print(\"all me calls to track: \" + str(me_dict))\n\n\n for key in me_dict:\n print(\"checking \" + key + \" action for bounces..\")\n chart_info = check_for_bounces(me_dict[key], key)\n chart_json = json.loads(chart_info)\n\n print(\"chart info: \" + str(chart_info))\n print(key + \" price list result -- up bounces: \" + str(chart_json['up_bounces']) + \", down bounces: \" + str(chart_json['down_bounces']) + \"\\n\\n\")\n #bounces.append((upbs, downbs))\n bounces.append(chart_info)\n\n\n # Loop over each options chart info to see if any are worth buying (i.e. bouncinnn)\n #\n make_choices()\n\n print(\"me target buy list:\" + str(buy_list))\n\n\n # Loop over each target option buy to check the status and maybe buy some trash\n #\n print(\"\\n\\nChecking status of target buys..\")\n write_to_buy_targets()\n\n # Plot each option's price history\n #\n plot_stuff()\n\n\n\nif __name__ == \"__main__\":\n\n if (len(sys.argv) == 2):\n filename = sys.argv[1]\n print(\"Filepath we're looking at: \" + filename)\n main()\n else:\n print(\"Sike wrong number of args: \" + str(len(sys.argv)))\n","sub_path":"stonks/bouncer.py","file_name":"bouncer.py","file_ext":"py","file_size_in_byte":8930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"171068099","text":"import telebot\nimport config\nfrom telebot import types\nimport random\n\nbot = telebot.TeleBot(config.token)\n\nrock = u'\\U0000270A'\npaper = u'\\U0000270B'\nscissors = u'\\U0000270C'\n\nfigures = [rock, paper, scissors]\n\n\n@bot.message_handler(commands=['start'])\ndef send_start_message(message):\n bot.send_message(message.chat.id, \"Hi there! 
Enjoy the game.\")\n\n\ndef generate_markup():\n markup = types.ReplyKeyboardMarkup(row_width=3)\n markup.row(rock, paper, scissors)\n return markup\n\n\ndef generate_figure():\n return random.sample(figures, 1)\n\n\n@bot.message_handler(commands=['game'])\ndef set_game(message):\n bot.send_message(message.chat.id,\n 'Choose your figure:',\n reply_markup=generate_markup())\n\n\n@bot.message_handler(func=lambda message: True, content_types=['text'])\ndef define_winner(message):\n answer, reply = message.text, generate_figure()[0]\n status = figures.index(answer) - figures.index(reply)\n responses = ['Draw.', 'You won.', 'You lost.']\n bot.send_message(message.chat.id,\n reply + ' v ' +\n answer + '\\n' + responses[status])\n \n\nif __name__ == \"__main__\":\n bot.polling(none_stop=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"196431345","text":"from flask import Blueprint, request, jsonify, abort\nfrom .oauth import oauth\nfrom .login.pebble import api_ensure_pebble\n\napi = Blueprint('api', __name__)\n\n\n@api.route('/me')\n@oauth.require_oauth('profile')\ndef me():\n return jsonify(uid=request.oauth.user.id)\n\n\n@api.route('/me/pebble/auth')\n@oauth.require_oauth('pebble')\n@api_ensure_pebble\ndef pebble_auth_me():\n user = request.oauth.user\n return jsonify(id=user.pebble_auth_uid, email=user.email, name=user.name)\n\n\n@api.route('/me/pebble/dev-portal')\n@oauth.require_oauth('pebble')\n@api_ensure_pebble\ndef pebble_dev_portal_me():\n user = request.oauth.user\n return jsonify([{'id': user.pebble_dev_portal_uid, 'uid': user.pebble_auth_uid}])\n\n\ndef init_app(app, url_prefix='/api/v1'):\n app.register_blueprint(api, url_prefix=url_prefix)\n","sub_path":"auth/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"569468078","text":"preamble = []\npreamblelen=25\n\ndef allsums(l):\n return set([ a + b for a in l for b in l if a != b])\n\ndef isvalid(l, i):\n return l[i] in allsums(l[i-preamblelen:i])\n\n\nwith open(\"input.txt\", mode=\"r\") as f:\n numbers = [ int(i.strip()) for i in f.readlines() ]\n\nfor i in range(preamblelen, len(numbers)):\n if not isvalid(numbers, i):\n invalidnumber = numbers[i]\n invalidpos = i\n break\n\n# now find the collision\nfor x in range(0, invalidpos - 1):\n s = 0\n y = x\n while s < invalidnumber:\n s += numbers[y]\n y += 1\n if s == invalidnumber:\n print(min(numbers[x:y]) + max(numbers[x:y]))\n break\n","sub_path":"2020/09/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"491621245","text":"# import graphviz\nimport os,sys\nimport bayes\nimport random\nimport shutil\nfrom nltk.corpus import stopwords\nimport nltk\nimport convertformat as cf\nimport init_sets as init\nimport wordfreq as wf\nimport tfidf\nimport matplotlib.pyplot as plt\nimport ID3\ntrain_ratio=0.8\nsumofspam=25\nsumofham=25\nsumofemail=sumofham+sumofspam\n\nsumoftrain=int(sumofemail*train_ratio)\nsumoftest=sumofemail-sumoftrain\nsumofhamtrain=int(sumofham*train_ratio)\nsumofhamtest=sumofham-sumofhamtrain\nsumofspamtrain=int(sumofspam*train_ratio)\nsumofspamtest=sumofspam-sumofspamtrain\npath 
=os.path.abspath(os.path.dirname(sys.argv[0]))# current working directory\nhampath=path+'\\\\email\\\\ham\\\\'\nspampath=path+'\\\\email\\\\spam\\\\'\ntestpath=path+'\\\\test\\\\'\ntrainpath=path+'\\\\train\\\\'\n\n\nif __name__ == \"__main__\":\n\n    # Convert the encoding of every text file in these folders; this only needs to be done once\n    # cf.convertDir(hampath)\n    # cf.convertDir(spampath)\n    # misclassification rate when newly seen words are simply discarded\n    with open('mylog.txt','a')as f:\n        f.write(\"==========ID3=======================\")\n    time=5# number of runs\n    Threshold=0.9# spam threshold\n    num_maxtfidfwords=15# keep only the words with the highest tf-idf values, to drop common words\n    usetfidf=True\n    with open('mylog.txt','a')as f:\n        f.write(\"number of runs={0}\\n\".format(time))\n        if usetfidf:\n            f.write(\"using TF-IDF, keeping {0} feature words per document\\n\".format(num_maxtfidfwords))\n        else:\n            f.write('not using TF-IDF\\n')\n        f.write(\"spam detection threshold {0}\\n\".format(Threshold))\n    average_accur=0\n    sum_accur=0\n    Accuracy=[0]*time\n    \n    \n    for j in range(time):\n        with open('mylog.txt','a')as f:\n            f.write('\\nNo.'+str(j))\n        print('Run {}'.format(j+1))\n        train_hamlist,test_hamlist,train_spamlist,test_spamlist=init.init_testfiles()# lists of file names\n        train_list=train_spamlist+train_hamlist\n        test_list=test_hamlist+test_spamlist\n        contents_train_list=[]# holds the processed sentences of every txt file\n        \n        info_train_hamlist={}# per-document info for the ham training set\n        info_train_spamlist={}\n        info_test_hamlist={}# per-document info for the ham test set\n        info_test_spamlist={}\n        for i in train_hamlist:\n            dict1={}\n            dict1['wordslist'],dict1['content']=wf.WordsListofFile(i,trainpath)\n            contents_train_list.append(dict1['content'])\n            info_train_hamlist[i]=dict1\n        for i in train_spamlist:\n            dict1={}\n            dict1['wordslist'],dict1['content']=wf.WordsListofFile(i,trainpath)\n            contents_train_list.append(dict1['content'])\n            info_train_spamlist[i]=dict1\n        allwordslist=tfidf.getallword(contents_train_list,repeat=False)# all words, deduplicated\n        allwordslist_canrepeat=tfidf.getallword(contents_train_list,repeat=True)# all words, with repeats\n        for i in test_hamlist:\n            dict1={}\n            dict1['wordslist'],dict1['content']=wf.WordsListofFile(i,testpath)\n            info_test_hamlist[i]=dict1\n        for i in test_spamlist:\n            dict1={}\n            dict1['wordslist'],dict1['content']=wf.WordsListofFile(i,testpath)\n            info_test_spamlist[i]=dict1\n        # preprocess with tf-idf\n        #tfidf#train\n        info_train_hamlist=tfidf.getallwordtfidf_text(info_train_hamlist,contents_train_list)# tf-idf value of each word\n        info_train_hamlist=tfidf.get_maxtfidf_word(info_train_hamlist,num_maxtfidfwords)# drop the low values\n        info_train_spamlist=tfidf.getallwordtfidf_text(info_train_spamlist,contents_train_list)\n        info_train_spamlist=tfidf.get_maxtfidf_word(info_train_spamlist,num_maxtfidfwords)\n        #test: note tf-idf values are computed against contents_train_list\n        info_test_hamlist=tfidf.getallwordtfidf_text(info_test_hamlist,contents_train_list)# tf-idf value of each word\n        info_test_hamlist=tfidf.get_maxtfidf_word(info_test_hamlist,num_maxtfidfwords)# drop the low values\n        info_test_spamlist=tfidf.getallwordtfidf_text(info_test_spamlist,contents_train_list)\n        info_test_spamlist=tfidf.get_maxtfidf_word(info_test_spamlist,num_maxtfidfwords)\n        # compute word frequencies\n        # for i in info_train_hamlist.keys():\n        #     print(info_train_hamlist[i]['wordslist'])\n        ID3LIST=ID3.get_characters(info_trainham_list=info_train_hamlist,info_trainspam_list=info_train_spamlist)\n        for i in ID3LIST:\n            print(i)\n        \n\n        wordfreq_intrainham_dict=wf.wordsfreq(info_train_hamlist,allwordslist,sumofhamtrain)\n        wordfreq_intrainspam_dict=wf.wordsfreq(info_train_spamlist,allwordslist,sumofspamtrain)\n        # joint-probability Bayes\n        \n        \n        counthamok=0\n        countspamok=0\n        for i in range(len(test_hamlist)):\n            filename=test_hamlist[i]\n            ham=info_test_hamlist[filename]\n
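            # Score this file with the trained word-frequency tables; okemail is True when it is classified as ham.\n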
            okemail,pswdict,p=bayes.naivebayes(ham,filename,wordfreq_intrainham_dict,wordfreq_intrainspam_dict,Threshold=Threshold,usetfidf=usetfidf)\n            if okemail:\n                # detected correctly\n                counthamok+=1\n            else:# detection failed\n                with open('mylog.txt','a')as f:\n                    f.write(\"\\nFail:\"+test_hamlist[i])\n                    f.write('\\n{0}\\n'.format(pswdict))\n                print('\\nFail:'+test_hamlist[i]+':',p)# print the file name\n                print(pswdict)\n        for i in range(len(test_spamlist)):\n            filename=test_spamlist[i]\n            spam=info_test_spamlist[filename]\n            okemail,pswdict,p=bayes.naivebayes(spam,filename,wordfreq_intrainham_dict,wordfreq_intrainspam_dict,Threshold=Threshold,usetfidf=usetfidf)\n            if okemail==False:\n                countspamok+=1\n            else:# detection failed\n                with open('mylog.txt','a')as f:\n                    f.write(\"\\nFail:\"+test_spamlist[i])\n                    f.write('\\n{0}\\n'.format(pswdict))\n                print('\\nFail:'+test_spamlist[i]+':',p)# print the file name\n                print(pswdict)\n        Accuracy[j]=(counthamok+countspamok)/sumoftest\n        print(Accuracy[j])# accuracy for this run\n        sum_accur+=Accuracy[j]\n        # \n    average_accur=sum_accur/time\n    print(\"average accuracy:\", average_accur)\n    with open('mylog.txt','a')as f:\n        f.write(\"average accuracy: {0}\\n\".format(average_accur))\n    x=[x for x in range(1,time+1)]\n    y=Accuracy[:]\n    plt.scatter(x,y)\n    plt.show()\n    \n","sub_path":"main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"460495465","text":"# examples and notes from https://pythonprogramming.net/machine-learning-tutorial-python-introduction/\n\n\n#pip install quandl\nimport pandas as pd\nimport quandl, math, datetime\nimport numpy as np\nfrom sklearn import preprocessing, cross_validation, svm\nfrom sklearn.linear_model import LinearRegression\n#svm = support vector machine\n#cross validation shuffles your data, protects against bias\nimport pickle\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\n\nstyle.use('ggplot')\n\ndf = quandl.get('WIKI/GOOGL') #third party data api\ndf = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume',]]\n#define special relationships, weed out redundant prices\ndf['HL_PCT'] = (df['Adj. High'] - df['Adj. Close'])/df['Adj. Close'] * 100.0\ndf['PCT_change'] = (df['Adj. Close'] - df['Adj. Open'])/df['Adj. Open'] * 100.0\ndf = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']] #features\n\nforecast_col = 'Adj. Close'\ndf.fillna(-99999, inplace=True)\nforecast_out = int(math.ceil(0.01*len(df))) #rounds everything up to the nearest whole. Predict 1% out.\n# Eg for example next 30 days\n\ndf['label'] = df[forecast_col].shift(-forecast_out) #grabs price 10 days ahead of current features\n\nx = np.array(df.drop(['label'], 1))\nx = preprocessing.scale(x)\nx = x[:-forecast_out]\nx_lately = x[-forecast_out:] #what we're predicting against formulas: y = mx+b\n\n#always scale with training data and new data together. 
Will add to processing time\n#x = x[:-forecast_out + 1] #make sure not to get features where we don't have y labels\ndf.dropna(inplace=True)\ny = np.array(df['label'])\n#print(len(x), len(y))\n\nx_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.2) #20% is testing data\n\nclf = LinearRegression()\nclf.fit(x_train, y_train)\naccuracy = clf.score(x_test, y_test)\n#print (accuracy) #accurcy is squared error\nforecast_set = clf.predict(x_lately) #predict using latest 30 days of data\nprint(forecast_set, accuracy, forecast_out) #next 30 days of stock prices, accuracy and how many days we're forecasting\ndf['Forecast'] = np.nan\n\nlast_date = df.iloc[-1].name\nlast_unix = last_date.timestamp()\none_day = 86400\nnext_unix = last_unix + one_day\n\nfor i in forecast_set:\n next_date = datetime.datetime.fromtimestamp(next_unix)\n next_unix += one_day\n df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i]\n\nprint(df.tail())\n\ndf['Adj. Close'].plot()\ndf['Forecast'].plot()\nplt.legend(loc=4)\nplt.xlabel('Date')\nplt.ylabel('Price')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PyProgMachineLearning.py","file_name":"PyProgMachineLearning.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"195750432","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Frame error in rare events\n# This notebook calculates frame error exclusively in pre-defined time epochs of rare events\n# \\\n# Yarden, June 2021\n\n# In[1]:\n\n\n# imports\nfrom argparse import ArgumentParser\nimport configparser # used to load 'min_segment_dur.ini'\n\nfrom collections import defaultdict\nimport json\nfrom pathlib import Path\n\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport pyprojroot\nimport torch\nfrom tqdm import tqdm\n\nfrom vak import config, io, models, transforms\nfrom vak.datasets.vocal_dataset import VocalDataset\nimport vak.device\nimport vak.files\nfrom vak.labeled_timebins import lbl_tb2segments, majority_vote_transform, lbl_tb_segment_inds_list, remove_short_segments\nfrom vak.core.learncurve import train_dur_csv_paths as _train_dur_csv_paths\nfrom vak.logging import log_or_print\nfrom vak.labeled_timebins import (\n lbl_tb2segments,\n majority_vote_transform,\n lbl_tb_segment_inds_list,\n remove_short_segments\n)\nimport copy\nfrom collections import Counter\nfrom crowsetta import Transcriber\nfrom pathlib import Path\n\n\n# In[47]:\n\n\n# Data folders and parameters\nmin_segment_dur_ini = 'D:\\\\Users\\\\yarde\\\\github\\\\tweetynet\\\\data\\\\configs\\\\min_segment_dur.ini '\nconfig = configparser.ConfigParser()\nconfig.optionxform = lambda option: option # monkeypatch optionxform so it doesn't lowercase animal IDs\nconfig.read(Path(min_segment_dur_ini).expanduser().resolve())\nmin_segment_durs = {k: float(v) for k, v in config['min_segment_dur'].items()}\n\nRoot_learning_curve = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\learncurve')\nRoot_behavior = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository')\nRoot_hidden_16 = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\hidden_size\\\\hidden_size_16')\nRoot_hidden_64 = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\hidden_size\\\\hidden_size_64')\nbirds = ['bl26lb16','gr41rd51','gy6or6','or60yw70']\n\n# output folder\noutput_folder = Path('D:\\\\Users\\\\yarde\\\\github\\\\tweetynet\\\\results\\\\Bengalese_Finches\\\\rare_events')\n\n# general parameters\nmin_p_ratio = 
0.001\nmin_rare_count = 10\nmin_count = 50\nmax_p_ratio = 0.25\n\n\n# In[3]:\n\n\n# functions to convert .not.mat annotations for a single .csv\ndef convert_notmats_to_csv(notmat_folder,csv_filename):\n scribe = Transcriber(format='notmat')\n annotpaths = [str(x) for x in Path(notmat_folder).glob('*.not.mat')]\n scribe.to_csv(annotpaths,csv_filename)\n\n\n# In[4]:\n\n\n# creaate csv annotations for BFSongRepository\nBF1_notmat_folder = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\bl26lb16\\\\041912'\nBF1_csv_filename = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\bl26lb16\\\\bl26lb16_annotation.csv'\nconvert_notmats_to_csv(BF1_notmat_folder,BF1_csv_filename)\nBF2_notmat_folder = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\gr41rd51\\\\06-21-12'\nBF2_csv_filename = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\gr41rd51\\\\gr41rd51_annotation.csv'\nconvert_notmats_to_csv(BF2_notmat_folder,BF2_csv_filename)\nBF3_notmat_folder = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\gy6or6\\\\032212'\nBF3_csv_filename = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\gy6or6\\\\gy6or6_annotation.csv'\nconvert_notmats_to_csv(BF3_notmat_folder,BF3_csv_filename)\nBF4_notmat_folder = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\or60yw70\\\\09-27-28-12'\nBF4_csv_filename = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\BFSongRepository\\\\or60yw70\\\\or60yw70_annotation.csv'\nconvert_notmats_to_csv(BF4_notmat_folder,BF4_csv_filename)\n\n\n# In[5]:\n\n\n# Bird names and Root folders for experiments:\nRoot_learning_curve = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\learncurve')\nRoot_hidden_16 = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\hidden_size\\\\hidden_size_16')\nRoot_hidden_64 = Path('D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\hidden_size\\\\hidden_size_64')\nbirds = ['bl26lb16','gr41rd51','gy6or6','or60yw70']\nannotations = {'bl26lb16':BF1_csv_filename,'gr41rd51':BF2_csv_filename,'gy6or6':BF3_csv_filename,'or60yw70':BF4_csv_filename}\n\n\n# In[6]:\n\n\n# function to locate rare events\ndef locate_rare_events(path_annot_csv,labelmap,degree=3):\n if 'unlabeled' in labelmap.keys():\n unl_shift = 1\n else:\n unl_shift = 0\n inverse_labelmap = dict((v, k) for k, v in labelmap.items())\n annot_df = pd.read_csv(path_annot_csv)\n filenames = np.unique(annot_df.audio_path)\n labels = \"\".join([l for l in labelmap.keys() if l != 'unlabeled'])\n nsyls = len(labels)\n # create ngram matrix\n if degree==3:\n transmat = np.zeros((nsyls,nsyls,nsyls))\n else:\n transmat = np.zeros((nsyls,nsyls))\n for filename in filenames:\n label_idx_seq = np.array([labelmap[x]-unl_shift for x in annot_df[annot_df.audio_path==filename].label if x in labelmap.keys()])\n \n if degree==3:\n for i in range(len(label_idx_seq)-2):\n a=label_idx_seq[i]; b=label_idx_seq[i+1]; c=label_idx_seq[i+2]\n transmat[a,b,c] +=1\n else:\n for i in range(len(label_idx_seq)-1):\n a=label_idx_seq[i]; b=label_idx_seq[i+1];\n transmat[a,b] +=1\n \n # find forking transition points\n if degree==3:\n syl1 = []\n syl2 = []\n outsyls = []\n for a in range(unl_shift,nsyls):\n for b in range(unl_shift,nsyls):\n if sum(np.squeeze(transmat[a-unl_shift,b-unl_shift,:]) > 0) > 1:\n syl1.append(inverse_labelmap[a])\n syl2.append(inverse_labelmap[b])\n outsyls.append(np.squeeze(transmat[a-unl_shift,b-unl_shift,:]))\n rare_events_df = pd.DataFrame({'a':syl1,'b':syl2,'trans_outcome':outsyls})\n else:\n syl1 = 
[]\n        outsyls = []\n        for a in range(unl_shift,nsyls):\n            if sum(np.squeeze(transmat[a-unl_shift,:]) > 0) > 1:\n                syl1.append(inverse_labelmap[a]);\n                outsyls.append(np.squeeze(transmat[a-unl_shift,:]))\n        rare_events_df = pd.DataFrame({'a':syl1,'trans_outcome':outsyls})    \n    return rare_events_df\n    \n\n\n# In[7]:\n\n\n# Curate 2nd and 3rd order rare events\n\n\n# In[8]:\n\n\ndef load_network_results(path_to_config=None,\n                         spect_scaler_path = None,\n                         csv_path=None,\n                         labelmap_path=None,\n                         checkpoint_path=None,\n                         window_size = 370,\n                         hidden_size = None,\n                         min_segment_dur = 0.01,\n                         num_workers = 12,\n                         device='cuda',\n                         spect_key='s',\n                         timebins_key='t',\n                         freq_key = 'f',\n                         test_all_files=False):\n    '''\n    This function loads a model from an EVAL config file or from specified parameters, loads a model, and returns its outputs \n    for a specified test set.\n    \n    Setting 'test_all_files=True' will create a copy of the list in csv_path where all files are in the test set.\n    '''\n    if path_to_config:\n        # ---- get all the parameters from the config we need\n        cfg = config.parse.from_toml_path(path_to_config)\n        if cfg.eval: \n            model_config_map = config.models.map_from_path(path_to_config, cfg.eval.models)\n            csv_path = cfg.eval.csv_path\n            labelmap_path = cfg.eval.labelmap_path\n            checkpoint_path = cfg.eval.checkpoint_path\n            window_size = cfg.dataloader.window_size\n            num_workers = cfg.eval.num_workers\n            if spect_scaler_path:\n                spect_scaler_path = cfg.eval.spect_scaler_path\n        else:\n            print('config file must hold parameters in an [EVAL] section')\n            return None\n    else:\n        if hidden_size:\n            model_config_map = {'TweetyNet': {'loss': {}, 'metrics': {}, 'network': {'hidden_size':hidden_size}, 'optimizer': {'lr': 0.001}}}\n        else:\n            model_config_map = {'TweetyNet': {'loss': {}, 'metrics': {}, 'network': {}, 'optimizer': {'lr': 0.001}}}\n    \n    with labelmap_path.open('r') as f:\n        labelmap = json.load(f)\n    if spect_scaler_path:\n        spect_standardizer = joblib.load(spect_scaler_path)\n    else:\n        spect_standardizer = None\n    # prepare evaluation data\n    csv_df = pd.read_csv(csv_path)\n    if test_all_files==True: # allow creating a new csv 'csv_path_test.csv' where all entries are 'test'\n        csv_df['split'] = 'test'\n        csv_df.to_csv(csv_path.parent.joinpath(csv_path.stem + '_test.csv'))\n        csv_path = csv_path.parent.joinpath(csv_path.stem + '_test.csv')\n    csv_df = csv_df[csv_df.split == 'test']\n    \n    item_transform = transforms.get_defaults('eval',\n                                             spect_standardizer=spect_standardizer,\n                                             window_size=window_size,\n                                             return_padding_mask=True,\n                                             )\n\n    eval_dataset = VocalDataset.from_csv(csv_path=csv_path,\n                                         split='test',\n                                         labelmap=labelmap,\n                                         spect_key=spect_key,\n                                         timebins_key=timebins_key,\n                                         item_transform=item_transform,\n                                         )\n\n    eval_data = torch.utils.data.DataLoader(dataset=eval_dataset,\n                                            shuffle=False,\n                                            # batch size 1 because each spectrogram reshaped into a batch of windows\n                                            batch_size=1,\n                                            num_workers=num_workers)\n    input_shape = eval_dataset.shape\n    # if dataset returns spectrogram reshaped into windows,\n    # throw out the window dimension; just want to tell network (channels, height, width) shape\n    if len(input_shape) == 4:\n        input_shape = input_shape[1:]\n\n    models_map = models.from_model_config_map(\n        model_config_map,\n        num_classes=len(labelmap),\n        input_shape=input_shape\n    )\n    model_name = 'TweetyNet'\n    model = models_map['TweetyNet']\n    model.load(checkpoint_path)\n    #metrics = model.metrics # metric name -> callable map we use below in loop\n    if device is None:\n        device = vak.device.get_default_device()\n    pred_dict = 
model.predict(pred_data=eval_data,\n device=device)\n \n \n annotation_dfs = [pd.DataFrame(eval_dataset.annots[file_number].seq.as_dict()) for file_number in range(len(csv_df))]\n \n return csv_df, annotation_dfs, pred_dict, labelmap\n\ndef seq_in_seq(long_seq,target_seq):\n #import pdb\n #pdb.set_trace()\n onsets = []\n offsets = []\n for onset in np.arange(0,len(long_seq)-len(target_seq)):\n if list(long_seq)[onset:onset+len(target_seq)] == list(target_seq):\n onsets.append(onset)\n offsets.append(onset+len(target_seq))\n return onsets,offsets\n\n\n# In[9]:\n\n\n# Function to summarize experiments\n\n\n# In[10]:\n\n\ndef create_results(annot_path,rare_trans_df,labelmap,csv_df,pred_dict,degree=3):\n if 'unlabeled' in labelmap.keys():\n unl_shift = 1\n else:\n unl_shift = 0\n labels = \"\".join([l for l in labelmap.keys() if l != 'unlabeled'])\n df_annot = pd.read_csv(annot_path)\n inverse_labelmap = dict((v, k) for k, v in labelmap.items())\n nsyls = len(labels)\n sequences = []\n ratios = []\n totals = []\n for n in range(len(rare_trans_df)):\n stem = np.array(rare_trans_df.loc[n,['a','b']])\n trans_n = np.array(rare_trans_df.loc[n,'trans_outcome'])\n tot_num_event = sum(trans_n)\n trans_p = trans_n/tot_num_event\n max_p = max(trans_p)\n rec_trans_p = trans_p/max_p\n if tot_num_event >= min_count:\n for i in range(len(rec_trans_p)):\n if ((rec_trans_p[i] <= max_p_ratio) & (trans_n[i] > min_rare_count) & (trans_p[i] > min_p_ratio)):\n print('Transition added: ',stem,'to',inverse_labelmap[i+unl_shift])\n ratios.append(rec_trans_p[i])\n sequences.append(np.append(stem,inverse_labelmap[i+unl_shift]))\n totals.append(trans_n)\n\n names = [Path(x).name for x in csv_df['audio_path']]\n spect_names = [x for x in csv_df['spect_path']]\n\n idxs = []\n times_on=[]\n times_off=[]\n seq_idxs = []\n for ind,name in enumerate(names):\n temp_df = copy.deepcopy(df_annot[[Path(x).name == name for x in df_annot.audio_path]])\n for seq_id,seq in enumerate(sequences):\n onsets,offsets = seq_in_seq(np.array(temp_df.label),seq)\n if len(onsets)>0:\n idxs.append(ind)\n times_on.append([temp_df['onset_s'].iloc[x-1] for x in offsets])\n times_off.append([temp_df['offset_s'].iloc[x-1] for x in offsets])\n seq_idxs.append(seq_id)\n #print('seq:',seq,'in',name,':',times_on[-1])\n\n\n # now collect frame error rate\n\n argmax_labels = []\n argmax_labels_maj = []\n cnt=0\n for idx,seq_idx,t_ons,t_offs in zip(idxs,seq_idxs,times_on,times_off):\n #print(cnt)\n #spect = vak.files.spect.load(str(spect_names[idx]))['s']\n model_output = pred_dict[str(spect_names[idx])]\n model_output = np.squeeze(model_output.cpu().numpy())\n model_output = np.transpose(model_output,(0,2,1))\n m_shape = np.shape(model_output)\n model_output = model_output.reshape(m_shape[0]*m_shape[1],m_shape[2])\n t_vec = vak.files.spect.load(str(spect_names[idx]))['t']#[0] #remember to remove [0]\n f_vec = vak.files.spect.load(str(spect_names[idx]))['f']#[0] \n model_output = model_output[:len(t_vec)]\n #import pdb\n #pdb.set_trace()\n #model_output_argmax = np.array([int(inverse_labelmap[(x)]) if x>0 else 0 for x in np.argmax(model_output,axis=1)])\n model_output_argmax = np.argmax(model_output,axis=1)\n tmp = [model_output_argmax[(t_vec >= t_on) & (t_vec <= t_off)] for t_on,t_off in zip(t_ons,t_offs)]\n argmax_labels.append(np.concatenate(tmp))\n tmp_maj = []\n for t in tmp:\n tmp_maj.append(list(Counter(t).most_common(1)[0])[0]*np.ones_like(t))\n argmax_labels_maj.append(np.concatenate(tmp_maj))\n \n \n errs_maj = []\n errs=[]\n ns = []\n for 
i_seq,seq in enumerate(sequences):\n ratio = ratios[i_seq]\n label = seq[-1]\n tmp_argmax_seq_labels = [[inverse_labelmap[y] for y in argmax_labels[x]] for x in np.where(np.array(seq_idxs) == i_seq)[0]]\n if len(tmp_argmax_seq_labels) > 0:\n argmax_seq_labels = np.concatenate(tmp_argmax_seq_labels)\n print('seq:',seq,'ratio:',ratio,'err',1-np.mean(argmax_seq_labels == label),'n',totals[i_seq])\n errs.append(1-np.mean(argmax_seq_labels == label))\n else:\n print('seq:',seq,'does not appear in the test set')\n errs.append(None)\n tmp_argmax_seq_labels = [[inverse_labelmap[y] for y in argmax_labels_maj[x]] for x in np.where(np.array(seq_idxs) == i_seq)[0]]\n if len(tmp_argmax_seq_labels) > 0:\n argmax_seq_labels = np.concatenate(tmp_argmax_seq_labels)\n errs_maj.append(1-np.mean(argmax_seq_labels == label))\n ns.append(len(argmax_seq_labels))\n else:\n #print('seq:',seq,'does not appear in the test set')\n errs_maj.append(None)\n ns.append(None)\n outdict={'sequences':sequences, \n 'ratios':ratios, \n 'totals':totals, \n 'idxs':idxs, \n 'times_on':times_on, \n 'times_off':times_off, \n 'seq_idxs':seq_idxs, \n 'argmax_labels':argmax_labels, \n 'argmax_labels_maj':argmax_labels_maj, \n 'errs_maj':errs_maj, \n 'errs':errs, \n 'ns':ns} \n return outdict\n\n\n# In[11]:\n\n\n# summarize results with hidden_size=16\nwindow_size = 176\nhidden_size = 16\ntraining_dur_summary = []\nratio_summary = []\nerrs_summary = []\nerrs_maj_summary = []\nerrs_se_summary = []\nerrs_se_maj_summary = []\n\nfor bird in birds:\n min_segment_dur = min_segment_durs[bird]\n result_folder = [d for d in Root_hidden_16.joinpath(bird).iterdir()][0]\n train_dur_folders = [d for d in result_folder.iterdir() if d.is_dir()]\n for curr_train_folder in train_dur_folders:\n replicate_folders = [d for d in curr_train_folder.iterdir() if d.is_dir()]\n ods = []\n for curr_replicate_folder in replicate_folders:\n path_labelmap = curr_replicate_folder.joinpath('labelmap.json')\n if curr_replicate_folder.joinpath('StandardizeSpect').exists():\n spect_scaler_path = curr_replicate_folder.joinpath('StandardizeSpect')\n else:\n spect_scaler_path = None\n\n checkpoint_path = curr_replicate_folder.joinpath('TweetyNet','checkpoints','max-val-acc-checkpoint.pt')\n csv_path = [f for f in Root_learning_curve.joinpath(bird).glob('*.csv')][0]\n csv_df, annotation_dfs, pred_dict, labelmap = load_network_results(path_to_config=None,\n spect_scaler_path = spect_scaler_path,\n csv_path=csv_path,\n labelmap_path=path_labelmap,\n checkpoint_path=checkpoint_path,\n window_size = window_size,\n hidden_size = hidden_size,\n min_segment_dur = min_segment_dur,\n num_workers = 4,\n device='cuda',\n spect_key='s',\n timebins_key='t',\n freq_key = 'f',\n test_all_files=False)\n rare_trans_df = locate_rare_events(annotations[bird],labelmap,degree=3)\n od = create_results(annotations[bird],rare_trans_df,labelmap,csv_df,pred_dict,degree=3)\n ods.append(od)\n print('Done',curr_replicate_folder)\n #locate_rare_events(BF1_csv_filename,'output_folder',labelmap,degree=3)\n mn=np.nanmean(np.array([x['errs'] for x in ods]).astype(float),axis=0)\n sd=np.nanstd(np.array([x['errs'] for x in ods]).astype(float),axis=0)\n mn_maj=np.nanmean(np.array([x['errs_maj'] for x in ods]).astype(float),axis=0)\n sd_maj=np.nanstd(np.array([x['errs_maj'] for x in ods]).astype(float),axis=0)\n \n errs_summary.append(mn)\n errs_maj_summary.append(mn_maj)\n errs_se_summary.append(sd/np.sqrt(10))\n errs_se_maj_summary.append(sd_maj/np.sqrt(10))\n \n ratio_summary.append(ods[0]['ratios'])\n \n 
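# Record the label for this training-set duration condition (e.g. '600s').\n        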
training_dur_summary.append(curr_train_folder.parts[-1].split('_')[-1])\n#%%\n\n\n# In[108]:\n\n\n\n\n\n# In[46]:\n\n\n#(training_dur_summary[:7])\n#(ratio_summary[:7])\n#errs_summary \n#errs_maj_summary \n#errs_se_summary \n#errs_se_maj_summary \ndata = dict((x,np.concatenate(np.array(errs_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndata_maj = dict((x,np.concatenate(np.array(errs_maj_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\n\n#se\ndata_se = dict((x,np.concatenate(np.array(errs_se_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndata_maj_se = dict((x,np.concatenate(np.array(errs_se_maj_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\n\n\nall_ratios = dict((x,np.concatenate(np.array(ratio_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndurs = np.array([int(s[:-1]) for s in data.keys()])\ndurkeys = np.array([s for s in data.keys()])\n\n\n# In[48]:\n\n\n# convert to data frames and save .csv source data\n# One bird did not have the 600s datapoint so adding 'nan's \n\nall_ratios['600s'] = np.concatenate([all_ratios['600s'],[np.nan,np.nan]])\nall_ratios_df = pd.DataFrame(all_ratios)\n\ndata['600s'] = np.concatenate([data['600s'],[np.nan,np.nan]])\ndata_df = pd.DataFrame(data)\n\ndata_maj['600s'] = np.concatenate([data_maj['600s'],[np.nan,np.nan]])\ndata_maj_df = pd.DataFrame(data_maj)\n\ndata_se['600s'] = np.concatenate([data_se['600s'],[np.nan,np.nan]])\ndata_se_df = pd.DataFrame(data_se)\n\ndata_maj_se['600s'] = np.concatenate([data_maj_se['600s'],[np.nan,np.nan]])\ndata_maj_se_df = pd.DataFrame(data_maj_se)\n\nall_ratios_df.to_csv(output_folder.joinpath('all_ratios_hidden_16.csv'))\ndata_df.to_csv(output_folder.joinpath('data_hidden_16.csv'))\ndata_maj_df.to_csv(output_folder.joinpath('data_maj_hidden_16.csv'))\ndata_se_df.to_csv(output_folder.joinpath('data_se_hidden_16.csv'))\ndata_maj_se_df.to_csv(output_folder.joinpath('data_maj_se_hidden_16.csv'))\n\n\n# In[13]:\n\n\n#(training_dur_summary[:7])\n#(ratio_summary[:7])\n#errs_summary \n#errs_maj_summary \n#errs_se_summary \n#errs_se_maj_summary \nresult_folder = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\hidden_size'\ndata = dict((x,np.concatenate(np.array(errs_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndata_maj = dict((x,np.concatenate(np.array(errs_maj_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\nall_ratios = dict((x,np.concatenate(np.array(ratio_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndurs = np.array([int(s[:-1]) for s in data.keys()])\ndurkeys = np.array([s for s in data.keys()])\nsortind = np.argsort(durs)\nimport matplotlib.pyplot as plt\nfrom scipy.stats.stats import pearsonr\nplt.figure(figsize=(25,4))\nfor i,e in enumerate(durkeys[sortind]):\n ax = plt.subplot(1,len(durs),i+1)\n ax.scatter(all_ratios[e],data[e])\n ax.scatter(all_ratios[e],data_maj[e])\n x = [all_ratios[e][pos] for pos,d in enumerate(data[e]) if not np.isnan(d)]\n y = [data[e][pos] for pos,d in enumerate(data[e]) if not np.isnan(d)]\n r,p = pearsonr(x,y)\n ax.set_title(e + ': r={0:1.2f},p={1:1.2f}'.format(r,p))\n 
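# Pin the y-axis to [0, 1] so error rates are comparable across panels.\n    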
ax.set_ylim([0,1])\n if i>0: \n ax.set_yticks([])\n else:\n ax.set_ylabel('Frame error rate')\n ax.set_xlabel('Ratio of Rare/Frequent sequence')\n ax.legend(['raw','maj_vote'])\nplt.suptitle('Error rates in rare sequences - Summary plot for BFSongRepository data with num_hidden = 16',fontsize=16) \nplt.tight_layout()\nfilename = 'rare_events_test_hidden_16'\nplt.savefig(result_folder + '\\\\' + filename + '.png')\nplt.savefig(result_folder + '\\\\' + filename + '.pdf')\nplt.show()\n\n\n# In[169]:\n\n\n\n\n\n# ## Conclusion - for hidden_num=16:\n# #### 1. We examine the frame error in syllables 'X' in the sequences 'a-b-X' and look for relation to the rareity of 'a-b-X' compared to the most frequent alternative 'a-b-Y'\n# #### 2. High error rates in rare sequences are more likely to occur in very rare events but there is no significant correlation (pearsons r,p - high r values resulting from a few outliers).\n# #### 3. The trend depends on how well the model is trained (the duration of the training set)\n# #### 4. Using the majority vote cleanup almost always reduce the error rates.\n# #### 5. The minimum of the correlation coefficient may be another sign of the optimal training set duration (for future developments)\n# \n# \n\n# In[49]:\n\n\n# summarize results with hidden_size=64\nwindow_size = 176\nhidden_size = 64\ntraining_dur_summary = []\nratio_summary = []\nerrs_summary = []\nerrs_maj_summary = []\nerrs_se_summary = []\nerrs_se_maj_summary = []\n\nfor bird in birds:\n min_segment_dur = min_segment_durs[bird]\n result_folder = [d for d in Root_hidden_64.joinpath(bird).iterdir()][0]\n train_dur_folders = [d for d in result_folder.iterdir() if d.is_dir()]\n for curr_train_folder in train_dur_folders:\n replicate_folders = [d for d in curr_train_folder.iterdir() if d.is_dir()]\n ods = []\n for curr_replicate_folder in replicate_folders:\n path_labelmap = curr_replicate_folder.joinpath('labelmap.json')\n if curr_replicate_folder.joinpath('StandardizeSpect').exists():\n spect_scaler_path = curr_replicate_folder.joinpath('StandardizeSpect')\n else:\n spect_scaler_path = None\n\n checkpoint_path = curr_replicate_folder.joinpath('TweetyNet','checkpoints','max-val-acc-checkpoint.pt')\n csv_path = [f for f in Root_learning_curve.joinpath(bird).glob('*.csv')][0]\n csv_df, annotation_dfs, pred_dict, labelmap = load_network_results(path_to_config=None,\n spect_scaler_path = spect_scaler_path,\n csv_path=csv_path,\n labelmap_path=path_labelmap,\n checkpoint_path=checkpoint_path,\n window_size = window_size,\n hidden_size = hidden_size,\n min_segment_dur = min_segment_dur,\n num_workers = 4,\n device='cuda',\n spect_key='s',\n timebins_key='t',\n freq_key = 'f',\n test_all_files=False)\n rare_trans_df = locate_rare_events(annotations[bird],labelmap,degree=3)\n od = create_results(annotations[bird],rare_trans_df,labelmap,csv_df,pred_dict,degree=3)\n ods.append(od)\n print('Done',curr_replicate_folder)\n #locate_rare_events(BF1_csv_filename,'output_folder',labelmap,degree=3)\n mn=np.nanmean(np.array([x['errs'] for x in ods]).astype(float),axis=0)\n sd=np.nanstd(np.array([x['errs'] for x in ods]).astype(float),axis=0)\n mn_maj=np.nanmean(np.array([x['errs_maj'] for x in ods]).astype(float),axis=0)\n sd_maj=np.nanstd(np.array([x['errs_maj'] for x in ods]).astype(float),axis=0)\n \n errs_summary.append(mn)\n errs_maj_summary.append(mn_maj)\n errs_se_summary.append(sd/np.sqrt(10))\n errs_se_maj_summary.append(sd_maj/np.sqrt(10))\n \n ratio_summary.append(ods[0]['ratios'])\n \n 
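# Record the training-set duration label for this hidden_size=64 condition.\n        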
training_dur_summary.append(curr_train_folder.parts[-1].split('_')[-1])\n\n\n# In[50]:\n\n\n# Summarize for hidden_size=64\nresult_folder = 'D:\\\\Users\\\\yarde\\\\vak_project\\\\BF\\\\hidden_size'\n#(training_dur_summary[:7])\n#(ratio_summary[:7])\n#errs_summary \n#errs_maj_summary \n#errs_se_summary \n#errs_se_maj_summary \ndata = dict((x,np.concatenate(np.array(errs_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndata_maj = dict((x,np.concatenate(np.array(errs_maj_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\nall_ratios = dict((x,np.concatenate(np.array(ratio_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndurs = np.array([int(s[:-1]) for s in data.keys()])\ndurkeys = np.array([s for s in data.keys()])\nsortind = np.argsort(durs)\nimport matplotlib.pyplot as plt\nfrom scipy.stats.stats import pearsonr\nplt.figure(figsize=(25,4))\nfor i,e in enumerate(durkeys[sortind]):\n    ax = plt.subplot(1,len(durs),i+1)\n    ax.scatter(all_ratios[e],data[e])\n    ax.scatter(all_ratios[e],data_maj[e])\n    x = [all_ratios[e][pos] for pos,d in enumerate(data[e]) if not np.isnan(d)]\n    y = [data[e][pos] for pos,d in enumerate(data[e]) if not np.isnan(d)]\n    r,p = pearsonr(x,y)\n    ax.set_title(e + ': r={0:1.2f},p={1:1.2f}'.format(r,p))\n    ax.set_ylim([0,1])\n    if i>0: \n        ax.set_yticks([])\n    else:\n        ax.set_ylabel('Frame error rate')\n    ax.set_xlabel('Ratio of Rare/Frequent sequence')\n    ax.legend(['raw','maj_vote'])\nplt.suptitle('Error rates in rare sequences - Summary plot for BFSongRepository data with num_hidden = 64',fontsize=16) \nplt.tight_layout()\nfilename = 'rare_events_test_hidden_64'\nplt.savefig(result_folder + '\\\\' + filename + '.png')\nplt.savefig(result_folder + '\\\\' + filename + '.pdf')\nplt.show()\n\n\n# ## Conclusion - for hidden_num=64:\n# #### 1. Compared to the hidden_size=16 case, error rates are smaller\n# #### 2. The trend of more errors in rare events is weaker and almost not significant - the Pearson's 'r' value is determined by one outlier\n# #### 3. Using the majority vote cleanup almost always reduces the error rates.\n# #### 4. 
The larger hidden_size allows convergence to better results with shorter training duration.\n# \n\n# In[51]:\n\n\ndata = dict((x,np.concatenate(np.array(errs_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndata_maj = dict((x,np.concatenate(np.array(errs_maj_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\n\n#se\ndata_se = dict((x,np.concatenate(np.array(errs_se_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndata_maj_se = dict((x,np.concatenate(np.array(errs_se_maj_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\n\nall_ratios = dict((x,np.concatenate(np.array(ratio_summary,dtype=object)[np.where(np.array(training_dur_summary) == x)])) for x in np.unique(training_dur_summary))\ndurs = np.array([int(s[:-1]) for s in data.keys()])\ndurkeys = np.array([s for s in data.keys()])\n\n# convert to data frames and save .csv source data\n# One bird did not have the 600s datapoint so adding 'nan's \n\nall_ratios['600s'] = np.concatenate([all_ratios['600s'],[np.nan,np.nan]])\nall_ratios_df = pd.DataFrame(all_ratios)\n\ndata['600s'] = np.concatenate([data['600s'],[np.nan,np.nan]])\ndata_df = pd.DataFrame(data)\n\ndata_maj['600s'] = np.concatenate([data_maj['600s'],[np.nan,np.nan]])\ndata_maj_df = pd.DataFrame(data_maj)\n\ndata_se['600s'] = np.concatenate([data_se['600s'],[np.nan,np.nan]])\ndata_se_df = pd.DataFrame(data_se)\n\ndata_maj_se['600s'] = np.concatenate([data_maj_se['600s'],[np.nan,np.nan]])\ndata_maj_se_df = pd.DataFrame(data_maj_se)\n\nall_ratios_df.to_csv(output_folder.joinpath('all_ratios_hidden_64.csv'))\ndata_df.to_csv(output_folder.joinpath('data_hidden_64.csv'))\ndata_maj_df.to_csv(output_folder.joinpath('data_maj_hidden_64.csv'))\ndata_se_df.to_csv(output_folder.joinpath('data_se_hidden_64.csv'))\ndata_maj_se_df.to_csv(output_folder.joinpath('data_maj_se_hidden_64.csv'))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"article/src/scripts/Frame_error_in_rare_events.py","file_name":"Frame_error_in_rare_events.py","file_ext":"py","file_size_in_byte":32039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"278001328","text":"#!/usr/bin/env python\n\n__author__ = \"Jose Antonio Navas Molina\"\n__copyright__ = \"Copyright 2013, The QIIME Scaling Project\"\n__credits__ = [\"Jose Antonio Navas Molina\"]\n__license__ = \"BSD\"\n__version__ = \"0.0.2-dev\"\n__maintainer__ = \"Jose Antonio Navas Molina\"\n__email__ = \"josenavasmolina@gmail.com\"\n__status__ = \"Development\"\n\nfrom unittest import TestCase, main\nfrom scaling.util import natural_sort\n\nclass TestUtil(TestCase):\n def test_natural_sort(self):\n \"\"\"Correctly sorts a list in natural sort\"\"\"\n l = ['100_b','10_bb','100_a','20_aa','500_c', '9_c']\n exp =['9_c','10_bb','20_aa','100_a','100_b','500_c']\n obs = natural_sort(l)\n\n self.assertEqual(obs, exp)\n\nif __name__ == '__main__':\n main()","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"235648672","text":"#========================================================\n#\n# Author: AM readygood@163.com\n#\n# Blog: http://www.my-blog.top\n#\n# Last modified: 2019-01-15 16:59\n#\n# Filename: homework_week04.py\n#\n# 
Description: V1.0\n#\n#========================================================\n#\nn = int(input('How many characters do you want to keep? >>> '))\ncomments = (\n    \"Implementation note\",\n    \"Changed\",\n    \"ABC for generator\",\n    \"Hello python\"\n)\ndef add_ellipsis(n,word):\n    for i in word:\n        lst = []\n        for j in i:\n            lst.append(j)\n        print('{}{}'.format(''.join(lst[:n]),'......'))\n#test:\nadd_ellipsis(n,comments)\n\n# This shortcut approach is fine, no problem","sub_path":"P17084_阿孟/04/homework_week04.py","file_name":"homework_week04.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"117730706","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nurlpatterns = [\n    url(r'', include('osmaxx.excerptexport.urls', namespace='excerptexport')),\n    url(r'^admin/django-rq/', include('django_rq.urls')),\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'', include('social_django.urls', namespace='social')),\n    url(r'^version/', include('osmaxx.version.urls', namespace='version')),\n    # browsable REST API\n    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n    url(r'^api/', include('osmaxx.excerptexport.rest_api.urls', namespace='excerptexport_api')),\n    url(r'^job_progress/', include('osmaxx.job_progress.urls', namespace='job_progress')),\n    url(r'^pages/', include('osmaxx.core.urls', namespace='pages')),\n    url(r'^profile/', include('osmaxx.profile.urls', namespace='profile')),\n] + \\\n    static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT, show_indexes=True) + \\\n    static(settings.STATIC_URL, document_root=settings.STATIC_ROOT, show_indexes=True)\n\nif settings.DEBUG:\n    import debug_toolbar\n    urlpatterns += [\n        url(r'^__debug__/', include(debug_toolbar.urls)),\n    ]\n","sub_path":"web_frontend/config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"11021424","text":"#\n# coordinate_variation.py\n#\n# Copyright © 2010-2015, 2017 Monotype Imaging Inc. All Rights Reserved.\n#\n\n\"\"\"\nItems relating to variation-modified coordinate values for OpenType BASE tables.\n\nIMPORTANT NOTE: The OpenType spec (as of version 1.6) does not specify where\nthe offset to the Device record is calculated from. This code assumes the\noffset is from the start of the BaseCoordFormat3 table itself; if this proves\ninaccurate, this code will have to be modified.\n\"\"\"\n\n# System imports\nimport logging\n\n# Other imports\nfrom fontio3.fontdata import valuemeta\nfrom fontio3.opentype import device\n\n# -----------------------------------------------------------------------------\n\n#\n# Private functions\n#\n\ndef _validate(obj, **kwArgs):\n    logger = kwArgs['logger']\n    e = kwArgs['editor']\n    \n    try:\n        n = int(round(obj))\n    except:\n        n = None\n    \n    # Note that if the value n is None (i.e. conversion or rounding failed)\n    # the error is not raised here. 
Since this function is a partial, the\n # valuemeta isValid() checks will still be done, and the error will be\n # raised there instead.\n \n if n is not None and e is not None and e.reallyHas(b'head'):\n upem = e.head.unitsPerEm\n \n if abs(n) >= 2 * upem:\n logger.warning((\n 'V0637',\n (n,),\n \"The FUnit value %d is more than two ems away \"\n \"from the origin, which seems unlikely.\"))\n \n return True\n\n# -----------------------------------------------------------------------------\n\n#\n# Classes\n#\n\nif 0:\n def __________________(): pass\n\nclass Coordinate_variation(int, metaclass=valuemeta.FontDataMetaclass):\n \"\"\"\n Objects representing a coordinate value, a single integer in FUnits. This\n will be interpreted as X or Y depending on whether the object containing it\n is part of the horizontal or vertical baseline data.\n \n There is also one attribute:\n \n variation A LivingDeltas object to be used to tweak the\n coordinate in variation space.\n \n >>> int(_testingValues[0])\n 25\n >>> list(_testingValues[0].variation)[0][1]\n -180\n\n >>> logger = utilities.makeDoctestLogger(\"coordinate_variation_test\")\n >>> e = _fakeEditor()\n >>> _testingValues[2].isValid(logger=logger, editor=e)\n True\n \n >>> _testingValues[3].isValid(logger=logger, editor=e)\n coordinate_variation_test - WARNING - The FUnit value -20000 is more than two ems away from the origin, which seems unlikely.\n True\n \"\"\"\n \n #\n # Class definition variables\n #\n \n valueSpec = dict(\n value_pprintlabel = \"Coordinate\",\n value_scales = True,\n value_validatefunc_partial = _validate)\n \n attrSpec = dict(\n variation = dict(\n attr_followsprotocol = True,\n attr_label = \"Variation\",\n attr_islivingdeltas = True))\n \n #\n # Methods\n #\n \n def buildBinary(self, w, **kwArgs):\n \"\"\"\n Adds the binary data for the Coordinate_variation object to the specified\n LinkedWriter. There is one optional keyword argument:\n \n devicePool A dict mapping immutable versions of Devices to the\n (Device, stake) pairs. If specified, the caller (or\n a higher caller) is responsible for writing out the\n pool when done. If not specified, a local pool will\n be used and will be written here.\n \n otIVS \n \n >>> w = writer.LinkedWriter()\n >>> otIVS = (\"\", {\"FakeLivingDeltas1\": (1, 55)})\n >>> obj = Coordinate_variation(32, variation=\"FakeLivingDeltas1\")\n >>> bs = obj.binaryString(otIVS=otIVS)\n >>> utilities.hexdump(bs)\n 0 | 0003 0020 0006 0001 0037 8000 |... .....7.. |\n \"\"\"\n \n if 'stakeValue' in kwArgs:\n stakeValue = kwArgs.pop('stakeValue')\n w.stakeCurrentWithValue(stakeValue)\n else:\n stakeValue = w.stakeCurrent()\n\n ivsBs, ld2dsimap = kwArgs.get('otIVS', (None, None))\n\n w.add(\"Hh\", 3, self)\n \n if 'devicePool' in kwArgs:\n pool = kwArgs['devicePool']\n\n dstuple = ld2dsimap[self.variation]\n\n if dstuple not in pool:\n varidxrec = device.Device(\n dict(enumerate(dstuple)),\n isVariable=True)\n pool[dstuple] = (varidxrec, w.getNewStake())\n \n w.addUnresolvedOffset(\"H\", stakeValue, pool[dstuple][1])\n \n else:\n stake = w.getNewStake()\n w.addUnresolvedOffset(\"H\", stakeValue, stake)\n dstuple = ld2dsimap[self.variation]\n varidxrec = device.Device(\n dict(enumerate(dstuple)),\n isVariable=True)\n varidxrec.buildBinary(w, stakeValue=stake, **kwArgs)\n \n @classmethod\n def fromvalidatedwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a new Coordinate_variation object from the\n specified walker, doing source validation. 
The following keyword\n arguments are supported:\n \n logger A logger to which messages will be logged.\n \n otcommondeltas A dictionary of (outer, inner): LivingDeltas.\n Required for Variable fonts.\n\n >>> s = utilities.fromhex(\"0003 0040 0006 0000 0001 8000\")\n >>> otcd = {(0,1): testLD1, (0,2): testLD2}\n >>> logger = utilities.makeDoctestLogger(\"coordinate_variation_fvw\")\n >>> fvb = Coordinate_variation.fromvalidatedbytes\n >>> obj = fvb(s, logger=logger, otcommondeltas=otcd)\n coordinate_variation_fvw.coordinate_variation - DEBUG - Walker has 12 remaining bytes.\n coordinate_variation_fvw.coordinate_variation.device - DEBUG - Walker has 6 remaining bytes.\n coordinate_variation_fvw.coordinate_variation.device - DEBUG - VariationIndex (0, 1)\n coordinate_variation_fvw.coordinate_variation - DEBUG - LivingDeltas ('wdth': (start -1.0, peak 0.25, end 0.75), 'wght': (start -0.75, peak 0.0, end 1.0), -180)\n \"\"\"\n \n logger = kwArgs.pop('logger')\n otcommondeltas = kwArgs.get('otcommondeltas')\n\n if logger is None:\n logger = logging.getLogger().getChild('coordinate_variation')\n else:\n logger = logger.getChild('coordinate_variation')\n \n logger.debug((\n 'V0001',\n (w.length(),),\n \"Walker has %d remaining bytes.\"))\n \n if w.length() < 6:\n logger.error(('V0004', (), \"Insufficient bytes.\"))\n return None\n \n wBase = w.subWalker(0, relative=True) # needed for device base\n format, shift, devOffset = w.unpack(\"HhH\")\n \n if format != 3:\n logger.error((\n 'V0002',\n (format,),\n \"Expected format 3, but got %d instead.\"))\n \n return None\n \n if devOffset >= wBase.length():\n logger.error((\n 'V0639',\n (devOffset, wBase.length()),\n \"The device offset of %d is beyond the available length %d.\"))\n \n return None\n \n dlt = device.Device.fromvalidatedwalker(\n wBase.subWalker(devOffset),\n logger = logger,\n **kwArgs)\n\n if dlt:\n ld = otcommondeltas.get(dlt)\n if ld:\n logger.debug((\n 'Vxxxx',\n (ld,),\n \"LivingDeltas %s\"))\n else:\n logger.error((\n 'Vxxxx',\n (dlt,),\n \"Variation Index %s not present in the OpenType \"\n \"common itemVariationStore (GDEF).\"))\n \n return None\n\n else:\n logger.debug((\n 'Vxxxx',\n (devOffset,),\n \"Invalid Variable data at 0x%04X\"))\n\n return cls(shift, variation=ld)\n\n @classmethod\n def fromwalker(cls, w, **kwArgs):\n \"\"\"\n Creates and returns a Coordinate_variation object from the specified\n walker. 
The following keyword arguments are supported:\n\n otcommondeltas A dictionary of (outer, inner): LivingDeltas.\n Required for Variable fonts.\n \n >>> bs = utilities.fromhex(\"0003 000A 0006 0000 0002 8000\")\n >>> fvb = Coordinate_variation.frombytes\n >>> otcd = {(0,2): testLD2}\n >>> obj = fvb(bs, otcommondeltas=otcd)\n >>> int(obj)\n 10\n >>> list(obj.variation)[0][1]\n 10\n \"\"\"\n\n otcommondeltas = kwArgs['otcommondeltas']\n \n wBase = w.subWalker(0, relative=True)\n format = w.unpack(\"H\")\n \n if format != 3:\n raise ValueError(\n \"Unknown format for Coordinate_variation: %d\" % (format,))\n \n n = w.unpack(\"h\")\n \n dlt = device.Device.fromwalker(\n wBase.subWalker(w.unpack(\"H\")),\n **kwArgs)\n \n ld = otcommondeltas[dlt]\n \n return cls(n, variation=ld)\n\n# -----------------------------------------------------------------------------\n\n#\n# Test code\n#\n\nif 0:\n def __________________(): pass\n\nif __debug__:\n import operator\n from fontio3 import utilities\n from fontio3.opentype import living_variations\n from fontio3.utilities import writer\n \n def _fakeEditor():\n from fontio3.head import head\n \n e = utilities.fakeEditor(0x10000)\n e.head = head.Head()\n return e\n\n LR = living_variations.LivingRegion\n LD = living_variations.LivingDeltas\n LDM = living_variations.LivingDeltasMember\n\n d = {'wght': (-0.75, 0.0, 1.0), 'wdth': (-1.0, 0.25, 0.75)}\n key = LR.fromdict(d)\n testLD1 = LD({LDM((key, -180))})\n testLD2 = LD({LDM((key, 10))})\n\n _testingValues = (\n Coordinate_variation(25, variation=testLD1),\n Coordinate_variation(-10, variation=testLD2),\n Coordinate_variation(15, variation=testLD2),\n # bad values start here\n Coordinate_variation(-20000, variation=testLD1))\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n if __debug__:\n _test()\n","sub_path":"fontio3/build/lib.linux-x86_64-3.6/fontio3/BASE/coordinate_variation.py","file_name":"coordinate_variation.py","file_ext":"py","file_size_in_byte":10495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"606613891","text":"import threading\nimport time\n\nfrom core.logging_handler import Logging\nfrom dataSource.DAO import DAO\n\n\nclass DatabaseUpdateService:\n\n def __init__(self):\n self.log = Logging()\n\n def start(self, accountDic, updateTime):\n threadName = 'Database-Update-Service'\n try:\n t = threading.Thread(target=DatabaseUpdateService.update_loop,\n name=threadName,\n args=(self, accountDic, updateTime))\n t.start()\n self.log.info('Database-Update-Service Successfully')\n except:\n self.log.warning('Database-Update-Service could not be created')\n\n def update_loop(self, accountDic, updateTime):\n\n def string_builder(i):\n queryPart = (\n 'UPDATE `accounts` SET '\n '`pass` = \\'{}\\','\n '`rank` = \\'{}\\','\n '`nickname` = \\'{}\\','\n '`lastConnectionDate` = \\'{}\\','\n '`lastIP` = \\'{}\\','\n '`friends` = \\'{}\\','\n '`reload_needed` = \\'{}\\','\n '`logged` = \\'{}\\','\n '`subscribe` = \\'{}\\' '\n 'WHERE (`id` = \\'{}\\');'.format(i[\"pass\"],i[\"rank\"],i[\"nickname\"],\n i[\"lastConnectionDate\"],\n i[\"lastIP\"],i[\"friends\"],i[\"reload_needed\"],\n i[\"logged\"],i[\"subscribe\"],i[\"id\"]))\n return queryPart\n\n dao = DAO()\n while True:\n time.sleep(updateTime)\n self.log.debug('[Database-Update-Service] - '\n 'Data is send to the database')\n start = time.time()\n counter = 0\n for i in accountDic:\n query = string_builder(i)\n dao.multi_update_data(query)\n counter = counter + 1\n 
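# Timing note: 'start' above and 'ende' below bracket one full update cycle; the\n            # debug log that follows reports the number of per-account UPDATE queries\n            # executed and the total elapsed seconds.\n            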
ende = time.time()\n self.log.debug('[Database-Update-Service] - Data was transferred '\n '(query:{}, total time: {:5.3f}s)'.format(counter, ende-start))\n","sub_path":"logon/src/dataSource/database_update_service.py","file_name":"database_update_service.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"85458238","text":"set_show_symmetry_master(1)\n\nset_matrix(20.0)\n\n\nadd_key_binding(\"Map to 1 sigma\",\"!\",\nlambda: set_contour_level_in_sigma(scroll_wheel_map(),1))\n\nset_show_environment_distances_h_bonds(1)\n\nset_show_environment_distances(1)\nset_show_environment_distances_bumps(0)\nset_show_environment_distances_h_bonds(1)\n\n\nset_refine_ramachandran_angles(1)\n\nset_refine_with_torsion_restraints(1)\n\nadd_key_binding(\"Map plus 0.5 sigma\",\"|\",\nlambda: step_map_coarse_up(scroll_wheel_map()))\n\nadd_key_binding(\"Map minus 0.5 sigma\",\":\",\nlambda: step_map_coarse_down(scroll_wheel_map()))\n\n#****Misc. functions (for keybindings and scripting****\ndef display_only_active_map():\n active_map=scroll_wheel_map()\n if not map_is_displayed(active_map):\n set_map_displayed(active_map,1)\n displayed_maps_count=0\n for map_id in map_molecule_list():\n displayed_maps_count=displayed_maps_count+map_is_displayed(map_id)\n if (map_is_displayed(map_id)==1) and (map_id!=active_map):\n set_map_displayed(map_id,0)\n if map_is_displayed(map_id):\n displayed_map=map_id\n if displayed_maps_count==1:\n index_displayed=map_molecule_list().index(active_map)\n try:\n next_map=map_molecule_list()[index_displayed+1]\n except IndexError:\n next_map=map_molecule_list()[0]\n set_map_displayed(active_map,0)\n set_map_displayed(next_map,1)\n for map_id in map_molecule_list():\n if map_is_displayed(map_id):\n set_scrollable_map(map_id)\n set_scroll_wheel_map(map_id) #New\n\ndef hide_active_mol():\n mol_id=active_residue()[0]\n set_mol_displayed(mol_id,0)\n\ndef display_only_active():\n mol_id_active=active_residue()[0]\n displayed_mols_count=0\n for mol_id in model_molecule_list():\n displayed_mols_count=displayed_mols_count+mol_is_displayed(mol_id)\n if (mol_is_displayed(mol_id)==1) and (mol_id!=mol_id_active):\n set_mol_displayed(mol_id,0)\n if mol_is_displayed(mol_id):\n displayed_mol=mol_id\n if displayed_mols_count==1:\n index_displayed=model_molecule_list().index(mol_id_active)\n try: \n next_mol=model_molecule_list()[index_displayed+1]\n except IndexError:\n next_mol=model_molecule_list()[0]\n set_mol_displayed(displayed_mol,0)\n set_mol_displayed(next_mol,1)\n \ndef step_map_coarse_up(mol_id):\n current_level=get_contour_level_in_sigma(mol_id)\n if (current_level >= 0.5) and (current_level <= 10.0):\n new_level=current_level+0.1\n elif (current_level<0.5):\n new_level=0.5\n elif (current_level>10.0):\n new_level=10.0\n set_contour_level_in_sigma(mol_id,new_level)\n\ndef step_map_coarse_down(mol_id):\n current_level=get_contour_level_in_sigma(mol_id)\n if (current_level >= 0.5) and (current_level <= 10.0):\n new_level=current_level-0.1\n elif (current_level<0.5):\n new_level=0.5\n elif (current_level>10.0):\n new_level=10.0\n set_contour_level_in_sigma(mol_id,new_level)\n","sub_path":"coot/matts_preferences.py","file_name":"matts_preferences.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"73501285","text":"from RobotArm import RobotArm\n\n\n\nrobotArm = RobotArm('exercise 8')\nrobotArm.speed = 
3\n# Put your Python instructions from here on:\nfor rechts in range(1):\n    robotArm.moveRight()\n\n    for anntalblokken in range(7):\n        robotArm.grab()\n\n        for rechts2 in range(8):\n            robotArm.moveRight()\n        robotArm.drop()\n\n        for links in range(8):\n            robotArm.moveLeft()\n    \n\n\n\n# After your code, wait until the window is closed:\nrobotArm.wait()\n","sub_path":"robotarm-python-2021-main/oefening8.py","file_name":"oefening8.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"182513599","text":"from django.shortcuts import render,HttpResponse,redirect\nfrom .models import *\nimport datetime\nfrom django.db.models import F\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\n\n\ndef adlogin(request):\n    return render(request, 'adlogin.html')\n\n# Verify user login\n# User login\n\ndef user_login(request):\n    # Use the model to operate on the database\n    if request.method == 'GET':\n        return render(request, 'adlogin.html')\n    else:\n\n        # Login verification without session support\n\n        # u = request.POST.get('user')\n        # p = request.POST.get('pwd')\n        # t = request.POST.get('logintype')\n        # # print(t)\n        # if t == 'admin':\n        #     # Exception handling must be added, otherwise an error aborts the program. The error is: admin query DoesNotExist, i.e. no matching record was found in the database. The student login verification below is the same\n        #     # Alternatively, use the get_object_or_404 method, but I want a custom exception, so it is handled manually\n        #     try:\n        #         admin = Admin.objects.get(aduser=u)\n        #     except Admin.DoesNotExist:\n        #         return render(request, 'Login.html', {'msg': '用户不存在!'})\n        #     if admin.adpwd != p:\n        #         return render(request,'Login.html',{'msg':'密码错误!'})\n        #     else:\n        #         return redirect('/index')\n        # elif t=='stu':\n        #     try:\n        #         stu = Stu.objects.get(stuser=u)\n        #     except Stu.DoesNotExist:\n        #         return render(request, 'Login.html', {'msg': '用户不存在!'})\n        #     if stu.stpwd != p:\n        #         # return HttpResponse('用户名或密码错误!')\n        #         return render(request, 'Login.html', {'msg': '密码错误!'})\n        #     else:\n        #         return redirect('/index')\n        nid=request.POST.get('userid')\n        p = request.POST.get('pwd')\n        # t = request.POST.get('logintype')\n        # if t=='admin':\n        user=Admin.objects.filter(adid=nid,adpwd=p) # Compared with the above, the model filter is a more concise approach\n\n        # elif t=='stu':\n        #     user=Stu.objects.filter(stuser=nid,stpwd=p)\n\n        if user:\n            # If the user is found, store the name in the session\n            name=user[0].aduser\n            request.session['username'] = name\n            request.session.set_expiry(0)\n            return redirect('/index')\n        else:\n            messages.warning(request,'用户名或密码错误!')\n            return render(request,'adlogin.html')\n\n\n\n# User logout\ndef user_logout(request):\n\n    logout(request)\n    # return redirect('/login')\n    messages.success(request,'登出成功!')\n    return render(request, 'adlogin.html')\n\n\ndef stu_login(request):\n    if request.method=='GET':\n        return render(request,'stulogin.html')\n    else:\n        userid=request.POST.get('user')\n        pwd=request.POST.get('pwd')\n        user=Stu.objects.filter(stuser=userid,stpwd=pwd)\n        d = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n        if user:\n            request.session['userid']=userid\n            username = user[0].stuname\n            request.session['stuname'] = username\n            Stu.objects.filter(stuser=userid).update(lastime=d)\n            # Record the recent login\n            Recent.objects.create(userid=userid,username=username,logindate=d)\n            # Clear the session when the browser is closed\n            request.session.set_expiry(0)\n            return redirect('/stumain')\n        else:\n            messages.warning(request,'用户名或密码错误!')\n            return render(request,'stulogin.html')\n\ndef submaninfo(request):\n    username=request.session.get('stuname','')\n    if not username:\n        messages.warning(request,'请登录!')\n        return render(request,'stulogin.html')\n    if request.method=='GET':\n        equlist=Equinfo.objects.all()\n        stat={'equlist':equlist,'username':username}\n        return 
render(request,'submaninfo.html',stat)\n\ndef subadd(request):\n    ret={'status':True,'error':None}\n    equid=request.POST.get('equid')\n    systemrun=request.POST.get('sytemrun')\n    adout=request.POST.get('adout')\n    otherset=request.POST.get('otherset')\n    mouse=request.POST.get('mouse')\n    keyboard=request.POST.get('keyboard')\n    screen=request.POST.get('screen')\n    engine=request.POST.get('engine')\n    note=request.POST.get('note')\n    d = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n    stat=2\n\n    if mouse!='正常' or keyboard!='正常' or screen!='正常' or engine!='正常':\n        stat=1\n    try:\n\n        man = Mainfo.objects.create(\n            id_ma_id=equid,\n            systemrun=systemrun,\n            adout=adout,\n            subtime=d,\n            othset=otherset,\n            mouse=mouse,\n            keyboard=keyboard,\n            screen=screen,\n            note=note,\n            engine=engine,\n            stat=stat\n        )\n        # Increment the submission count once\n        nid = request.session.get('userid', '')\n        stu = Stu.objects.filter(stuser=nid)\n        total=stu[0].total\n        total = total + 1\n        Stu.objects.filter(stuser=nid).update(total=total)\n        print('ok')\n        # messages.success(request,'提交成功!可继续提交其他机器维护记录!')\n        return HttpResponse('提交成功!可继续提交其他机器维护记录!')\n        # messages.success(request,'提交成功!')\n        # return render(request,'su')\n    except Exception as e:\n        ret['status']=False\n        ret['error']=e\n        messages.warning(request,e)\n        return HttpResponse(str(ret))\n        # messages.success(request,e)\n        # return redirect('/submaninfo')\n\n\ndef stumain(request):\n    username = request.session.get('stuname', '')\n    if not username:\n        messages.warning(request, '请登录!')\n        return render(request, 'stulogin.html')\n    if request.method=='GET':\n        return render(request,'ui-cards.html')","sub_path":"EquMainSys/equipment/login_views.py","file_name":"login_views.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"352641332","text":"#! 
python3\n#coding:UTF-8\n\n'''\nA simple tool to connet github.com or coding.net repertory as a FTP server.\nAuthor : Money\nLicence : MIT\n'''\n\nimport socket, threading, os, sys, time\nimport urllib.request,json,pickle,functools\n\n# Please configure these\napiurl = \"https://coding.net/u/moneyg/p/WinKit/git/raw/master/\"\n# ↑ This is just an example,see README.md\nlisten_ip = \"localhost\"\nlisten_port = 21\nmax_connections = 500\nconn_timeout = 120\n\nclass FtpConnection(threading.Thread):\n def __init__(self, fd):\n threading.Thread.__init__(self)\n self.fd = fd\n self.running = True\n self.setDaemon(True)\n self.alive_time = time.time()\n self.option_utf8 = False\n self.option_pasv = True\n self.username = \"\"\n def process(self, cmd, arg):\n cmd = cmd.upper()\n print(\">>\", cmd, arg)\n if cmd == \"BYE\" or cmd == \"QUIT\":\n self.message(221, \"Bye!\")\n self.running = False\n return\n elif cmd == \"USER\":\n if arg == \"\": arg = \"anonymous\"\n for c in arg:\n if not c.isalpha() and not c.isdigit() and c!=\"_\":\n self.message(530, \"Incorrect username.\")\n return\n self.username = arg\n self.curr_dir = \"/\"\n self.message(230, \"Identified!\")\n return\n elif cmd == \"PASS\":\n self.message(230, \"Identified!\")\n return\n\n self.alive_time = time.time()\n finish = True\n if cmd == \"NOOP\":\n self.message(200, \"ok\")\n elif cmd == \"TYPE\":\n self.message(200, \"ok\")\n elif cmd == \"SYST\":\n self.message(200, \"UNIX\")\n elif cmd == \"EPSV\" or cmd == \"PASV\":\n self.option_pasv = True\n try:\n self.data_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.data_fd.bind((listen_ip, 0))\n self.data_fd.listen(1)\n ip, port = self.data_fd.getsockname()\n if cmd == \"EPSV\":\n self.message(229, \"Entering Extended Passive Mode (|||\" + str(port) + \"|)\")\n else:\n ipnum = socket.inet_aton(ip)\n self.message(227, \"Entering Passive Mode (%s,%u,%u).\" %\n (\",\".join(ip.split(\".\")), (port>>8&0xff), (port&0xff)))\n except:\n self.message(500, \"failed to create data socket.\")\n elif cmd == \"EPRT\":\n self.message(500, \"implement EPRT later...\")\n elif cmd == \"PORT\":\n self.option_pasv = False\n self.data_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s = arg.split(\",\")\n self.data_ip = \".\".join(s[:4])\n self.data_port = int(s[4])*256 + int(s[5])\n self.message(200, \"ok\")\n elif cmd == \"PWD\" or cmd == \"XPWD\":\n if self.curr_dir == \"\": self.curr_dir = \"/\"\n self.message(257, '\"' + self.curr_dir + '\"')\n elif cmd == \"LIST\" or cmd == \"NLST\":\n if arg != \"\" and arg[0] == \"-\": arg = \"\"\n try:\n pathlist = self.parse_path(arg)\n pathlist.insert(0,pathtree)\n curr_tree = functools.reduce(lambda prev,to:prev[to],pathlist)\n if not isinstance(curr_tree,dict):\n raise KeyError\n except (AttributeError,KeyError):\n self.message(550, \"failed\")\n if not self.establish(): return\n self.message(150, \"ok\")\n for name,size in curr_tree.items():\n if cmd == \"NLST\":\n info = name + \"\\r\\n\"\n else:\n if isinstance(size, int):\n info = \"%s%s%s------- %04u %8s %8s %8lu %s %s\\r\\n\" % (\n \"-\",\"r\",\"w\",1, \"0\", \"0\",size,\n \"Jan 01 2017\",name)\n else:\n info = \"%s%s%s------- %04u %8s %8s %8lu %s %s\\r\\n\" % (\n \"d\",\"r\",\"w\",1, \"0\", \"0\",0,\n \"Jan 01 2017\",name)\n self.data_fd.send(info.encode('utf8'))\n self.message(226, \"Limit size: \" + str(0))\n self.data_fd.close()\n self.data_fd = 0\n elif cmd == \"REST\":\n self.file_pos = int(arg)\n self.message(250, \"ok\")\n elif cmd == \"FEAT\":\n features = 
\"211-Features:\\r\\nSITES\\r\\nEPRT\\r\\nEPSV\\r\\nMDTM\\r\\nPASV\\r\\n\"\\\n \"REST STREAM\\r\\nSIZE\\r\\nUTF8\\r\\n211 End\\r\\n\"\n self.fd.send(features.encode(\"utf8\"))\n elif cmd == \"OPTS\":\n arg = arg.upper()\n if arg == \"UTF8 ON\":\n self.option_utf8 = True\n self.message(200, \"ok\")\n elif arg == \"UTF8 OFF\":\n self.option_utf8 = False\n self.message(200, \"ok\")\n else:\n self.message(500, \"unrecognized option\")\n elif cmd == \"CDUP\":\n finish = False\n arg = \"..\"\n elif cmd == \"ABOR\":\n self.message(200, \"ok\")\n else:\n finish = False\n if finish: return\n \n if arg == \"\":\n self.message(500, \"where's my argument?\")\n return\n newpath = self.parse_path(arg,split=False)\n can_read, can_write, can_modify = True, False, False\n try:\n if cmd == \"CWD\":\n self.curr_dir = newpath\n self.message(250, '\"' + newpath + '\"')\n elif cmd == \"MDTM\":\n self.message(213, \"20170101120000\")\n elif cmd == \"SIZE\":\n pathlist = self.parse_path(arg)\n pathlist.insert(0,pathtree)\n size = functools.reduce(lambda prev,to:prev[to],pathlist)\n self.message(213, size)\n elif cmd == \"RETR\":\n try:\n pathlist = self.parse_path(newpath)\n newpath = functools.reduce(lambda a,b:a+'/'+b,pathlist)\n print(\"Getting...... : \",newpath)\n f = urllib.request.urlopen(apiurl+newpath)\n except urllib.error.HTTPError as err:\n print(err)\n self.message(550, \"failed\")\n return\n if not self.establish(): return\n self.message(150, \"ok\")\n while self.running:\n self.alive_time = time.time()\n data = f.read(8192)\n if len(data) == 0: break\n self.data_fd.send(data)\n f.close()\n self.data_fd.close()\n self.data_fd = 0\n self.message(226, \"ok\")\n elif cmd in (\"XMKD\", \"MKD\", \"RNFR\", \"RNTO\", \"XRMD\", \"RMD\", \"DELE\", \"STOR\", \"APPE\"):\n self.message(550, \"permission denied.\")\n return\n else:\n self.message(500, cmd + \" not implemented\")\n except:\n self.message(550, \"failed.\")\n \n def establish(self):\n if self.data_fd == 0:\n self.message(500, \"no data connection\")\n return False\n if self.option_pasv:\n fd = self.data_fd.accept()[0]\n self.data_fd.close()\n self.data_fd = fd\n else:\n try:\n self.data_fd.connect((self.data_ip, self.data_port))\n except:\n self.message(500, \"failed to establish data connection\")\n return False\n return True\n\n\n \n def parse_path(self, path,split=True):\n if path in (\".\",\"\"): path=self.curr_dir\n if path[0] != \"/\":\n if self.curr_dir != \"/\":\n path = self.curr_dir + \"/\" + path\n else:\n path = \"/\" + path\n if split:\n if path == '/':\n return []\n path = os.path.normpath(path).replace(\"\\\\\", \"/\").split(\"/\")[1:]\n point = pathtree\n for i in range(len(path)-1):\n path[i] = next(filter(lambda x:x.lower() == path[i].lower(),point.keys()))\n point = point[path[i]]\n return path\n else:\n return os.path.normpath(path).replace(\"\\\\\", \"/\")\n\n def run(self):\n try:\n if len(conn_list) > max_connections:\n self.message(500, \"too many connections!\")\n self.fd.close()\n self.running = False\n return\n self.message(220, \"Welcome\")\n line = \"\"\n while self.running:\n data = self.fd.recv(4096).decode(\"utf8\")\n if len(data) == 0: break\n line += data\n if line[-2:] != \"\\r\\n\": continue\n line = line[:-2]\n space = line.find(\" \")\n if space == -1:\n self.process(line, \"\")\n else:\n self.process(line[:space], line[space+1:])\n line = \"\"\n except:\n print(\"error\", sys.exc_info())\n self.running = False\n self.fd.close()\n print(\"connection end\", self.fd, \"user\", self.username)\n\n def message(self, 
code, s):\n print(\"<<\",code,s)\n s = str(s).replace(\"\\r\", \"\")\n ss = s.split(\"\\n\")\n if len(ss) > 1:\n r = (str(code) + \"-\") + (\"\\r\\n\" + str(code) + \"-\").join(ss[:-1])\n r += \"\\r\\n\" + str(code) + \" \" + ss[-1] + \"\\r\\n\"\n else:\n r = str(code) + \" \" + ss[0] + \"\\r\\n\"\n if self.option_utf8:\n r = r\n self.fd.send(r.encode('utf8'))\n\ndef server_listen():\n global conn_list\n listen_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listen_fd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listen_fd.bind((listen_ip, listen_port))\n listen_fd.listen(1024)\n conn_lock = threading.Lock()\n print(\"ftpd is listening on \", listen_ip + \":\" + str(listen_port))\n\n while True:\n conn_fd, remote_addr = listen_fd.accept()\n print(\"connection from \", remote_addr, \"conn_list\", len(conn_list))\n conn = FtpConnection(conn_fd)\n conn.start()\n\n conn_lock.acquire()\n conn_list.append(conn)\n # check timeout\n try:\n curr_time = time.time()\n for conn in conn_list:\n if int(curr_time - conn.alive_time) > conn_timeout:\n if conn.running == True:\n conn.fd.shutdown(socket.SHUT_RDWR)\n conn.running = False\n conn_list = [conn for conn in conn_list if conn.running]\n except:\n print(sys.exc_info())\n conn_lock.release()\n\nconn_list = []\ndef main():\n global pathtree\n with open(\"pathtree.pic\",\"rb\") as f:\n pathtree = pickle.load(f)\n server_listen()\n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"gitftp.py","file_name":"gitftp.py","file_ext":"py","file_size_in_byte":11037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"331843037","text":"import numpy as np\nimport pandas as pd\nimport time\n\nclass StopManager:\n def __init__(self):\n self.Accuracy = []\n self.Time = []\n self.Epoch = []\n self.MaxEpoch = None\n self.MinEpoch = 10\n self.LookBackRatioForP1 = 0.2 # Look back 20% of epoch to calculate P1\n self.AverageNum = 10\n self.Threshold = 3e-4\n return\n\n def __str__(self):\n return \"StopManager: Threshold = %.1f%% / 100epoch, MinEpoch = %d, MaxEpoch = %d\" % (self.Threshold*100*100,self.MinEpoch,self.MaxEpoch)\n\n def GetInfo(self):\n params = self.GetAI()\n if np.isnan(params[\"AIrate\"]):\n return \"\"\n else:\n return (\"Current: Accuracy = %.1f%%, Epoch = %.0f, Improvement = +%.1f%% / 100 epoch ( = %.0f min ), Stop threshold = %.1f%% / 100epoch\\n\"%(params[\"Current:Accuracy\"]*100,params[\"Current:Epoch\"],params[\"AIrate\"]*100*100,params[\"EpochTime\"]*100/60.,self.Threshold*100*100)\n +\"At threshold: Accuracy = %.1f%%, Epoch = %.0f, Time remaining = %.0f min\"%(params[\"Threshold:Accuracy\"]*100.,params[\"Threshold:Epoch\"],params[\"Threshold:TimeRemaining\"]/60.))\n\n\n def SetMaximumEpoch(self,maxEpoch=None):\n self.MaxEpoch = maxEpoch\n return\n\n def SetMinimumEpoch(self,minEpoch=10):\n self.MinEpoch = minEpoch\n return\n\n def SetStopThreshold(self,threshold=3e-4):\n self.Threshold = threshold\n return\n\n def AddAccuracy(self,accuracy):\n self.Accuracy.append(accuracy)\n self.Time.append(time.time())\n self.Epoch.append(len(self.Epoch)+1)\n return\n\n def GetAI(self):\n epoch = np.array(self.Epoch,dtype=np.int32)\n accur = np.array(self.Accuracy,dtype=np.float32)\n deltaE = self.LookBackRatioForP1\n p1 = (accur-accur[(epoch*(1-deltaE)).astype(np.int)])/(np.log(epoch)-np.log(epoch*(1-deltaE)))\n p1avg = np.array(pd.Series(p1).rolling(window=self.AverageNum).mean())\n ai = p1 / epoch\n aiavg = p1avg / epoch\n atime = np.array(self.Time,dtype=np.float64)\n atime -= 
atime[0]\n        timeAvg = (atime[-1] - atime[(epoch[-1]*(1-deltaE)).astype(np.int)]) / (epoch[-1] - (epoch[-1]*(1-deltaE)).astype(np.int))\n\n        params = {}\n        Et = p1[-1] / self.Threshold\n        params[\"Threshold:TimeRemaining\"] = timeAvg * p1[-1] * (1./self.Threshold - 1./aiavg[-1])\n        params[\"Threshold:Epoch\"] = Et\n        params[\"Threshold:Accuracy\"] = accur[-1] + p1[-1] * (np.log(Et) - np.log(epoch[-1]))\n        params[\"Current:Epoch\"] = epoch[-1]\n        params[\"Current:Accuracy\"] = accur[-1]\n        params[\"AIrate\"] = aiavg[-1]\n        params[\"EpochTime\"] = timeAvg\n        return params\n\n    def StopCheck(self):\n\n        epoch = len(self.Accuracy)\n        if self.MaxEpoch and epoch >= self.MaxEpoch: return True\n        if epoch >= self.MinEpoch:\n            params = self.GetAI()\n            if params[\"AIrate\"] < self.Threshold:\n                return True\n        return False\n    url(r'^post/(?P<pk>\\d+)/$', views.post_detail, name='post_detail'),\n    url(r'^post/new/$', views.post_new, name='post_new'),\n    url(r'^post/(?P<pk>\\d+)/edit/$', views.post_edit, name='post_edit'),\n    url(r'^login/$',views.login,name='login'),\n    url(r'^invalid/$',views.invalid_login,name='invalid'),\n    url(r'^logout/$',views.logout,name='logout'),\n    url(r'auth/$',views.auth_view),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"261661089","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 27 16:08:26 2018\n\n@author: L1YAK01\n\"\"\"\n\n## IMPORTING PACKAGES ##\nimport scipy.io as sio\nimport scipy.stats as st\nimport numpy as np\nimport scipy as sp\nimport os\nimport time\n\n######################################################################\n########SETTING DIRECTORIES AND RETRIEVING DATA/FUNCTIONS#############\n######################################################################\n#os.chdir('S:/GOLD_Interns/NicoleTrachman2018/blp')\n#import simulatemarketsharesCtranslation as simms\n\n#os.chdir('S:/GOLD_Interns/AmyKim2018/blp_shared') #If working locally\n#os.chdir('/mnt/lan-shared/GOLD_Interns/AmyKim2018/blp_shared') #If working on cluster\n\n#Importing helper functions\nimport helper as hlp\n\n\ndef initialization(data, debug, usequadrature, NS, BS, Nmarkets, Nproducts):\n    #Total number of products\n    N=Nmarkets*Nproducts\n    \n    #Taking the smaller value of NS and Nmarkets\n    m=min(NS,Nmarkets)\n    \n    #Creating covariate matrix as numpy array\n    if debug:\n        covariates = np.hstack([data['hpwt'],data['space']])\n    else:\n        covariates = np.hstack([data['hpwt'],data['air'],data['mpd'],data['space']])\n    \n    #Getting number of covariates\n    ncovariates = covariates.shape[1]\n    \n    #Adding row of ones\n    Xdata = np.hstack([np.ones((len(covariates),1)),covariates])\n    \n    Ndata = len(Xdata)\n    dimX = Xdata.shape[1]\n\n    \n    covx = np.cov(covariates.T)\n    meanx = covariates.mean(axis=0)\n    \n    varp = np.var(data['price'], ddof=1)\n    meanprice = data['price'].T.mean(axis=1)[0]\n    \n    ######################################################################\n    ####################### KEY MATRIX CREATION ##########################\n    ######################################################################\n    \n    #Initializing matrix of zeros for sum_other and sum_rival\n    sum_other = np.zeros(Xdata.shape)\n    sum_rival = np.zeros(Xdata.shape)\n    \n    #Filling sum matrices with sum of characteristics from other and rival products\n    \n    ##GENERAL VERSION\n    #for i in range(Ndata):\n    #    other_ind = [(data['firmid']==data['firmid'][i]) & (data['cdid']==data['cdid'][i]) & (data['id']!=data['id'][i])][0] #Products in the same market and same firm\n    #    rival_ind = 
[(data['firmid']!=data['firmid'][i]) & (data['cdid']==data['cdid'][i])][0] #Products in the same market but different firm\n # total_ind = [(data['cdid']==data['cdid'][i])][0] #All products in the same market -->> Necessary?\n #\n # sum_other[i,:] = sum(Xdata[np.where(other_ind)[0],:])\n # sum_rival[i,:] = sum(Xdata[np.where(rival_ind)[0],:])\n \n ##MATCHING MATLAB\n for i in range(Ndata):\n other_ind = [(data['firmid']==data['firmid'][i]) & (data['cdid']==data['cdid'][i]) & (data['id']!=data['id'][i])][0] #Products in the same market and same firm\n rival_ind = [(data['firmid']!=data['firmid'][i]) & (data['cdid']==data['cdid'][i])][0] #Products in the same market but different firm\n total_ind = [(data['cdid']==data['cdid'][i])][0] #All products in the same market -->> Necessary?\n \n if len(other_ind[other_ind==True])==1:\n sum_other[i,:] = np.sum(Xdata[np.where(other_ind)[0],:])\n \n else:\n sum_other[i,:] = sum(Xdata[np.where(other_ind)[0],:])\n sum_rival[i,:] = sum(Xdata[np.where(rival_ind)[0],:])\n \n #Creating Instr. Var. matrix\n IV = np.hstack([Xdata, sum_other, sum_rival])\n covIVnotX = np.cov(IV[:,dimX:len(IV)].T, ddof=1)\n varIVnotX = np.var(IV[:,dimX:len(IV)], axis = 0, ddof=1)\n meanIVnotX = IV[:,dimX:len(IV)].mean(axis=0)\n dimIVnotX = len(meanIVnotX)\n \n if debug:\n theta2true = [2.009,1.586,1.51]\n Sigmatrue = np.diag(theta2true)\n betatrue = [-7.304,2.185,2.604,-0.2]\n gammatrue = [0.726,0.313,1.499]\n \n else:\n theta2true = [2.009,1.586,1.215,0.67,1.51]\n Sigmatrue = np.diag(theta2true);\n betatrue = [-7.304,2.185,0.579,-0.049,2.604,-0.2]\n gammatrue = [0.726,0.313,0.290,0.293,1.499]\n \n thetatrue = np.hstack([betatrue,theta2true])\n \n if usequadrature:\n [J,vdraws10,weights] = hlp.GH_Quadrature(10,dimX,np.identity(dimX))\n weights=weights.T\n musimtrue=np.matmul(np.matmul(Xdata,Sigmatrue),vdraws10.T)\n else:\n vdraws = np.random.multivariate_normal([0]*dimX,np.identity(dimX),NS)\n musimtrue=np.matmul(np.matmul(Xdata,Sigmatrue),vdraws.T)\n weights= np.tile(1/NS,(1,NS))\n \n ######################################################################\n ################## CREATING DELTAHAT AND XIHAT #######################\n ######################################################################\n \n #deltahat = simms.simulatemarketshares(data['share'],data['outshr'],musimtrue,np.shape(musimtrue)[1],data['cdindex'],weights,1e-4)\n deltahat = np.log(data['share']/data['outshr']) ##PLACEHOLDER FOR COMPUTEDELTAFROMSIMULATIONCCODE\n C = np.hstack([Xdata,data['price']])\n P = np.matmul(np.linalg.solve(np.matmul(IV.T,IV).T,IV.T).T,IV.T)\n betahat = np.linalg.solve(np.matmul(np.matmul(C.T,P),C),np.matmul(np.matmul(C.T,P),deltahat))\n xihat = (deltahat - np.matmul(C,betahat))\n varxi = np.var(xihat,axis=0,ddof=1)\n \n #Getting covariance between xihat and price\n covmatpricexihat = np.cov(xihat,data['price'],rowvar=False)\n covpricexihat = covmatpricexihat[0,1]\n \n return [dimX, meanx, covx, varxi, varp, covpricexihat, meanIVnotX, covIVnotX, gammatrue, Sigmatrue, betatrue, Xdata, thetatrue]\n\n\ndef datageneration(solveforprices, usequadrature, NS, Nmarkets, Nproducts, dimX, meanx, covx, varxi, varp, covpricexihat, meanIVnotX, covIVnotX, gammatrue, Sigmatrue, betatrue, Xdata):\n \n N = Nmarkets * Nproducts\n X= np.zeros((N, dimX))\n xi= np.zeros((N,1))\n \n for j in range(Nmarkets):\n xcharacteristics= np.random.multivariate_normal(meanx,covx,[Nproducts])\n X[(j*Nproducts):((j+1)*Nproducts), 0:5] = np.hstack([np.ones((Nproducts,1)), xcharacteristics])\n ximarket= np.random.normal(0, 
0.5*varxi, Nproducts)\n xi[((j-1)*Nproducts):(j*Nproducts), :] = ximarket.shape\n \n v1= np.random.normal(0,abs(covpricexihat-varxi), Nmarkets*Nproducts)\n v2= np.random.normal(0,varxi, Nmarkets*Nproducts)\n v3= np.random.normal(0, abs(varp-varxi), Nmarkets*Nproducts)\n v4= np.random.multivariate_normal(meanIVnotX, covIVnotX,[N])\n \n v3ones= np.ones((v3.size,10))\n addstep= np.add(v3.size, v4[:]) #unsure of accuracy of this calculation \n IV= np.hstack((X, addstep)) \n \n \n etaconst=0.001\n eta=etaconst*(v1+v3)\n \n mc=X@gammatrue+eta\n \n #want to check that this produces the same value as MATLAB version-- may need to use sparse.kron or different complex conjugate transpose\n cdid= np.kron(range(1,Nmarkets+1),np.ones((1,Nproducts))).T\n cdindex = [i for i in range(Nproducts,N+1,Nproducts)]\n ######################################################################\n ###################### SOLVING FOR PRICES ############################\n ######################################################################\n \n start_time = time.time()\n #solving for prices\n if solveforprices:\n [J,vdraws10,weights] = hlp.GH_Quadrature(10,dimX,np.identity(dimX))\n weights=weights.T\n musim = np.matmul(np.matmul(X,Sigmatrue),vdraws10.T)\n print('Solving for prices using Bertrand-Nash')\n price = np.array([1.0]*N)\n profits = np.array([1]*Nmarkets)\n for j in range(Nmarkets):\n print('Market #:',j+1,'/',Nmarkets)\n cdindexformarket = cdindex[j]\n p0 = np.random.random(2)\n lb = np.array([0.01]*Nproducts)\n ub = np.array([100]*Nproducts)\n while p0[0] < lb[0] or p0[1] < lb[0]:\n p0 = np.random.random(2)\n priceformarket = sp.optimize.least_squares(lambda price: hlp.equationtosolveforprice(price, X[(cdindexformarket-Nproducts):cdindexformarket,:], betatrue, musim[(cdindexformarket-Nproducts):cdindexformarket,:],\n np.shape(musim)[1],Nproducts,1, mc[(cdindexformarket-Nproducts):cdindexformarket],weights),p0,bounds=(lb,ub),verbose=1)\n print(priceformarket['x'])\n price[(cdindexformarket-Nproducts):cdindexformarket] = priceformarket['x']\n else:\n e = st.truncnorm.rvs(0,1,size=(N,1))\n price = 0.5*abs(2+0.5*xi+e+np.sum(X,axis=1))\n \n price_time = time.time()-start_time\n print(\"Elapsed time:\", str(price_time))\n ##NOTE: ElAPSED TIME = 1700s = approx. 
28 min\n    ######################################################################\n    ################### SIMULATING MARKET SHARES #########################\n    ######################################################################\n    \n    deltatrue = (np.matmul(np.c_[np.array(X),np.array(price)],np.array(betatrue).T) + np.reshape(xi,(-1,1)).T).T\n    if usequadrature:\n        [J,vdraws10,weights] = hlp.GH_Quadrature(10,dimX,np.identity(dimX))\n        weights=weights.T\n        musim = np.matmul(np.matmul(X,Sigmatrue),vdraws10.T)\n    else:\n        vdraws = np.random.multivariate_normal([0]*dimX,np.identity(dimX),NS)\n        musim = np.matmul(np.matmul(Xdata,Sigmatrue),vdraws.T)\n        weights = np.tile(1/NS,(1,NS))\n    \n    individualshares, outsideshares = hlp.simulateMarketShares(deltatrue, musim, np.shape(musim)[1],cdindex)\n    simshare = np.sum(np.tile(weights,(N,1))*individualshares,axis=1)\n    simoutshare = np.sum(np.tile(weights,(N,1))*individualshares,axis=1)\n    \n    deltahat = np.log(simshare/simoutshare) ##PLACEHOLDER FOR COMPUTEDELTAFROMSIMULATIONCCODE\n    C = np.hstack([X,price[:,None]])\n    P = np.linalg.solve((IV.T @ IV).T,IV.T).T @ IV.T\n    dimIV = np.shape(IV)[1]\n    \n    beta0 = np.linalg.lstsq((C.T @ P @ C),(C.T @ P @ deltahat),rcond=None)[0]\n    theta0 = np.concatenate([beta0,np.diag(Sigmatrue)])\n    \n    return [C, X, xi, weights, price, simshare, simoutshare, cdid, cdindex, IV, vdraws, vdraws10, theta0, beta0]\n","sub_path":"python/data_generation_final.py","file_name":"data_generation_final.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"253632385","text":"# -*- coding: utf-8 -*-\n# See LICENSE file for full copyright and licensing details.\n\n\"\"\"\nAdded to perform the amazon import, export operations and added onchange and methods\nto process for different amazon operations.\n\"\"\"\n\nimport base64\nimport csv\nimport time\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom io import StringIO\n\nfrom odoo import models, fields, api, _\nfrom odoo.addons.iap.tools import iap_tools\nfrom odoo.exceptions import UserError\n\nfrom ..endpoint import DEFAULT_ENDPOINT\n\n\nclass AmazonProcessImportExport(models.TransientModel):\n    \"\"\"\n    Added class to perform amazon import and export operations.\n    \"\"\"\n    _name = 'amazon.process.import.export'\n    _description = 'Amazon Import Export Process'\n\n    seller_id = fields.Many2one('amazon.seller.ept', string='Amazon Seller',\n                                help=\"Select Amazon Seller Account\")\n\n    amazon_program = fields.Selection(related=\"seller_id.amazon_program\")\n\n    instance_id = fields.Many2one('amazon.instance.ept', string='Instance',\n                                  help=\"This field relates to the Amazon instance.\")\n    order_removal_instance_id = fields.Many2one('amazon.instance.ept', string='Removal Instance',\n                                                help=\"This instance is used for the Removal order.\")\n    is_allow_to_create_removal_order = fields.Boolean(\n        related=\"instance_id.is_allow_to_create_removal_order\",\n        string='Allow Create Removal Order In FBA?',\n        help=\"Allow to create removal order in FBA.\")\n    is_another_soft_create_fba_inventory = fields.Boolean(\n        related=\"seller_id.is_another_soft_create_fba_inventory\",\n        string=\"Does another software create the FBA Inventory reports?\",\n        help=\"Does another software create the FBA Inventory reports\")\n    instance_ids = fields.Many2many(\"amazon.instance.ept\", 'amazon_instance_import_export_rel',\n                                    'process_id', 'instance_id', \"Instances\",\n                                    help=\"Select Amazon Marketplaces where you want to perform \"\n                                         \"operations.\")\n    list_settlement_report = fields.Boolean(\"List settlement report?\")\n    report_start_date = fields.Datetime(\"Start Date\", help=\"Start date of report.\")\n    report_end_date = fields.Datetime(\"End Date\", help=\"End date of report.\")\n    selling_on = fields.Selection([\n        ('FBM', 'FBM'),\n        ('FBA', 'FBA'),\n        ('fba_fbm', 'FBA & FBM')\n    ], 'Operation For')\n    operations = fields.Selection([\n        ('Export_Stock_From_Odoo_To_Amazon', 'Export Stock from Odoo to Amazon'),\n        ('Update_Track_Number_And_Ship_Status', 'Update Tracking Number & Shipment Status'),\n        ('Check_Cancel_Orders_FBM', 'Check Cancel Orders'),\n        ('Import_FBM_Shipped_Orders', 'Import FBM Shipped Orders'),\n        ('Import_Missing_Unshipped_Orders', 'Import Missing UnShipped Orders'),\n        ('Import_Unshipped_Orders', 'Import Unshipped Orders')\n    ], 'FBM Operations')\n    fba_operations = fields.Selection([\n        ('Import_Pending_Orders', 'Import Pending Orders'),\n        ('Check_Cancel_Orders_FBA', 'Check Cancel Orders'),\n        ('Shipment_Report', 'Shipment Report'),\n        ('Stock_Adjustment_Report', 'Stock Adjustment Report'),\n        ('Removal_Order_Report', 'Removal Order Report'),\n        ('Customer_Return_Report', 'Customer Return Report'),\n        ('removal_order_request', 'Removal Order Request'),\n        ('Import Inbound Shipment', 'Import Inbound Shipment'),\n        ('Create_Inbound_Shipment_Plan', 'Create Inbound Shipment Plan'),\n        ('fba_live_inventory_report', 'FBA Live Inventory')\n    ], 'Operations')\n\n    both_operations = fields.Selection([\n        ('Import_Product', 'Import Product'),\n        ('Sync_Active_Products', 'Sync Active Products'),\n        ('Export_Price_From_Odoo_To_Amazon', 'Export Price From Odoo to Amazon'),\n        ('List_Settlement_Report', 'List Settlement report'),\n        ('request_rating_report', 'Request Rating Report'),\n        ('vcs_tax_report', 'VCS Tax Report')\n    ], 'FBA & FBM Operations')\n    is_vcs_enabled = fields.Boolean('Is VCS Report Enabled ?', default=False, store=False)\n    is_split_report = fields.Boolean('Is Split Report ?', default=False)\n    split_report_by_days = fields.Selection([\n        ('3', '3'),\n        ('7', '7'),\n        ('15', '15')\n    ])\n    fbm_order_updated_after_date = fields.Datetime('Updates After')\n    import_fba_pending_sale_order = fields.Boolean('Sale order (Only Pending Orders)',\n                                                   help=\"System will import pending FBA orders \"\n                                                        \"from Amazon\")\n    check_order_status = fields.Boolean(\"Check Cancelled Order in Amazon\",\n                                        help=\"If ticked, system will check whether the order status \"\n                                             \"is canceled in Amazon, and then cancel that \"\n                                             \"order \"\n                                             \"in Odoo too.\")\n    export_inventory = fields.Boolean()\n    export_product_price = fields.Boolean('Update Product Price')\n    updated_after_date = fields.Datetime('Updated After')\n    shipment_id = fields.Char()\n    from_warehouse_id = fields.Many2one('stock.warehouse', string=\"Warehouse\")\n    update_price_in_pricelist = fields.Boolean(string='Update price in pricelist?', default=False,\n                                               help='Update or create product line in pricelist '\n                                                    'if ticked.')\n    auto_create_product = fields.Boolean(string='Auto create product?', default=False,\n                                         help='Create product in ERP if not found.')\n    file_name = fields.Char(string='Name')\n    choose_file = fields.Binary(filename=\"filename\")\n    delimiter = fields.Selection([('tab', 'Tab'), ('semicolon', 'Semicolon'), ('comma', 'Comma')],\n                                 string=\"Separator\", default='comma')\n    user_warning = fields.Text(string=\"Note: \", store=False)\n\n    @api.onchange('report_start_date', 'report_end_date')\n    def onchange_shipment_report_date(self):\n        \"\"\"\n        Added onchange to allow the option to split the report when the selected 
date range spans\n        more than 7 days.\n        \"\"\"\n        if self.report_start_date and self.report_end_date:\n            count = self.report_end_date.date() - self.report_start_date.date()\n            if count.days > 7 and not self.seller_id.is_another_soft_create_fba_shipment:\n                self.is_split_report = True\n            else:\n                self.is_split_report = False\n\n    @api.onchange('selling_on')\n    def onchange_selling_on(self):\n        \"\"\"\n        Resets the operation selections to False when the 'selling on' value changes.\n        \"\"\"\n        self.operations = False\n        self.fba_operations = False\n        self.both_operations = False\n\n    @api.onchange('operations')\n    def onchange_operations(self):\n        \"\"\"\n        On change of the operations field it will check whether an active scheduler\n        exists and, if so, report its next run time.\n        \"\"\"\n        self.export_inventory = False\n        self.export_product_price = False\n        self.list_settlement_report = False\n        self.fbm_order_updated_after_date = False\n        self.updated_after_date = False\n        self.report_start_date = False\n        self.report_end_date = False\n\n        self.user_warning = None\n        if self.operations == \"Export_Stock_From_Odoo_To_Amazon\":\n            self.check_running_schedulers('ir_cron_auto_export_inventory_seller_')\n\n        if self.operations == \"Update_Track_Number_And_Ship_Status\":\n            self.check_running_schedulers('ir_cron_auto_update_order_status_seller_')\n\n        if self.operations == \"Check_Cancel_Orders_FBM\":\n            self.check_running_schedulers('ir_cron_auto_check_canceled_fbm_order_in_amazon_seller_')\n\n        if self.operations == \"Import_Unshipped_Orders\":\n            self.check_running_schedulers('ir_cron_import_amazon_orders_seller_')\n\n    @api.onchange('fba_operations')\n    def onchange_fba_operations(self):\n        \"\"\"\n        On change of the fba_operations field it sets the start and end dates as per the\n        seller configuration; the default start date is 3 days before today.\n        @author: Keyur Kanani\n        :return:\n        \"\"\"\n        self.user_warning = None\n        if self.fba_operations == \"Shipment_Report\":\n            self.report_start_date = datetime.now() - timedelta(self.seller_id.shipping_report_days)\n            self.report_end_date = datetime.now()\n            self.check_running_schedulers('ir_cron_import_amazon_fba_shipment_report_seller_')\n\n        if self.fba_operations == \"Customer_Return_Report\":\n            self.report_start_date = datetime.now() - timedelta(\n                self.seller_id.customer_return_report_days)\n            self.report_end_date = datetime.now()\n            self.check_running_schedulers('ir_cron_auto_import_customer_return_report_seller_')\n\n        if self.fba_operations == \"Stock_Adjustment_Report\":\n            self.report_start_date = datetime.now() - timedelta(\n                self.seller_id.inv_adjustment_report_days)\n            self.report_end_date = datetime.now()\n            self.check_running_schedulers('ir_cron_create_fba_stock_adjustment_report_seller_')\n\n        if self.fba_operations == \"Removal_Order_Report\":\n            self.report_start_date = datetime.now() - timedelta(\n                self.seller_id.removal_order_report_days)\n            self.report_end_date = datetime.now()\n            self.check_running_schedulers('ir_cron_create_fba_removal_order_report_seller_')\n\n        if self.fba_operations == \"fba_live_inventory_report\" and \\\n                self.seller_id.is_another_soft_create_fba_inventory:\n            self.report_start_date = datetime.now() - timedelta(\n                self.seller_id.live_inv_adjustment_report_days)\n            self.report_end_date = datetime.now()\n            self.check_running_schedulers('ir_cron_import_stock_from_amazon_fba_live_report_')\n\n        if self.fba_operations == \"Import_Pending_Orders\":\n            self.check_running_schedulers('ir_cron_import_amazon_fba_pending_order_seller_')\n\n        if self.fba_operations == \"Import Inbound 
Shipment\":\n self.check_running_schedulers('ir_cron_inbound_shipment_check_status_')\n\n @api.onchange('both_operations')\n def onchange_both_operations(self):\n \"\"\"\n On change of fba_fbm_operations field it will check the active scheduler or scheduler\n exist then it's next run time.\n @author: Keyur Kanani\n :return:\n \"\"\"\n self.user_warning = None\n if self.both_operations == \"List_Settlement_Report\":\n self.check_running_schedulers('ir_cron_auto_import_settlement_report_seller_')\n if self.both_operations == \"request_rating_report\":\n self.report_start_date = datetime.now() - timedelta(self.seller_id.rating_report_days)\n self.report_end_date = datetime.now()\n self.check_running_schedulers('ir_cron_rating_request_report_seller_')\n if self.both_operations == \"vcs_tax_report\":\n self.report_start_date = datetime.now() - timedelta(self.seller_id.fba_vcs_report_days)\n self.report_end_date = datetime.now()\n self.check_running_schedulers('ir_cron_auto_import_vcs_tax_report_seller_')\n\n def check_running_schedulers(self, cron_xml_id):\n \"\"\"\n use: 1. If scheduler is running for ron_xml_id + seller_id, then this function will\n notify user that\n the process they are doing will be running in the scheduler.\n if they will do this process then the result cause duplicate.\n 2. Also if scheduler is in progress in backend then the execution will give UserError\n Popup\n and terminates the process until scheduler job is done.\n :param cron_xml_id: string[cron xml id]\n :return:\n \"\"\"\n cron_id = self.env.ref('amazon_ept.%s%d' % (cron_xml_id, self.seller_id.id),\n raise_if_not_found=False)\n if cron_id and cron_id.sudo().active:\n res = cron_id.try_cron_lock()\n if self._context.get('raise_warning') and res and res.get('reason'):\n raise UserError(_(\"You are not allowed to run this Action. 
\\n\"\n \"The Scheduler is already started the Process of Importing \"\n \"Orders.\"))\n if res and res.get('result'):\n self.user_warning = \"This process is executed through scheduler also, \" \\\n \"Next Scheduler for this process will run in %s Minutes\" \\\n % res.get('result')\n elif res and res.get('reason'):\n self.user_warning = res.get('reason')\n\n def import_export_processes(self):\n \"\"\"\n Import / Export Operations are managed from here.\n as per selection on wizard this function will execute\n :return: True\n \"\"\"\n sale_order_obj = self.env['sale.order']\n fbm_sale_order_report_obj = self.env['fbm.sale.order.report.ept']\n fba_shipping_report_obj = self.env['shipping.report.request.history']\n customer_return_report_obj = self.env['sale.order.return.report']\n amazon_product_obj = self.env['amazon.product.ept']\n stock_adjustment_report_obj = self.env['amazon.stock.adjustment.report.history']\n removal_order_request_report_record = self.env['amazon.removal.order.report.history']\n live_inventory_request_report_record = self.env['amazon.fba.live.stock.report.ept']\n amazon_removal_order_obj = self.env['amazon.removal.order.ept']\n import_shipment_obj = self.env['amazon.inbound.import.shipment.ept']\n rating_report_obj = self.env['rating.report.history']\n vcs_tax_report_obj = self.env['amazon.vcs.tax.report.ept']\n seller_pending_order_marketplaces = defaultdict(list)\n cancel_order_marketplaces = defaultdict(list)\n cancel_order_marketplaces_fbm = defaultdict(list)\n seller_stock_instance = defaultdict(list)\n export_product_price_instance = defaultdict(list)\n\n if self.both_operations == \"List_Settlement_Report\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_auto_import_settlement_report_seller_')\n vals = {'report_type': '_GET_V2_SETTLEMENT_REPORT_DATA_FLAT_FILE_V2_',\n 'name': 'Amazon Settlement Reports',\n 'model_obj': self.env['settlement.report.ept'],\n 'sequence': self.env.ref('amazon_ept.seq_import_settlement_report_job'),\n 'tree_id': self.env.ref('amazon_ept.amazon_settlement_report_tree_view_ept'),\n 'form_id': self.env.ref('amazon_ept.amazon_settlement_report_form_view_ept'),\n 'res_model': 'settlement.report.ept',\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date\n }\n return self.get_reports(vals)\n if self.both_operations == \"request_rating_report\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_rating_request_report_seller_')\n if not self.report_start_date or not self.report_end_date:\n raise UserError(_('Please select Date Range.'))\n\n rating_report_record = rating_report_obj.create({\n 'seller_id': self.seller_id.id,\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date\n })\n return {\n 'name': _('Rating Report Request History'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'rating.report.history',\n 'type': 'ir.actions.act_window',\n 'res_id': rating_report_record.id\n }\n if self.operations == 'Update_Track_Number_And_Ship_Status':\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_auto_update_order_status_seller_')\n return sale_order_obj.amz_update_tracking_number(self.seller_id)\n\n if self.operations == 'Import_FBM_Shipped_Orders':\n return sale_order_obj.import_fbm_shipped_or_missing_unshipped_orders(self.seller_id, self.instance_ids,\n self.fbm_order_updated_after_date, ['Shipped'])\n if self.operations == 'Import_Missing_Unshipped_Orders':\n return 
sale_order_obj.import_fbm_shipped_or_missing_unshipped_orders(self.seller_id, self.instance_ids,\n self.fbm_order_updated_after_date,\n ['Unshipped', 'PartiallyShipped'])\n\n\n if self.operations == \"Import_Unshipped_Orders\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_import_amazon_orders_seller_')\n record = fbm_sale_order_report_obj.create({\n 'seller_id': self.seller_id.id,\n 'report_type': '_GET_FLAT_FILE_ORDER_REPORT_DATA_',\n })\n record.request_report()\n return {\n 'name': _('FBM Sale Order'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fbm.sale.order.report.ept',\n 'type': 'ir.actions.act_window',\n 'res_id': record.id\n }\n if self.fba_operations == \"Shipment_Report\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_import_amazon_fba_shipment_report_seller_')\n if not self.report_start_date or not self.report_end_date:\n raise UserError(_('Please select Date Range.'))\n if self.seller_id.is_another_soft_create_fba_shipment:\n vals = {'report_type': '_GET_AMAZON_FULFILLED_SHIPMENTS_DATA_',\n 'name': 'FBA Shipping Report',\n 'model_obj': self.env['shipping.report.request.history'],\n 'sequence': self.env.ref('amazon_ept.seq_import_shipping_report_job'),\n 'tree_id': self.env.ref(\n 'amazon_ept.amazon_shipping_report_request_history_tree_view_ept'),\n 'form_id': self.env.ref(\n 'amazon_ept.amazon_shipping_report_request_history_form_view_ept'),\n 'res_model': 'shipping.report.request.history',\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date\n }\n self.get_reports(vals)\n elif self.is_split_report and not self.split_report_by_days:\n raise UserError(_('Please select the Split Report By Days.'))\n elif self.is_split_report and self.split_report_by_days:\n start_date = self.report_start_date\n end_date = False\n shipping_report_record_list = []\n\n while start_date <= self.report_end_date:\n if end_date:\n start_date = end_date\n\n if start_date >= self.report_end_date:\n break\n\n end_date = (start_date + timedelta(int(self.split_report_by_days))) - timedelta(\n 1)\n if end_date > self.report_end_date:\n end_date = self.report_end_date\n\n shipping_report_record = fba_shipping_report_obj.create({\n 'seller_id': self.seller_id.id,\n 'start_date': start_date,\n 'end_date': end_date\n })\n shipping_report_record.request_report()\n shipping_report_record_list.append(shipping_report_record.id)\n\n return {\n 'name': _('FBA Shipping Report'),\n 'view_mode': 'tree, form',\n 'views': [\n (self.env.ref(\n 'amazon_ept.amazon_shipping_report_request_history_tree_view_ept').id,\n 'tree'),\n (False, 'form')],\n 'res_model': 'shipping.report.request.history',\n 'type': 'ir.actions.act_window',\n 'res_id': shipping_report_record_list\n }\n else:\n shipping_report_record = fba_shipping_report_obj.create({\n 'seller_id': self.seller_id.id,\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date\n })\n shipping_report_record.request_report()\n return {\n 'name': _('FBA Shipping Report'),\n 'view_mode': 'form',\n 'res_model': 'shipping.report.request.history',\n 'type': 'ir.actions.act_window',\n 'res_id': shipping_report_record.id\n }\n if self.fba_operations == 'Customer_Return_Report':\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_auto_import_customer_return_report_seller_')\n customer_return_report_record = customer_return_report_obj.create({\n 'seller_id': self.seller_id.id,\n 'start_date': self.report_start_date,\n 
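# --- Illustrative sketch, not part of the module ---------------------------
# The split-report branch above walks from start to end in windows of
# `split_report_by_days` days; consecutive windows share their boundary day,
# exactly as in the while loop. Pure-Python rendering, with record creation
# replaced by collecting (start, end) tuples:
from datetime import datetime, timedelta

def split_windows(start: datetime, end: datetime, chunk: int):
    windows, cur = [], start
    while cur < end:
        w_end = min(cur + timedelta(days=chunk) - timedelta(days=1), end)
        windows.append((cur, w_end))
        cur = w_end
    return windows

for s, e in split_windows(datetime(2021, 1, 1), datetime(2021, 1, 20), 7):
    print(s.date(), "->", e.date())
# ----------------------------------------------------------------------------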
'end_date': self.report_end_date\n })\n customer_return_report_record.request_customer_return_report()\n return {\n 'name': _('Customer Return Report'),\n 'view_mode': 'form',\n 'res_model': 'sale.order.return.report',\n 'type': 'ir.actions.act_window',\n 'res_id': customer_return_report_record.id\n }\n if self.fba_operations == \"Stock_Adjustment_Report\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_create_fba_stock_adjustment_report_seller_')\n if not self.report_start_date or not self.report_end_date:\n raise UserError(_('Please select Date Range.'))\n\n stock_adjustment_report_record = stock_adjustment_report_obj.create({\n 'seller_id': self.seller_id.id,\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date\n })\n return {\n 'name': _('Stock Adjustment Report Request History'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'amazon.stock.adjustment.report.history',\n 'type': 'ir.actions.act_window',\n 'res_id': stock_adjustment_report_record.id\n }\n\n if self.fba_operations == 'fba_live_inventory_report':\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_import_stock_from_amazon_fba_live_report_')\n\n if self.seller_id.is_another_soft_create_fba_inventory:\n if not self.report_start_date or not self.report_end_date:\n raise UserError(_('Please select Date Range.'))\n vals = {'start_date': self.report_start_date,\n 'end_date': self.report_end_date,\n 'seller_id': self.seller_id, }\n fba_live_stock_report = live_inventory_request_report_record.get_inventory_report(\n vals)\n return fba_live_stock_report\n\n if self.amazon_program in ('pan_eu', 'cep'):\n report_type = '_GET_FBA_MYI_UNSUPPRESSED_INVENTORY_DATA_'\n self.create_and_request_amazon_live_inv_report_ids(report_type, False, False,\n False)\n\n elif not self.seller_id.is_european_region:\n report_type = '_GET_FBA_MYI_UNSUPPRESSED_INVENTORY_DATA_'\n self.with_context( \\\n {\n 'instance_ids': self.seller_id.instance_ids}).create_and_request_amazon_live_inv_report_ids( \\\n report_type, datetime.now(), False, False)\n\n elif self.amazon_program in ('efn'):\n start_date = (datetime.today().date() - timedelta(days=1)).strftime(\n '%Y-%m-%d 00:00:00')\n end_date = (datetime.today().date() - timedelta(days=1)).strftime(\n '%Y-%m-%d 23:59:59')\n report_type = '_GET_FBA_FULFILLMENT_CURRENT_INVENTORY_DATA_'\n self.create_and_request_amazon_live_inv_report_ids(report_type, False,\n start_date, end_date)\n elif self.amazon_program in ('mci', 'efn+mci'):\n start_date = (datetime.today().date() - timedelta(days=1)).strftime(\n '%Y-%m-%d 00:00:00')\n end_date = (datetime.today().date() - timedelta(days=1)).strftime(\n '%Y-%m-%d 23:59:59')\n report_type = '_GET_FBA_FULFILLMENT_CURRENT_INVENTORY_DATA_'\n self.with_context( \\\n {\n 'instance_ids': self.seller_id.instance_ids}).create_and_request_amazon_live_inv_report_ids( \\\n report_type, False, start_date, end_date)\n\n if self.fba_operations == \"Removal_Order_Report\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_create_fba_removal_order_report_seller_')\n if not self.report_start_date or not self.report_end_date:\n raise UserError(_('Please select Date Range.'))\n\n removal_order_request_report_record = removal_order_request_report_record.create({\n 'seller_id': self.seller_id.id,\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date\n })\n return {\n 'name': _('Removal Order Report Request History'),\n 'view_type': 'form',\n 
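# --- Illustrative sketch, not part of the module ---------------------------
# The EFN and MCI branches above request yesterday's inventory snapshot by
# formatting an explicit 00:00:00-23:59:59 window. That computation on its own:
from datetime import datetime, timedelta

yesterday = datetime.today().date() - timedelta(days=1)
start = yesterday.strftime("%Y-%m-%d 00:00:00")
end = yesterday.strftime("%Y-%m-%d 23:59:59")
print(start, "->", end)
# ----------------------------------------------------------------------------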
'view_mode': 'form',\n 'res_model': 'amazon.removal.order.report.history',\n 'type': 'ir.actions.act_window',\n 'res_id': removal_order_request_report_record.id\n }\n if self.fba_operations == \"removal_order_request\":\n if not self.is_allow_to_create_removal_order or not self.order_removal_instance_id:\n raise UserError(_(\n 'This Seller no any instance configure removal order Please configure removal '\n 'order configuration.'))\n\n amazon_removal_order_obj = amazon_removal_order_obj.create({\n 'removal_disposition': 'Return',\n 'warehouse_id': self.order_removal_instance_id and\n self.order_removal_instance_id.removal_warehouse_id.id or\n False,\n 'ship_address_id': self.order_removal_instance_id.company_id.partner_id.id,\n 'company_id': self.seller_id.company_id.id,\n 'instance_id': self.order_removal_instance_id.id,\n })\n return {\n 'name': _('Removal Order Request'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'amazon.removal.order.ept',\n 'type': 'ir.actions.act_window',\n 'res_id': amazon_removal_order_obj.id\n }\n\n if self.fba_operations == \"Import Inbound Shipment\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_inbound_shipment_check_status_')\n import_shipment_obj.get_inbound_import_shipment(self.instance_id,\n self.from_warehouse_id,\n self.shipment_id)\n\n if self.fba_operations == \"Create_Inbound_Shipment_Plan\":\n return self.wizard_create_inbound_shipment_plan(self.instance_id)\n\n if self.both_operations == 'vcs_tax_report':\n if not self.seller_id.is_vcs_activated:\n raise UserError( \\\n _(\"Please Select Invoice Upload Policy as per Seller Central Configurations.\"))\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_auto_import_vcs_tax_report_seller_')\n\n vcs_report = vcs_tax_report_obj.create(\n {'report_type': '_SC_VAT_TAX_REPORT_',\n 'seller_id': self.seller_id.id,\n 'start_date': self.report_start_date,\n 'end_date': self.report_end_date,\n 'state': 'draft'\n })\n vcs_report.request_report()\n self.seller_id.write({'vcs_report_last_sync_on': self.report_end_date})\n\n if self.both_operations == \"Sync_Active_Products\":\n return self.create_sync_active_products(self.seller_id, self.instance_id,\n self.update_price_in_pricelist,\n self.auto_create_product)\n\n if self.both_operations == \"Import_Product\":\n return self.import_csv_file()\n\n if self.instance_ids:\n instance_ids = self.instance_ids\n else:\n instance_ids = self.seller_id.instance_ids\n\n for instance in instance_ids:\n if self.fba_operations == \"Check_Cancel_Orders_FBA\":\n cancel_order_marketplaces[instance.seller_id].append(instance.market_place_id)\n if self.operations == 'Check_Cancel_Orders_FBM':\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_auto_check_canceled_fbm_order_in_amazon_seller_')\n cancel_order_marketplaces_fbm[instance.seller_id].append(instance.market_place_id)\n if self.fba_operations == \"Import_Pending_Orders\":\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_import_amazon_fba_pending_order_seller_')\n seller_pending_order_marketplaces[instance.seller_id].append(\n instance.market_place_id)\n if self.operations == 'Export_Stock_From_Odoo_To_Amazon':\n self.with_context({'raise_warning': True}).check_running_schedulers(\n 'ir_cron_auto_export_inventory_seller_')\n seller_stock_instance[instance.seller_id].append(instance)\n if self.both_operations == 'Export_Price_From_Odoo_To_Amazon':\n 
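# --- Illustrative sketch, not part of the module ---------------------------
# The instance loop here groups marketplaces per seller with defaultdict(list)
# so each seller gets exactly one batched call afterwards. Toy reproduction,
# with tuples standing in for recordsets:
from collections import defaultdict

instances = [("seller_a", "mp_uk"), ("seller_a", "mp_de"), ("seller_b", "mp_us")]
by_seller = defaultdict(list)
for seller, marketplace in instances:
    by_seller[seller].append(marketplace)

for seller, marketplaces in by_seller.items():
    print(seller, marketplaces)  # one batched call per seller
# ----------------------------------------------------------------------------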
export_product_price_instance[instance.seller_id].append(instance)\n\n if cancel_order_marketplaces:\n for seller, marketplaces in cancel_order_marketplaces.items():\n sale_order_obj.cancel_amazon_fba_pending_sale_orders(seller,\n marketplaceids=marketplaces,\n instance_ids=instance_ids.ids or [])\n if cancel_order_marketplaces_fbm:\n for seller, marketplaces in cancel_order_marketplaces_fbm.items():\n sale_order_obj.cancel_amazon_fbm_pending_sale_orders(seller,\n marketplaceids=marketplaces, \\\n instance_ids=instance_ids.ids or [])\n if seller_pending_order_marketplaces:\n for seller, marketplaces in seller_pending_order_marketplaces.items():\n sale_order_obj.import_fba_pending_sales_order(seller, marketplaces,\n self.updated_after_date)\n\n if seller_stock_instance:\n for seller, instance_ids in seller_stock_instance.items():\n for instance in instance_ids:\n instance.export_stock_levels()\n if export_product_price_instance:\n for seller, instance_ids in export_product_price_instance.items():\n for instance in instance_ids:\n amazon_products = amazon_product_obj.search(\n [('instance_id', '=', instance.id), ('exported_to_amazon', '=', True)])\n if amazon_products:\n amazon_products.update_price(instance)\n return True\n\n def create_and_request_amazon_live_inv_report_ids(self, report_type, report_date, start_date, \\\n end_date):\n \"\"\"\n Added to request for FBA line inventory report.\n \"\"\"\n ctx = self._context\n live_inventory_request_report_record = self.env['amazon.fba.live.stock.report.ept']\n fba_live_stock_report_vals = {'seller_id': self.seller_id.id,\n 'report_type': report_type,\n 'report_date': report_date,\n 'start_date': start_date,\n 'end_date': end_date}\n\n if ctx.get('instance_ids'):\n instance_ids = ctx.get('instance_ids')\n for instance in instance_ids:\n fba_live_stock_report_vals.update({'amz_instance_id': instance.id, })\n fba_live_stock_report = live_inventory_request_report_record.create( \\\n fba_live_stock_report_vals)\n fba_live_stock_report.request_report()\n return True\n\n fba_live_stock_report = live_inventory_request_report_record.create(\n fba_live_stock_report_vals)\n fba_live_stock_report.request_report()\n return True\n\n def prepare_merchant_report_dict(self, seller):\n \"\"\"\n Added by Udit\n :return: This method will prepare merchant' informational dictionary which will\n passed to amazon api calling method.\n \"\"\"\n account = self.env['iap.account'].search([('service_name', '=', 'amazon_ept')])\n dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')\n return {\n 'merchant_id': seller.merchant_id and str(seller.merchant_id) or False,\n 'auth_token': seller.auth_token and str(seller.auth_token) or False,\n 'app_name': 'amazon_ept',\n 'account_token': account.account_token,\n 'emipro_api': 'get_reports_v13',\n 'dbuuid': dbuuid,\n 'amazon_marketplace_code': seller.country_id.amazon_marketplace_code or\n seller.country_id.code,\n }\n\n def get_reports(self, vals):\n \"\"\"\n Addded by Udit\n This method will get settlement report data from amazon and create it's record in odoo.\n :return: This method will redirecting us to settlement report tree view.\n \"\"\"\n\n tree_id = vals.get('tree_id')\n form_id = vals.get('form_id')\n seller = self.seller_id\n if not seller:\n raise UserError(_('Please select Seller'))\n\n start_date, end_date = self.get_fba_reports_date_format()\n kwargs = self.sudo().prepare_merchant_report_dict(seller)\n kwargs.update(\n {'report_type': vals.get('report_type'), 'start_date': start_date,\n 
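# --- Illustrative sketch, not part of the module ---------------------------
# prepare_merchant_report_dict assembles the payload sent to the IAP proxy.
# Plain-function version with placeholder values; in the module these come
# from the seller record, the iap.account token and the database uuid:
def merchant_payload(merchant_id, auth_token, account_token, dbuuid, marketplace_code):
    return {
        "merchant_id": str(merchant_id) if merchant_id else False,
        "auth_token": str(auth_token) if auth_token else False,
        "app_name": "amazon_ept",
        "account_token": account_token,
        "emipro_api": "get_reports_v13",
        "dbuuid": dbuuid,
        "amazon_marketplace_code": marketplace_code,
    }

print(merchant_payload("M1", "tok", "acc", "uuid-123", "UK"))
# ----------------------------------------------------------------------------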
'end_date': end_date})\n response = iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/iap_request', params=kwargs,\n timeout=1000)\n if response.get('reason'):\n return UserError(_(response.get('reason')))\n\n list_of_wrapper = response.get('result')\n odoo_report_ids = self.prepare_fba_report_vals(list_of_wrapper, vals.get('start_date'),\n vals.get('end_date'), vals.get('model_obj'),\n vals.get('sequence'))\n if self._context.get('is_auto_process'):\n return odoo_report_ids\n\n return {\n 'type': 'ir.actions.act_window',\n 'name': vals.get('name'),\n 'res_model': vals.get('res_model'),\n 'domain': [('id', 'in', odoo_report_ids)],\n 'views': [(tree_id.id, 'tree'), (form_id.id, 'form')],\n 'view_id': tree_id.id,\n 'target': 'current'\n }\n\n def get_fba_reports_date_format(self):\n \"\"\"\n Added by Udit\n This method will convert selected time duration in specific format to send it to amazon.\n If start date and end date is empty then system will automatically select past 90 days\n time duration.\n :return: This method will return converted start and end date.\n \"\"\"\n start_date = self.report_start_date\n end_date = self.report_end_date\n if start_date:\n db_import_time = time.strptime(str(start_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n start_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n start_date = str(start_date) + 'Z'\n else:\n today = datetime.now()\n earlier = today - timedelta(days=90)\n earlier_str = earlier.strftime(\"%Y-%m-%dT%H:%M:%S\")\n start_date = earlier_str + 'Z'\n if end_date:\n db_import_time = time.strptime(str(end_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n end_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n end_date = str(end_date) + 'Z'\n else:\n today = datetime.now()\n earlier_str = today.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end_date = earlier_str + 'Z'\n return start_date, end_date\n\n def prepare_fba_report_vals(self, list_of_wrapper, start_date, end_date, model_obj, sequence):\n \"\"\"\n Added by Udit\n This method will create settlement report and it's attachments from the amazon api response.\n :param list_of_wrapper: Dictionary of amazon api response.\n :param start_date: Selected start date in wizard in specific format.\n :param end_date: Selected end date in wizard in specific format.\n :return: This method will return list of newly created settlement report id.\n \"\"\"\n odoo_report_ids = []\n if list_of_wrapper is None:\n return []\n\n for result in list_of_wrapper:\n reports = []\n if not isinstance(result.get('ReportInfo', []), list):\n reports.append(result.get('ReportInfo', []))\n else:\n reports = result.get('ReportInfo', [])\n for report in reports:\n request_id = report.get('ReportRequestId', {}).get('value', '')\n report_id = report.get('ReportId', {}).get('value', '')\n report_type = report.get('ReportType', {}).get('value', '')\n report_exist = model_obj.search(\n ['|', ('report_request_id', '=', request_id), ('report_id', '=', report_id),\n ('report_type', '=', report_type)])\n if report_exist:\n report_exist = report_exist[0]\n odoo_report_ids.append(report_exist.id)\n continue\n vals = self.prepare_fba_report_vals_for_create(report_type, request_id, report_id,\n start_date, end_date,\n sequence)\n report_rec = model_obj.create(vals)\n report_rec.get_report()\n 
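# --- Illustrative sketch, not part of the module ---------------------------
# get_fba_reports_date_format round-trips through time.strptime/mktime/gmtime
# to turn a naive local "YYYY-MM-DD HH:MM:SS" stamp into UTC ISO-8601 with a
# trailing 'Z'. The same conversion written with datetime (equivalent under
# the assumption that the naive input is local time):
import time
from datetime import datetime, timezone

def to_amazon_iso(stamp: str) -> str:
    local = datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
    utc = datetime.fromtimestamp(time.mktime(local.timetuple()), tz=timezone.utc)
    return utc.strftime("%Y-%m-%dT%H:%M:%S") + "Z"

print(to_amazon_iso("2021-06-01 12:00:00"))
# ----------------------------------------------------------------------------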
self._cr.commit()\n odoo_report_ids.append(report_rec.id)\n return odoo_report_ids\n\n def prepare_fba_report_vals_for_create(self, report_type, request_id, report_id, start_date,\n end_date, sequence):\n \"\"\"\n Added by Udit\n :param report_type: Report type.\n :param request_id: Amazon request id.\n :param report_id: Amazon report id.\n :param start_date: Selected start date in wizard in specific format.\n :param end_date: Selected end date in wizard in specific format.\n :return: This method will prepare and return settlement report vals.\n \"\"\"\n try:\n if sequence:\n report_name = sequence.next_by_id()\n else:\n report_name = '/'\n except:\n report_name = '/'\n return {\n 'name': report_name,\n 'report_type': report_type,\n 'report_request_id': request_id,\n 'report_id': report_id,\n 'start_date': start_date,\n 'end_date': end_date,\n 'state': '_DONE_',\n 'seller_id': self.seller_id.id,\n 'user_id': self._uid,\n }\n\n def create_sync_active_products(self, seller_id, instance_id,\n update_price_in_pricelist, auto_create_product):\n \"\"\"\n Process will create record of Active Product List of selected seller and instance\n @:param - seller_id - selected seller from wizard\n @:param - instance_id - selected instance from wizard\n @:param - update_price_in_pricelist - Boolean for create pricelist or not\n @:param - auto_create_product - Boolean for create product or not\n @author: Deval Jagad (16/11/2019)\n \"\"\"\n if not instance_id:\n raise UserError(_('Please Select Instance'))\n active_product_listing_obj = self.env['active.product.listing.report.ept']\n form_id = self.env.ref('amazon_ept.active_product_listing_form_view_ept')\n vals = {'instance_id': instance_id.id,\n 'seller_id': seller_id.id,\n 'update_price_in_pricelist': update_price_in_pricelist or False,\n 'auto_create_product': auto_create_product or False\n }\n\n active_product_listing = active_product_listing_obj.create(vals)\n try:\n active_product_listing.request_report()\n except Exception as exception:\n raise UserError(_(exception))\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Active Product List',\n 'res_model': 'active.product.listing.report.ept',\n 'res_id': active_product_listing.id,\n 'views': [(form_id.id, 'form')],\n 'view_id': form_id.id,\n 'target': 'current'\n }\n\n def download_sample_attachment(self):\n \"\"\"\n This Method relocates download sample file of amazon.\n :return: This Method return file download file.\n @author: Deval Jagad (26/12/2019)\n \"\"\"\n attachment = self.env['ir.attachment'].search([('name', '=', 'import_product_sample.csv')])\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/content/%s?download=true' % (attachment.id),\n 'target': 'new',\n 'nodestroy': False,\n }\n\n def import_csv_file(self):\n \"\"\"\n This Method relocates Import product csv in amazon listing and mapping of amazon product\n listing.\n :return:\n @author: Deval Jagad (26/12/2019)\n \"\"\"\n if not self.choose_file:\n raise UserError(_('Please Upload File.'))\n\n self.read_import_csv_file()\n if self.choose_file:\n csv_file = StringIO(base64.b64decode(self.choose_file).decode())\n file_write = open('/tmp/products.csv', 'w+')\n file_write.writelines(csv_file.getvalue())\n file_write.close()\n\n instance_dict = {}\n if self.delimiter == \"tab\":\n reader = csv.DictReader(open('/tmp/products.csv', \"rU\"), delimiter=\"\\t\")\n elif self.delimiter == \"semicolon\":\n reader = csv.DictReader(open('/tmp/products.csv', \"rU\"), delimiter=\";\")\n else:\n reader = 
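# --- Illustrative sketch, not part of the module ---------------------------
# import_csv_file decodes the base64 upload and feeds it to csv.DictReader
# with the chosen delimiter. Self-contained reproduction of that pipeline
# (the hop through /tmp/products.csv in the original is not needed to parse):
import base64, csv
from io import StringIO

uploaded = base64.b64encode(b"Seller SKU,Internal Reference\nSKU-1,REF-1\n")
delimiters = {"tab": "\t", "semicolon": ";", "comma": ","}
reader = csv.DictReader(StringIO(base64.b64decode(uploaded).decode()),
                        delimiter=delimiters["comma"])
for row in reader:
    print(row["Seller SKU"], row["Internal Reference"])
# ----------------------------------------------------------------------------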
csv.DictReader(open('/tmp/products.csv', \"rU\"), delimiter=\",\")\n if reader:\n if reader.fieldnames and len(reader.fieldnames) == 5:\n for line in reader:\n odoo_default_code = line.get('Internal Reference')\n seller_sku = line.get('Seller SKU')\n amazon_marketplace = line.get('Marketplace')\n fulfillment = line.get('Fulfillment')\n instance = False\n\n if odoo_default_code:\n product_id = self.get_odoo_product_csv_data_ept(line)\n\n if amazon_marketplace:\n instance = instance_dict.get(amazon_marketplace)\n if not instance:\n instance = self.seller_id.instance_ids.filtered(\n lambda l: l.marketplace_id.name == amazon_marketplace)\n instance_dict.update({amazon_marketplace: instance})\n\n if instance and fulfillment and seller_sku:\n self.create_or_search_amazon_listing(instance, product_id, line)\n return {\n 'effect': {\n 'fadeout': 'slow',\n 'message': \"All products import successfully!\",\n 'img_url': '/web/static/src/img/smile.svg',\n 'type': 'rainbow_man',\n }\n }\n else:\n raise UserError(_( \\\n \"Either file is invalid or proper delimiter/separator is not specified \"\n \"or not found required fields.\"))\n else:\n raise UserError(_( \\\n \"Either file format is not csv or proper delimiter/separator is not specified\"))\n else:\n raise UserError(_(\"Please Select File and/or choose Amazon Seller to Import Product\"))\n\n def get_odoo_product_csv_data_ept(self, line_vals):\n \"\"\"\n This method will get the product vals and find or create the odoo product.\n :param line vals : csv file line data.\n return : odoo product.\n \"\"\"\n product_obj = self.env['product.product']\n\n amazon_product_name = line_vals.get('Title')\n odoo_default_code = line_vals.get('Internal Reference')\n\n product_id = product_obj.search(\n ['|', (\"default_code\", \"=\", odoo_default_code),\n (\"barcode\", \"=\", odoo_default_code)], limit=1)\n if not product_id:\n odoo_product_dict = {\n 'name': amazon_product_name,\n 'default_code': odoo_default_code,\n 'type': 'product'\n }\n product_id = product_obj.create(odoo_product_dict)\n\n return product_id\n\n def create_or_search_amazon_listing(self, instance, product_id, line_vals):\n \"\"\"\n This Method relocates if product exist in odoo and product doesn't exist in\n amazon create amazon product listing.\n :param instance: This arguments relocates instance of amazon.\n :param product_id: product record\n :param line_vals: amazon listing line vals\n :return: This method return boolean(True/False).\n @author: Deval Jagad (26/12/2019)\n \"\"\"\n amazon_product_ept_obj = self.env['amazon.product.ept']\n\n amazon_product_name = line_vals.get('Title')\n seller_sku = line_vals.get('Seller SKU')\n fulfillment = line_vals.get('Fulfillment')\n\n amazon_product_id = amazon_product_ept_obj.search_amazon_product( \\\n instance.id, seller_sku, fulfillment)\n if not amazon_product_id:\n amazon_product_ept_obj.create(\n {'name': amazon_product_name or product_id.name,\n 'fulfillment_by': fulfillment,\n 'product_id': product_id.id,\n 'seller_sku': seller_sku,\n 'instance_id': instance.id,\n 'exported_to_amazon': True}\n )\n return True\n\n def read_import_csv_file(self):\n \"\"\"\n This Method relocates read csv and check validation if seller sku doesn't exist in csv\n raise error.\n :return: This Method return boolean(True/False).\n \"\"\"\n if self.choose_file:\n data = StringIO(base64.b64decode(self.choose_file).decode())\n\n if self.delimiter == \"tab\":\n reader = csv.DictReader(data, delimiter='\\t')\n elif self.delimiter == \"semicolon\":\n reader = 
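# --- Illustrative sketch, not part of the module ---------------------------
# The import loop caches marketplace -> instance lookups in a plain dict so
# each marketplace is resolved only once per file. The same memoisation
# pattern in isolation (resolve() stands in for the recordset filter):
def make_cached_resolver(resolve):
    cache = {}
    def lookup(key):
        if key not in cache:
            cache[key] = resolve(key)
        return cache[key]
    return lookup

lookup = make_cached_resolver(lambda mp: "instance-for-%s" % mp)
print(lookup("amazon.co.uk"))  # computed on first use
print(lookup("amazon.co.uk"))  # served from the cache
# ----------------------------------------------------------------------------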
csv.DictReader(data, delimiter=';')\n else:\n reader = csv.DictReader(data, delimiter=',')\n seller_error_line = []\n\n next(reader)\n for line in reader:\n if not line.get('Seller SKU'):\n seller_error_line.append(reader.line_num)\n message = \"\"\n if seller_error_line:\n message += 'File is invalid Seller SKU must be required field.'\n if message:\n raise UserError(_(message))\n\n def wizard_create_inbound_shipment_plan(self, instance):\n \"\"\"\n This method will create shipment plan record of selected seller and instance\n :return:\n @author: Deval Jagad (26/12/2019)\n \"\"\"\n if not instance:\n raise UserError(_('Please Select Instance'))\n inbound_shipment_plan_obj = self.env['inbound.shipment.plan.ept']\n form_id = self.env.ref('amazon_ept.inbound_shipment_plan_form_view')\n\n warehouse_id = instance.warehouse_id\n vals = {'instance_id': instance.id,\n 'warehouse_id': warehouse_id.id,\n 'ship_from_address_id': warehouse_id.partner_id and \\\n warehouse_id.partner_id.id,\n 'company_id': instance.company_id and instance.company_id.id,\n 'ship_to_country': instance.country_id and instance.country_id.id\n }\n shipment_plan_id = inbound_shipment_plan_obj.create(vals)\n\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Inbound Shipment Plan',\n 'res_model': 'inbound.shipment.plan.ept',\n 'res_id': shipment_plan_id.id,\n 'views': [(form_id.id, 'form')],\n 'view_id': form_id.id,\n 'target': 'current'\n }\n","sub_path":"amazon_ept/wizard/amazon_process_import_export.py","file_name":"amazon_process_import_export.py","file_ext":"py","file_size_in_byte":51640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"43989256","text":"from GLOBAL_VAR import *\n\ngroup = 0\npair_Fn = '%s/%s_outlierPairs_group%d.txt' % (pairdir, LMfn, group)\nN0 = len(pd.read_csv(pair_Fn, header= None, sep='\\t', usecols=[0]))\n\n\npairs_N = []\nfor group in range(1, 23):\n pair_Fn = '%s/%s_outlierPairs_group%d.txt' % (pairdir, LMfn, group)\n N = len(pd.read_csv(pair_Fn, header= None, sep='\\t', usecols=[0]))\n pairs_N.append([N, N0])\n\npairs_N = pd.DataFrame(pairs_N)\npairs_N.columns = ['ts_N', 'shared_N']\npairs_N.to_csv('Fig2_sig_prop_%s.txt' % FMfn, sep='\\t', index = False)\n","sub_path":"Extended_Methods/Heuristic_1/2_savePairN.py","file_name":"2_savePairN.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"25621355","text":"import os\nimport constant\n\ndef countFiles(path):\n\tcount = 0\n\twith os.scandir(path) as it:\n\t\tfor i in it:\n\t\t\tcount = count + 1\n\treturn count\n\n\ndef readAllFiles(path):\n\tmsg = []\n\tnames = []\n\twith os.scandir(path) as it:\n\t\tfor entry in it:\n\t\t\tif entry.is_file():\n\t\t\t\tfMsg = open(path + entry.name, \"r\")\n\t\t\t\tmsg.append(fMsg.read())\n\t\t\t\tnames.append(entry.name)\n\t\t\t\tfMsg.close()\n\t\treturn (msg, names)\n","sub_path":"handleFolder.py","file_name":"handleFolder.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
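# --- Illustrative sketch, not part of the records above --------------------
# handleFolder.countFiles counts every directory entry, subdirectories
# included, while readAllFiles filters with entry.is_file(). A variant of the
# counter restricted to regular files, for comparison (os.path.join also
# avoids the missing-trailing-slash pitfall of `path + entry.name`):
import os

def count_regular_files(path: str) -> int:
    with os.scandir(path) as it:
        return sum(1 for entry in it if entry.is_file())

if __name__ == "__main__":
    print(count_regular_files("."))
# ----------------------------------------------------------------------------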