diff --git "a/3556.jsonl" "b/3556.jsonl" new file mode 100644--- /dev/null +++ "b/3556.jsonl" @@ -0,0 +1,718 @@ +{"seq_id":"313925205","text":"# -*- coding: utf-8 -*-\n\n# 10\n# / \\\n# 00 01\n\nfrom hashlib import blake2b\n\nFANOUT = 2\nDEPTH = 2\nLEAF_SIZE = 4096\nINNER_SIZE = 64\n\nbuf = bytearray(6000)\n\n# Left leaf\nh00 = blake2b(buf[0:LEAF_SIZE], fanout=FANOUT, depth=DEPTH,\n leaf_size=LEAF_SIZE, inner_size=INNER_SIZE,\n node_offset=0, node_depth=0, last_node=False)\n# Right leaf\nh01 = blake2b(buf[LEAF_SIZE:], fanout=FANOUT, depth=DEPTH,\n leaf_size=LEAF_SIZE, inner_size=INNER_SIZE,\n node_offset=1, node_depth=0, last_node=False)\n# Root node\nh10 = blake2b(digest_size=32, fanout=FANOUT, depth=DEPTH,\n leaf_size=LEAF_SIZE, inner_size=INNER_SIZE,\n node_offset=0, node_depth=1, last_node=True)\nh10.update(h00.digest())\nh10.update(h01.digest())\nprint(h10.hexdigest())\n'3ad2a9b37c6070e374c7a8c508fe20ca86b6ed54e286e93a0318e95e881db5aa'","sub_path":"Library/HASHLIB/treemode.py","file_name":"treemode.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300778038","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017年4月14日\n\n@author: chenyitao\n'''\n\nimport gevent.monkey\ngevent.monkey.patch_all()\nimport os\nif os.path.exists('./Worker.log'):\n os.remove('./Worker.log')\n\nfrom tddc import WorkerManager, Postman\n\nfrom worker.status.status_manager import StatusManager\n\n\nclass MonitorManager(WorkerManager):\n '''\n classdocs\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n super(MonitorManager, self).__init__()\n self.info('Monitor Is Starting.')\n Postman()\n self._status_manager = StatusManager()\n self.info('->Monitor Was Started.')\n\n @staticmethod\n def start():\n MonitorManager()\n while True:\n gevent.sleep(60)\n\n\ndef main():\n MonitorManager.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1810209","text":"import requests\nimport json\nfrom src.models import Item\n\nitems = [Item(menuId=1,\n options=[\"Bacon\",'Eggs','Paper'],\n special=\"Now I want it to be warm\",\n id=7\n ).toDict(),\n\n Item ( menuId=3 ,\n options=[ \"Blue\" , \"Sugar\",\"Spice\",\"Everything Nice\" ] ,\n special=\"Freeze before serving.\",\n id=8\n ).toDict()]\n\ndata = {\n 'orderId' : 16,\n 'userId' : 3,\n 'items' : items\n}\n\n# Success Case\njsonData = json.dumps(data)\nprint('Data: ' + jsonData)\nresp = requests.post('http://127.0.0.1:5000/edititems',json=jsonData)\n\nprint('Success? 
' + str(resp.json()))","sub_path":"DBService/test/EditItemsTest.py","file_name":"EditItemsTest.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"338827430","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\n\nimport sys\n\n\ndef init_logger(logger_name, log_file_name):\n logging.basicConfig(level=logging.INFO,\n format=\"%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-8s %(message)s\",\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='%s' % (log_file_name),\n filemode='w'\n )\n logger = logging.getLogger(logger_name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.NOTSET)\n return logger\n\n","sub_path":"PerfTest/src/util/LoggerHandle.py","file_name":"LoggerHandle.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"579312597","text":"class Student:\n def __init__(self, name, age, grade, sex):\n self.name = name\n self.age = age\n self.grade = grade\n self.sex = sex\n\n def print_self_info(self):\n print(\"%s,年龄%s,成绩%s,性别%s\" % (self.name, self.age, self.grade, self.sex))\n\n\ndef find01():\n for item in list01:\n if item.name == \"苏大强\":\n return item\n # return None 可以不写\n\n\nlist01 = [\n Student(\"赵敏\", 28, 100, \"女\"),\n Student(\"苏大强\", 68, 62, \"男\"),\n Student(\"明玉\", 30, 95, \"女\"),\n Student(\"无忌\", 29, 70, \"男\"),\n Student(\"张三丰\", 130, 96, \"男\"),\n Student(\"❤Alice❤\", 21, 99, \"女\")\n]\n\n\n# stu = find01()\n# print(stu.name,stu.age)\n\ndef find02():\n global result\n result = []\n for item in list01:\n if item.sex == \"女\":\n result.append(item)\n return result\n\n\n# stu = find02()\n# for item in stu:\n# item.print_self_info()\n\n\ndef find03():\n count = 0\n for item in list01:\n if item.age >= 30:\n count += 1\n return count\n\n\n# print(\"年龄大于30岁的人共有%d个\" % find03())\n\n\ndef find04():\n for item in list01:\n item.grade = 0\n\n#\n# find04()\n# for item in list01:\n# item.print_self_info()\n\ndef find05():\n result = []\n for item in list01:\n result.append(item.name)\n return result\n\n\n# print(find05())\n\ndef find06():\n max_stu = list01[0]\n for i in range(1, len(list01)):\n if list01[i].age > max_stu.age:\n max_stu = list01[i]\n return max_stu\n\n\nre = find06()\nre.print_self_info()","sub_path":"part_01_python_base/python_oo/day10/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"630550371","text":"import optparse\n\nparser = optparse.OptionParser()\n\n \nclass Cli():\n def __init__(self):\n self.time = 5\n self.currency = 'usd'\n self.getCurrencies = False\n self.getCoins = False\n self.time_type = ''\n self.crypto = ''\n self.argument_parser()\n \n def argument_parser(self):\n parser.add_option(\n \"-T\",\n \"--time\",\n dest=\"time\",\n help=\"Set timer for notifications.\"\n )\n parser.add_option(\n \"-t\",\n \"--timetype\",\n dest=\"timeType\",\n help=\"Set timer type ('seconds' or 'minutes').\"\n )\n \n parser.add_option(\n '-c',\n \"--coin\",\n dest=\"cryptoCoin\",\n help=\"Specify the crypto currency you want.\"\n )\n parser.add_option(\n \"-L\",\n \"--listcoins\",\n dest=\"CoinsList\",\n help=\"Get all supported coins. 
Takes a boolearn (True or False)\"\n )\n\n parser.add_option(\n '-l',\n \"--listcurrency\",\n dest=\"Currencies\",\n help=\"Get list of supported curriencies.Takes a boolearn (True or False)\"\n )\n\n (options,arguments) = parser.parse_args()\n\n self.time = options.time\n self.time_type = options.timeType\n self.crypto = options.cryptoCoin\n\n get_coins = options.CoinsList\n get_currencies = options.Currencies\n\n if get_coins != None:\n self.getCoins = get_coins\n if get_currencies != None:\n self.getCurrencies = get_currencies\n\n if self.time_type != None:\n if self.time_type.lower() == 'seconds' or self.time_type.lower() == 's':\n self.time_type = 'seconds'\n if self.time_type.lower() =='minutes' or self.time_type.lower() == 'm':\n self.time_type = 'minutes'\n \n\n return self.time,self.time_type,self.crypto,self.getCurrencies,self.getCoins\n\n \n\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"277673709","text":"from constant import test_data_path\nfrom collections import Counter\nimport numpy as np\nfrom constant import logger\nimport tensorflow as tf\nfrom keras.models import load_model\nimport pandas as pd\nfrom preprocess import sentence2vec\nfrom preprocess import db_init\n\n# from keras.backend.tensorflow_backend import set_session\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# set_session(tf.Session(config=config))\n\nzero_or_one = lambda x : 0 if x<0.5 else 1\nclass Line():\n def __init__(self,id,comment,col,tokenizer):\n self.id = id\n self.comment = comment\n self.vec = sentence2vec(col,tokenizer,comment)\n \n def predict(self,net):\n self._class = wrapper_pridict(net,self.vec)[0]\n\n def to_labels(self):\n self.toxic = zero_or_one(self._class[0])\n self.severe_toxic = zero_or_one(self._class[1])\n self.obscene = zero_or_one(self._class[2])\n self.threat = zero_or_one(self._class[3])\n self.insult = zero_or_one(self._class[4])\n self.identity_hate = zero_or_one(self._class[5])\n def to_data_line(self):\n return [self.id,self.comment,self.toxic,self.severe_toxic,self.obscene,self.threat,self.insult,self.identity_hate]\n\ndef test_df(path=test_data_path):\n df = pd.read_csv(path)\n return df\n\ndef id_comment_dict(df):\n col,tokenizer = db_init()\n return {item[0]:sentence2vec(col,tokenizer, item[1]) for item in df.values}\n\n\nepoch = 14\n\ndef add1demosion(array):\n if isinstance(array,np.ndarray):\n array = array.tolist()\n # print(len(array.shape))\n # if len(array.shape) == 2:\n # array = array[np.newaxis,:,:]\n return array\n\ndef wrapper_pridict(net,vec):\n if len(vec) == 0:\n return np.array([[0,0,0,0,0,0]])\n if not isinstance(vec,np.ndarray):\n vec = np.array(vec)\n if len(vec.shape) == 2:\n vec = vec[np.newaxis,:,:]\n return net.predict_on_batch(vec)\n\ndef predict():\n logger.info('start predict')\n model_path = 'model\\\\' +'model_'+ str(epoch)+'_'+'.h5'\n logger.info('local net path is %s', model_path)\n net = load_model(model_path)\n assert net is not None\n logger.info('net loaded')\n df = test_df()\n data = df.values\n col,tokenizer = db_init()\n lines = [Line(item[0],item[1],col,tokenizer) for item in data]\n for line in lines:\n line.predict(net)\n for line in lines:\n line.to_labels()\n new_data = [line.to_data_line() for line in lines]\n new_df = pd.DataFrame(data=new_data,columns=[\"id\",\"comment_text\",\"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"])\n 
return new_df\n\ndef save_csv(df,path='./predict.csv'):\n df.to_csv(path,encoding='utf-8')\n logger.info('predict result saved to %s',path)\n\nif __name__ == '__main__':\n df = predict()\n save_csv(df)\n # df = test_df()\n # col,tokenizer = db_init()\n # texts = np.array(df['comment_text'])\n # print(len(texts))\n # texts_len = [len(sentence2vec(col,tokenizer,text)) for text in texts]\n # print(dict(Counter(texts_len)))\n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"623464613","text":"from astropy.io import fits\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef load_fits(file):\n hdulist = fits.open(file)\n data = hdulist[0].data\n hdulist.close()\n \n arg_max = np.argmax(data, axis=None)\n max_idx = np.unravel_index(arg_max, data.shape)\n\n return max_idx\n\ndef run_test(file):\n bright = load_fits(file)\n print(bright)\n\n hdulist = fits.open(file)\n data = hdulist[0].data\n hdulist.close()\n\n plt.imshow(data.T, cmap=plt.cm.viridis)\n plt.colorbar()\n plt.show()\n\nif __name__ == '__main__':\n # Test Case 1\n print(\"Test Case #1\")\n run_test('image0.fits')\n\n # Test Case 2\n print(\"Test Case #2\")\n run_test('image1.fits')\n\n # Test Case 3\n print(\"Test Case #3\")\n run_test('image2.fits')\n\n # Test Case 4\n print(\"Test Case #4\")\n run_test('image3.fits')","sub_path":"week1/1b/3_read_a_fits_file/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588349110","text":"#Updated intrest\n#Austin C 11/20/18\n#Does intrest along with giving a bar graph\n\nfrom graphics import *\n\nimport math\n\ndef main():\n #intro\n print(\"Hello user, this program draws a graph based off your intrest.\")\n\n #gets values for principal and intrest\n #principal = float(input(\"Enter your inital principal: \"))\n #apr = float(input(\"What is the annualized intrest rate: \"))\n \n \n win = GraphWin(\"Future value calculator\", 300, 300)\n Text(Point(75, 150), \" Enter principal: \"). 
draw(win)\n Text(Point (75, 200), \"Enter interest rate: \").draw(win)\n p = Entry(Point(200, 200), 10).draw(win)\n a = Entry(Point(200, 150), 10).draw(win)\n Text(Point(100, 10), \"Click to exit\").draw(win)\n win.getMouse()\n principal = p.getText()\n principal = eval(principal)\n apr = a.getText()\n apr = eval(apr)\n win.close()\n \n wina = GraphWin(\"Investment growth chart\", 320, 240)\n inputText = Entry(Point(2.25, 3), 5)\n #Draw label \" 0.0k\" at (20, 230)\n Text(Point(20, 230), ' 0.0K').draw(wina)\n #Draw label \" 2.5k\" at (20, 180)\n Text(Point(20, 180), ' 2.5K').draw(wina)\n #Draw label \" 5.0k\" at (20, 130)\n Text(Point(20, 130), ' 5.0K').draw(wina)\n #Draw label \" 7.5k\" at (20, 80)\n Text(Point(20, 80), ' 7.5K').draw(wina)\n #Draw label \"10.0k\" at (20, 30)\n Text(Point(20, 30), '10.0K').draw(wina)\n #Draw a rectangle from (40,230) to (65, 230 - principal * 0.02)\n height = principal * 0.02\n bar = Rectangle (Point(40,230), Point(65, 230-height))\n bar.setFill(\"blue\")\n bar.setWidth(2)\n bar.draw(wina)\n\n for year in range (1, 11):\n principal = principal * (1 + apr)\n x11 = year * 25 + 40\n height = principal * 0.02\n bar = Rectangle(Point(x11, 230), Point(x11 + 25, 230 - height))\n bar.setFill(\"blue\")\n bar.setWidth(2)\n bar.draw(wina)\n\n button = Text(Point(160, 120), \"Click to Quit\")\n button.draw(wina)\n Rectangle(Point(110, 100), Point(210, 140)).draw(wina)\n \n # wait for click and then quit\n wina.getMouse()\n wina.close()\n \nmain()\n","sub_path":"python-1.4/Interestv2.py","file_name":"Interestv2.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"223429123","text":"from flask import Flask, jsonify, request, json\nfrom requests.sessions import session\nfrom validate import BaseValidate\nfrom exception import BaseException\nfrom controller import HPUEams, HPUZhhq, HPUVPN, HPUDorm, HPULib\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'Hello HPU_School_Helper_APIs!'\n\n# 获取教务session\n# 接收:ip \n# 返回:session content, shaHeader, aesHeader, captcha\n@app.route('/eams/session', methods=['POST'])\ndef EamsSession():\n BaseValidate.paramsExist(request.args.to_dict(), ['ip'])\n return HPUEams.get_session(request.args['ip'])\n\n# 登录教务 (eams)\n# 接收:ip, username, password, captcha, session, sha1H, aesH\n# 返回:session content\n@app.route('/eams/login', methods=['POST'])\ndef EamsLogin():\n BaseValidate.paramsExist(\n request.args.to_dict(), \n ['ip', 'username', 'password', 'captcha', 'session', 'sha1H', 'aesH']\n )\n args = request.args\n return HPUEams.login_eams(\n args['ip'],\n args['username'],\n args['password'],\n args['captcha'],\n args['session'],\n args['sha1H'],\n args['aesH'] \n )\n\n# 绑定教务 (用户账户绑定、获取课程表、获取成绩)\n# 接收:session (已成功登录教务的) , type\n# 返回:stu_info, stu_timetable, stu_grade\n@app.route('/eams/bind', methods=['POST'])\ndef EamsBind():\n BaseValidate.paramsExist(\n request.args.to_dict(), \n ['session']\n )\n return HPUEams.bind_eams(\n request.args['session'], \n request.args['type']\n )\n\n# 获取空教室查询教学楼\n# 接收:session\n# 返回:data\n@app.route('/eams/freeroom/buildings', methods=['POST'])\ndef EamsFreeroomBuildings():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session']\n )\n return HPUEams.eams_freeroom_buildings(\n request.args['session']\n )\n\n# 查询空教室\n# 接受: \n# buildingId # buildingID(value); eg:1\n# dateBegin # 开始日期; eg:2020-09-09 \n# dateEnd # 结束日期; eg:2020-09-09\n# section # 0:自动爬取第1至第10小节;1-10:对应小节; eg:0/1-2\n# 
mode # 0:精简模式(只有教室名称);1:详细模式(结果包括教室信息); eg:0/1\n# ip # 客户端ip; eg:223.90.25.199 \n#返回\n@app.route('/eams/freeroom/search', methods=['POST'])\ndef EamsFreeroomSearch():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session','buildingId','dateBegin','dateEnd','section','mode', 'ip']\n )\n return HPUEams.eams_freeroom_search(\n request.args['session'],\n request.args['buildingId'],\n request.args['dateBegin'],\n request.args['dateEnd'],\n request.args['section'],\n request.args['mode'],\n request.args['ip']\n )\n\n# 获取校园保修session\n# 接收:ip\n# 返回:session, lt, execution, token, captcha, captchaHtml\n@app.route('/zhhq/session', methods=['POST'])\ndef ZhhqSession():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['ip']\n )\n return HPUZhhq.get_session(\n request.args['ip']\n )\n\n# 登录校园报修\n# 接收:username, password, session, lt, execution, token, captcha, ip\n# 返回:academy, avatar, dorm, nickname, session\n@app.route('/zhhq/login', methods=['POST'])\ndef ZhhqLogin():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['username', 'password', 'session', 'lt', 'execution', 'token', 'captcha', 'ip']\n )\n return HPUZhhq.login_zhhq(\n request.args['session'],\n request.args['username'],\n request.args['password'],\n request.args['captcha'],\n request.args['lt'],\n request.args['execution'],\n request.args['token'],\n request.args['ip'],\n )\n\n# 登录HPU校外访问VPN\n# 接受:jwzh, jwmm, ip\n# 返回:session\n@app.route('/hpuvpn/login', methods=['POST'])\ndef HpuvpnLogin():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['jwzh', 'jwmm', 'ip']\n )\n return HPUVPN.login_hpu_vpn(\n request.args['jwzh'],\n request.args['jwmm'],\n request.args['ip']\n )\n\n# 获取公告列表\n# 接受:session, page, ip\n# 返回:data\n@app.route('/hpuvpn/school/ann', methods=['POST'])\ndef SchoolAnn():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'page', 'ip']\n )\n return HPUVPN.school_ann(\n request.args['session'],\n request.args['page'],\n request.args['ip']\n )\n\n# 获取公告文件列表\n# 接受:session, annid\n# 返回:data\n@app.route('/hpuvpn/school/ann/list', methods=['POST'])\ndef AnnList():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'annid']\n )\n return HPUVPN.school_ann_download(\n request.args['session'],\n request.args['annid']\n )\n\n# 获取讲座列表\n# 接受:session, page, ip\n# 返回:data\n@app.route('/hpuvpn/school/lectures', methods=['POST'])\ndef SchoolLectures():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'page', 'ip']\n )\n return HPUVPN.school_lectures(\n request.args['session'],\n request.args['page'],\n request.args['ip']\n )\n\n# 获取体测验证码\n# 接受:session, ip\n# 返回:captcha\n@app.route('/hpuvpn/tc/code', methods=['POST'])\ndef TcCode():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'ip']\n )\n return HPUVPN.school_tc_code(\n request.args['session'],\n request.args['ip']\n )\n\n# 获取体测成绩\n# 接受:session, usernum, userpwd, captcha, ip\n# 返回:data\n@app.route('/hpuvpn/tc/grade', methods=['POST'])\ndef TcGrade():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'usernum', 'userpwd', 'captcha', 'ip']\n )\n return HPUVPN.school_tc_grade(\n request.args['session'],\n request.args['usernum'],\n request.args['userpwd'],\n request.args['captcha'],\n request.args['ip']\n )\n\n# 获取宿舍电费使用情况\n# 接受:lou(可选), ceng(可选), room(可选)\n# 返回:data\n@app.route('/dorm/electricity', methods=['POST'])\ndef DormElectricity():\n args_d = request.args.to_dict()\n return HPUDorm.dorm_elec_search(\n args_d['lou'] if 'lou' in args_d else None,\n args_d['ceng'] if 
'ceng' in args_d else None,\n args_d['room'] if 'room' in args_d else None,\n args_d['ip'] if 'ip' in args_d else None\n )\n\n# 获取图书馆session\n# 接收:ip\n# 返回: session\n@app.route('/lib/session', methods=['POST'])\ndef LibSession():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['ip']\n )\n return HPULib.lib_session(\n request.args['ip']\n )\n\n# 登录图书馆\n# 接收: session, username, password, captcha, codeKey, ip\n# 返回: session \n@app.route('/lib/login', methods=['POST'])\ndef LibLogin():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'username', 'password', 'captcha', 'codeKey', 'ip']\n )\n return HPULib.lib_login(\n request.args['session'],\n request.args['username'],\n request.args['password'],\n request.args['captcha'],\n request.args['codeKey'],\n request.args['ip'],\n )\n\n\n# 获取图书馆借阅信息\n# 接收: session\n# 返回: data\n@app.route('/lib/loanList', methods=['POST'])\ndef LibLoanList():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['session', 'ip']\n )\n return HPULib.lib_loan_list(\n request.args['session'],\n request.args['ip']\n )\n\n\n# 简单搜索馆藏\n# 接收: keyword, page, ip\n# 返回: data\n@app.route('/lib/book/simpleSearch', methods=['POST'])\ndef BookSimpleSearch():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['keyword', 'page', 'ip']\n )\n return HPULib.lib_simple_search(\n request.args['keyword'],\n request.args['page'],\n request.args['ip']\n )\n\n\n# 图书馆图书详情\n# 接收: recordId, ip\n# 返回: data\n@app.route('/lib/book/detail', methods=['GET', 'POST'])\ndef BookDetail():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['recordId', 'ip']\n )\n return HPULib.lib_book_detail(\n request.args['recordId'],\n request.args['ip']\n )\n\n# 图书馆图书封面\n# 接收: recordId, ip\n# 返回: data\n@app.route('/lib/book/cover', methods=['GET', 'POST'])\ndef BookCover():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['recordId', 'ip']\n )\n return HPULib.lib_book_cover(\n request.args['recordId'],\n False,\n request.args['ip']\n )\n\n\n# 图书馆图书封面, 直接返回封面base64\n# 接收: recordId\n# 返回: data\n@app.route('/lib/book/cover/raw/', methods=['GET', 'POST'])\ndef BookCoverRaw(recordId=None):\n if not recordId:\n BaseException.ParameterException('缺少参数: recordId')\n return HPULib.lib_book_cover(\n recordId,\n True\n )\n\n\n# 图书馆图书馆藏情况\n# 接收: recordId, ip\n# 返回: data\n@app.route('/lib/book/collection', methods=['GET', 'POST'])\ndef BookCollection():\n BaseValidate.paramsExist(\n request.args.to_dict(),\n ['recordId', 'ip']\n )\n return HPULib.lib_book_collection(\n request.args['recordId'],\n request.args['ip']\n )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# 全局错误处理\n@app.errorhandler(Exception)\ndef framework_error(e):\n return BaseException.framework_error(e, app)\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585046532","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef file_operations():\n fname=input(\"Enter the filename\\n\")\n print(\"Enter the five data in file with its label\\nIn this format\\n Title:Number example Maths:95\")\n with open(fname,\"w\") as fn: #fn=open(fname,\"w+\")\n for i in range(0,5):\n x=input()\n fn.write(str(x)+'\\n')\n label=[]\n title=[]\n with open(fname,\"r+\") as fr:\n\n for line in fr:\n print(line)\n loc=line.rfind(':')\n t=line[0:loc]\n title.append(str(t))\n l=line[loc+1:]\n label.append(int(l))\n \n\n fr.close()\n return label,title\n\n# 
loc=str_new.rfind(\":\")\ndef pie_graph(label,title):\n objects = title\n y_pos = np.arange(len(objects))\n x_pos = np.arange(len(label))\n performance = label\n colors=['blue','cyan','indigo','red','orange']\n\n plt.bar(y_pos, performance, align='center',color=colors)\n plt.xticks(y_pos)\n plt.yticks(performance)\n plt.xlabel('Items')\n plt.ylabel('Sales')\n plt.title('Stationary Product Sales Data')\n\n plt.show()\n\ndef main():\n label,title=file_operations()\n pie_graph(label,title)\n\n\nmain()\n","sub_path":"graph_file.py","file_name":"graph_file.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39000214","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Author: ying.han@pku.edu.cn\n\n# import pickle\n# from ipdb import set_trace\nimport re\n\nkb_file = '/home/zhangzhipeng/semanticComputing/NLPCC2016QA-Update/knowledge/nlpcc-iccpol-2016.kbqa.kb'\npredicateSet = set()\nentitySet = set()\nwith open(kb_file, 'r') as kf, open('sp_dict', 'w') as md:\n reg = re.compile(r'\\(.*\\)|\\(.*\\)')\n for line in kf:\n line = line.strip()\n wordlist = line.split(' ||| ')\n if len(wordlist) == 3:\n e = reg.sub('', wordlist[0].strip())\n if e.startswith('《') and e.endswith('》'):\n e = e[3:len(e)-3]\n p = reg.sub('', wordlist[1].strip())\n if p not in predicateSet:\n predicateSet.add(p)\n if e not in entitySet:\n entitySet.add(e)\n '''\n wordlist = line.split(' ||| ')[1].split('\\t')\n for word in wordlist:\n simple_word = reg.sub('', word)\n if simple_word not in wordSet:\n wordSet.add(simple_word)\n '''\n for e in entitySet:\n md.write('%s 10000 enity\\n' % e)\n for p in predicateSet:\n md.write('%s 10000 predicate\\n' % p)\n","sub_path":"sckr/add_token_dic.py","file_name":"add_token_dic.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"160410021","text":"\"\"\"\nLoad PSPNet model using ros node\n\"\"\"\n\nimport os\nimport rospy\nimport sensor_msgs\nfrom sensor_msgs.msg import Image as sensor_img\nfrom std_msgs.msg import Int16\nfrom cv_bridge import CvBridge\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nfrom scipy import misc\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append('/home/esraa/catkin_ws/src/DAT/dat_dyn/scripts/pspnet_helpers')\nfrom model import PSPNet101, PSPNet50 \nfrom tools import * \nimport PIL\nimport io\nfrom io import StringIO\nimport string\nimport pandas as pd\nfrom pandas.compat import StringIO\nfrom collections import Counter\nimport message_filters\nfrom datetime import datetime\nfrom sensor_msgs.msg import PointCloud2, PointField\nimport sensor_msgs.point_cloud2 as pc2\n\nfrom dat_dyn.msg import Centroid \n\nfrom dat_dyn.srv import *\n\n\n\nparam = {'crop_size': [473, 473],\n 'num_classes': 150, \n 'model': PSPNet50,\n 'weights_path': './pspnet_helpers/model/pspnet50-ade20k/model.ckpt-0'} \nIMAGE_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32) \n\ndef get_net_output(img):\n \n h = 640\n w = 480\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n \n \n # Create network.\n PSPNet = param['model']\n net = PSPNet({'data': img}, is_training=False, num_classes=param['num_classes'])\n\n ##Predict segmentation output\n raw_output = net.layers['conv6']\n\n # Predictions.\n raw_output_up = tf.image.resize_bilinear(raw_output, size=[h, w], align_corners=True)\n raw_output_up = 
tf.image.crop_to_bounding_box(raw_output_up, 0, 0, h, w)\n raw_output_up = tf.argmax(raw_output_up, dimension=3)\n pred,indices = decode_labels(raw_output_up, [h,w], param['num_classes'])\n return pred,indices\n\n\n\n\nclass PspNet():\n def __init__(self):\n self.bridge = CvBridge()\n self.input_img = tf.placeholder(tf.float32,[1,None,None,3],name=\"input_img\")\n self.pred,self.indices = get_net_output(self.input_img)\n\n self._saver = tf.train.Saver()\n self._session = tf.InteractiveSession()\n \n init_op = tf.global_variables_initializer()\n self._session.run(init_op)\n\n self._saver.restore(self._session, param['weights_path'])\n\n ## seg_pub publishes the segmentation results, the msg is a segmented image to be visualized by rviz\n self.seg_pub = rospy.Publisher('/result', sensor_img, queue_size=10)\n\n\n ## message filter to sync the rgb , depth msgs and point cloud \n self._sub = message_filters.Subscriber('/camera/rgb/image_color',sensor_img) \n self._depth_sub = message_filters.Subscriber('/camera/depth/image',sensor_img)\n self._cloud_sub = message_filters.Subscriber('/camera/depth_registered/points',PointCloud2)\n self.ts = message_filters.ApproximateTimeSynchronizer([self._sub,self._depth_sub,self._cloud_sub],queue_size=10,slop = 0.1,allow_headerless=True)\n self.ts.registerCallback(self.callback)\n\n \n\n\n # creates a centroid publisher that publish Cx,Cy in cm \n self.centroidseg_pub = rospy.Publisher('/centroid',Centroid,queue_size = 10)\n self.centroid_msg = Centroid()\n\n\n #creates a BBox server, that publish Cx,Cy,target_w,target_h in pixels\n self.count = 0\n self.status = 0\n self.cx = 0\n self.cy = 0\n self.target_h = 0\n self.target_w = 0\n \n '''\n def handle_bbox_req(self,req):\n\n return BboxResponse(self.cx,self.cy,self.target_w,self.target_h) \n '''\n\n def ros_img2np(self,ros_img):\n # convert ros_img to cv_img\n \n cv_image = self.bridge.imgmsg_to_cv2(ros_img, \"bgr8\")\n (rows,cols,channels) = cv_image.shape\n h = max(param['crop_size'][0],rows)\n w = max(param['crop_size'][1], cols)\n # pre-proecess image\n # Extract mean.\n img_np = np.asarray( cv_image, dtype=\"float32\" )\n img_np -= IMG_MEAN\n img_np = np.resize(img_np,(h,w,3))\n img_np = np.expand_dims(img_np,axis=0)\n return img_np,h,w\n\n\n\n\n def callback(self, rgb_img , depth_img,cloud):\n frame_nu = rospy.get_param('frame_num')\n rospy.set_param('frame_num',int(frame_nu+1))\n img,h,w = self.ros_img2np(rgb_img)\n preds,inds= self._session.run([self.pred,self.indices], feed_dict={self.input_img:img})\n arr = np.asarray( preds[0],dtype= \"uint8\" )\n \n self.seg_pub.publish(self.bridge.cv2_to_imgmsg(arr, \"bgr8\"))\n\n if inds.shape[0] == 0:\n print(\"no humans in the environment\")\n else :\n self.count +=1\n print(self.count)\n tmp = ((np.sum(inds, axis=0)/inds.shape[0]))\n \n pose_y_pixels = tmp[1]\n pose_x_pixels = tmp[2]\n\n if self.count == 30:\n rospy.set_param('Cx',int(pose_x_pixels))\n rospy.set_param('Cy',int(pose_y_pixels))\n rospy.set_param('target_h',400)\n rospy.set_param('target_w',100)\n rospy.set_param('status',True)\n tf.reset_default_graph()\n rospy.signal_shutdown('Shutting down PSPNet Model')\n \n '''\n self.cx = pose_x_pixels\n self.cy = pose_y_pixels\n self.target_w = 100\n self.target_h = 400\n rospy.sleep(5.)\n \n s = rospy.Service('bbox', Bbox, self.handle_bbox_req)\n tf.reset_default_graph()\n rospy.signal_shutdown('Shutting down PSPNet Model')\n '''\n \n ## Centroid msg \n a = 0.00173667\n self.centroid_msg.centroid_z = 0.1\n self.centroid_msg.centroid_x = 
(pose_x_pixels-320)*a*self.centroid_msg.centroid_z*100\n self.centroid_msg.centroid_y = (pose_y_pixels-240)*a*self.centroid_msg.centroid_z*100\n self.centroidseg_pub.publish(self.centroid_msg)\n ## end of centroid msg \n\n \n\n def main(self):\n rospy.spin()\n\nif __name__ == '__main__':\n rospy.init_node('rospspnet')\n tensor = PspNet()\n tensor.main()\n","sub_path":"dat_dyn/scripts/load_pspnet.py","file_name":"load_pspnet.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"509460945","text":"import tensorflow as tf\n#import input_data\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nx = tf.placeholder(dtype=tf.float32, shape=[None, 784], name=\"x_label\")\n\nW = tf.Variable(tf.zeros([784, 10]), dtype=tf.float32, name=\"W\")\nb = tf.Variable(tf.zeros([10]), dtype=tf.float32, name=\"b\")\n\nwith tf.name_scope(\"Wx_b\") as scope:\n\ty = tf.nn.softmax(tf.matmul(x, W) + b)\n\nw_h = tf.histogram_summary(\"weights\", W)\nb_h = tf.histogram_summary(\"biases\", b)\n\ny_ = tf.placeholder(dtype=tf.float32, shape=[None, 10], name=\"y_label\")\n\nwith tf.name_scope(\"cost_entropy\") as scope:\n\tcross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) # + (1 - y_) * tf.log(1-y)\n\ttf.scalar_summary(\"cost\", cross_entropy)\n\nwith tf.name_scope(\"train\") as scope:\n\ttrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n#merged_summary_op = tf.merge_all_summaries()\n\nwith tf.Session() as sess:\n\tsess.run(tf.initialize_all_variables())\n\twriter = tf.train.SummaryWriter('./logs/dalsilog', sess.graph)\n\tmerged = tf.merge_all_summaries()\n\n\tfor i in range(1000):\n\t\tbatch_xs, batch_ys = mnist.train.next_batch(100)\n\t\tsess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n\t\tsummary = sess.run(merged, feed_dict={x: batch_xs, y_: batch_ys})\n\t\twriter.add_summary(summary, i)\n\n\tprint(sess.run(accuracy, feed_dict={x: mnist.train.images, y_: mnist.train.labels}))\n\tprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n","sub_path":"log_reg_MNIST_softmax_tensorboard.py","file_name":"log_reg_MNIST_softmax_tensorboard.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447740004","text":"# -*- coding: latin-1 -*-\n# geração de variáveis aleatórias 1D\nimport numpy as np\nimport numpy.random as rd\nimport matplotlib.pyplot as plt\nx=rd.wald(1,3,int(1e5)) #1e5 pts aleatórios (mu=1,lambda=3)\n#fazer histograma + plot da distribuição\nhx,b=np.histogram(x,np.linspace(0,5,201),density=True)\n#b->array de 201 entradas (fronteiras de quantificação)\n#obter valores de quantificação (valor a meio dos intervalos)\nb=(b[:-1]+b[1:])/2.0\nt=np.linspace(0+1e-6,5,1000)\nfx=np.sqrt(3/(2*np.pi*t**3))*np.exp(-3*(t-1)**2/(2*t))\nplt.figure(figsize=(8,4))\nplt.bar(b,hx,width=0.025,color=[0.9,.9,.9])\nplt.plot(t,fx,'r',linewidth=2)\nplt.axis([0,5,0,1.1])\nplt.show()\n#plt.savefig('../figs/L1AAex001.png',\\\n#bbox_inches='tight',transparent=True) 
\n","sub_path":"Labs/Lab1/L1AAex001.py","file_name":"L1AAex001.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260991203","text":"# coding=utf-8\nimport etcd\nimport gevent\nimport logging\nfrom gevent.hub import Waiter\n\nfrom .service import Service\nfrom octopus import err\nfrom octopus import constant\nfrom octopus.util.stoppable import Stoppable\nfrom octopus.proto import service_proto\n\nlog = logging.getLogger(constant.LOGGER_NAME)\n\n\nclass OctpClient(Stoppable):\n\n def __init__(self, etcd_options, service_names):\n \"\"\"\n\n :param etcd_options: etcd配置选项\n :param service_names: 需要监听的所有service的名称\n :type etcd_options: dict\n :type service_names: list, tuple\n :return:\n \"\"\"\n\n super(OctpClient, self).__init__()\n\n self.service_names = service_names\n self.service_dict = { # 所有service_name 对应的service list\n service_name: []\n for service_name in self.service_names\n }\n \"\"\":type: dict[str, list[Service]]\"\"\"\n\n self._diabled_service_list = [] # all disabled service\n \"\"\":type: list[Service]\"\"\"\n\n self._etcd_options = etcd_options\n self._ec = etcd.Client(**self._etcd_options)\n \"\"\":type: etcd.Client\"\"\"\n\n self._watcher_dict = {\n service_name: gevent.spawn(self._watcher_handler, service_name)\n for service_name in self.service_names\n }\n \"\"\":type: dict[str, Greenlet]\"\"\"\n self._waiter_dict = {\n service_name: set()\n for service_name in self.service_names\n }\n \"\"\":type: dict[str, set[Waiter]]\"\"\"\n self._watcher_starter_coroutine = None\n \"\"\":type: Greenlet\"\"\"\n\n\n def _start_handler(self):\n self._watcher_starter_coroutine = gevent.spawn(self._start_watcher)\n\n self._get_initialize_service() # 获取当前的service列表\n log.info('OctpClient(%s) started.', self.service_names)\n\n def _stop_handler(self):\n gevent.joinall([self._watcher_starter_coroutine,])\n log.info('OctpClient(%s) stopped.', self.service_names)\n\n def _restart_handler(self):\n pass\n\n def _get_initialize_service(self):\n for service_name in self.service_names:\n try:\n result = service_proto.get(self._ec, service_name)\n except err.OctpServiceNotFoundError:\n log.warn('Now, NO node for service(%s).', service_name)\n continue\n\n if not result._children:\n log.warn('Now, NO any server for service(%s).', service_name)\n else:\n for service_node in result.leaves:\n self._add_service(service_name, service_node)\n\n #### service ####\n def disable_service(self, service):\n \"\"\"\n\n :param service:\n :type service: Service\n :return:\n \"\"\"\n\n service_list = self._get_service_list(service.service_name)\n service_list.remove(service)\n self._diabled_service_list.append(service)\n\n def _get_service_list(self, service_name):\n \"\"\"\n Get service_list for the service_name.\n :param service_name:\n :type service_name: str\n :return:\n :rtype: list\n \"\"\"\n service_list = self.service_dict[service_name]\n\n return service_list\n\n #### waiter ####\n def add_waiter(self, service_name, waiter):\n \"\"\"\n 向指定service_name下增加一个waiter, 以等待该service_name下有新的事件发生\n :param service_name:\n :param waiter: Waiter\n :type service_name: str\n :type waiter: Waiter\n :return:\n \"\"\"\n\n self._waiter_dict[service_name].add(waiter)\n\n def del_waiter(self, service_name, waiter):\n \"\"\"\n 移除指定service_name下的一个waiter\n :param service_name:\n :param waiter:\n :type service_name: str\n :type waiter: Waiter\n :return:\n \"\"\"\n\n self._waiter_dict[service_name].discard(waiter)\n\n def 
_notify_waiter(self, service_name, action):\n \"\"\"\n 通知指定service_name下的所有waiter\n :param service_name:\n :param action: 当前变更动作,定义与constatn.SERVICE_ACTION\n :type service_name: str\n :type action: str\n :return:\n \"\"\"\n\n for waiter in self._waiter_dict[service_name]:\n gevent.get_hub().loop.run_callback(lambda: waiter.switch(action))\n\n #### 监听service的改动 ####\n def _start_watcher(self):\n gevent.joinall(self._watcher_dict.values())\n\n def _watcher_handler(self, service_name):\n while not self._stop:\n try:\n result = service_proto.watch(self._ec, service_name, timeout=10)\n self._deal_watch_result(service_name, result)\n except etcd.EtcdWatchTimedOut:\n log.debug('service watch timeout.')\n continue\n\n def _deal_watch_result(self, service_name, result):\n \"\"\"\n\n :param result: watch 返回的EtcdResult对象\n :type result: etcd.EtcdResult\n :return:\n \"\"\"\n\n log.debug('service change: %s', result)\n action = constant.SERVICE_ACTION.NONE\n\n if result.action in ('create',):\n self._add_service(service_name, result)\n action = constant.SERVICE_ACTION.ADD\n elif result.action in ('delete', 'expire', 'compareAndDelete'):\n self._del_service(service_name, result)\n action = constant.SERVICE_ACTION.DEL\n elif result.action in ('set', 'compareAndSwap', 'update',):\n self._update_service(service_name, result)\n action = constant.SERVICE_ACTION.UPDATE\n else:\n raise err.OctpServiceInvalidState('Encounter invalid action: %s', result.action)\n\n self._notify_waiter(service_name, action)\n\n log.debug('Now, service_dict: %s', self.service_dict)\n\n def _add_service(self, service_name, result):\n \"\"\"\n\n :param service_name:\n :param result:\n :type service_name: str\n :type result: etcd.EtcdResult\n :return:\n \"\"\"\n\n service_list = self._get_service_list(service_name)\n try:\n new_service = Service(service_name, result.key, result.value)\n except err.OctpServiceInfoError as e:\n # ignore invalid service_info\n log.warn(e)\n return\n\n service_list.append(new_service)\n log.info('Add service (%s : %s)', service_name, new_service)\n\n def _del_service(self, service_name, result):\n \"\"\"\n\n :param service_name:\n :param result:\n :type service_name: str\n :type result: etcd.EtcdResult\n :return:\n \"\"\"\n\n service_list = self._get_service_list(service_name)\n\n for index, old_service in enumerate(service_list):\n if old_service.name == result.key:\n break\n else:\n log.debug('service(%s) NOT find. 
maybe delete already.')\n return\n\n del service_list[index]\n log.info('Del service (%s : %s)', service_name, old_service)\n\n def _update_service(self, service_name, result):\n \"\"\"\n\n :param service_name:\n :param result:\n :type service_name: str\n :type result: etcd.EtcdResult\n :return:\n \"\"\"\n\n service_list = self._get_service_list(service_name)\n\n for index, old_service in enumerate(service_list):\n if old_service.name == result.key:\n break\n else:\n self._add_service(service_name, result)\n return\n\n try:\n new_service = Service(service_name, result.key, result.value)\n except err.OctpServiceInfoError as e:\n # ignore invalid service_info\n log.warn(e)\n return\n else:\n service_list[index] = new_service\n log.info('Update service (%s : %s -> %s)', service_name, old_service, new_service)\n","sub_path":"service/octp_client.py","file_name":"octp_client.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"96854265","text":"## @file date_adt.py\n# @author Bruce He\n# @brief ADT for DateT\n# @date 2020/01/14\n\nfrom datetime import *\n\n\n# @brief create ADT for date related calculation\nclass DateT:\n\n ## @brief DateT constructor\n # @details initialize DateT object with integers d, m, y as input\n # If any invalid input of dates is given, a ValueError will shown\n # @param datetime represents current date in datetime type\n # @param d correspond to day\n # @param m correspond to month\n # @param y correspond to year\n # @exception ValueError throws when inputs are not valid for datetime\n def __init__(self, d, m, y):\n try:\n self.__datetime = datetime(y, m, d)\n except:\n raise ValueError(\"Please input valid datetime.\")\n self.__d = d\n self.__m = m\n self.__y = y\n\n ## @brief shows the value of current day\n # @return the value of current day\n def day(self):\n return self.__d\n\n ## @brief shows the value of current month\n # @return the value of current month\n def month(self):\n return self.__m\n\n ## @brief shows the value of current year\n # @return the value of current year\n def year(self):\n return self.__y\n\n ## @brief shows the day after current date\n # @detail First create a datetime object 'add1date' by adding self.__datetime and timedelta(1),\n # which represents 1 day. Then extract the values of new day, month and year by using Class Attributes.\n # Finally, return a DateT object with the new date.\n # @param add1date adding one day in current date\n # @return DateT object that is one day later than current date\n def next(self):\n add1date = self.__datetime + timedelta(1)\n self.__d = add1date.day\n self.__m = add1date.month\n self.__y = add1date.year\n\n return DateT(self.__d, self.__m, self.__y)\n\n ## @brief shows the day before current date\n # @detail With a similar to the next() method, prev() method instead return one day before current date.\n # @param minus1date showing one day earlier than current date\n # @return DateT object that is one day earlier than current date\n def prev(self):\n minus1date = self.__datetime + timedelta(-1)\n self.__d = minus1date.day\n self.__m = minus1date.month\n self.__y = minus1date.year\n\n return DateT(self.__d, self.__m, self.__y)\n\n ## @brief determine if current date is before the target date d\n # @detail Transfer target date d as datetime type. 
Then, use diff to store the difference in days between\n # current date and target date, in value of days.Then, convert diff from timedelta to integer.\n # If diff is smaller than 0, current date is before target date, so return True. Return false otherwise.\n # @param d target date for comparison\n # @param temp_date represent target date\n # @param diff value of difference between current date to target date, measured in days.\n # @return True if current date is before target date, False otherwise.\n def before(self, d):\n temp_date = datetime(d.year(), d.month(), d.day()) # Use day() to get value of day, instead of d.day\n diff = self.__datetime - temp_date\n diff = diff.days\n if diff < 0:\n return True\n else:\n return False\n\n ## @brief determine if current date is after the target date d\n # @detail Similar process as method before(self, d). This time, return true if diff is greater than 0, which means\n # current date is after target date. Return False otherwise.\n # @param d target date for comparison\n # @param temp_date represent target date\n # @param diff value of difference between current date to target date, measured in days.\n # @return True if current date is after target date, False otherwise.\n def after(self, d):\n temp_date = datetime(d.year(), d.month(), d.day())\n diff = self.__datetime - temp_date\n diff = diff.days\n if diff > 0:\n return True\n else:\n return False\n\n ## @brief determine if current date is equal to the target date d\n # @detail Similar process as method after(self, d) and before (self, d). This time, return true if diff is 0, which\n # means current is the same as target date. Return False otherwise.\n # @param d target date for comparison\n # @param temp_date represent target date\n # @param diff value of difference between current date to target date, measured in days.\n # @return True if current date is the same as target date, False otherwise.\n def equal(self, d):\n temp_date = datetime(d.year(), d.month(), d.day())\n diff = self.__datetime - temp_date\n diff = diff.days\n if diff == 0:\n return True\n else:\n return False\n\n ## @brief take integer n, return DateT that is n days later than current date\n # @detail Create a new datetime object new_date by adding current datetime with n days, in timedelta format.\n # I assume that n can either be positive or negative. If n is positive, that means we want a new date that\n # is n days after the current date. If n is negative, that means we want a new date that is n days earlier\n # than current date. 
If n is zero, new date is the same as current date.\n # Expect input n as integer with reasonable value.\n # @param n days be added on current date\n # @param new_date represents date to be shown, after calculation\n # @return DateT object, with n days added or subtracted to the current date\n def add_days(self, n):\n new_date = self.__datetime + timedelta(n)\n return DateT(new_date.day, new_date.month, new_date.year)\n\n ## @brief take DateT object d, return the number of days between current date and date d\n # @detail I assume that the number of difference in days, between two dates, is always non-negative.\n # So no matter current date is before or after the date stored in d, the returning value is non-negative.\n # First, transfer d from DateT object to datetime type, in new_date.\n # Then, subtracting one date to another date, and change result from timedelta object to integer.\n # Return the integer that represents the day difference between current date and date d.\n # @param d DateT object used to compare with current date\n # @param new_date represents date stored in DateT object d\n # @return the number of days between current date and date stored in d\n def days_between(self, d):\n new_date = datetime(d.year(), d.month(), d.day())\n if self.__datetime >= new_date:\n return (self.__datetime - new_date).days\n else:\n return (new_date - self.__datetime).days\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"A1/partner/date_adt.py","file_name":"date_adt.py","file_ext":"py","file_size_in_byte":6915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"225746203","text":"import praw\r\nfrom praw.models import MoreComments\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nreddit = praw.Reddit(\r\n client_id = \"0CImLpjwo5_t9g\",\r\n client_secret = \"_UxJ95DWRlpseQfFjknMpf7Lrhw\",\r\n user_agent = \"reddit_script by /u/QuanticSaber\"\r\n)\r\n\r\nif reddit.read_only:\r\n print(\"Contact established\")\r\nelse:\r\n print(\"Contact FAILED\")\r\n\r\n\r\n\r\naw_subs = list(reddit.subreddit(\"aww\").hot(limit=22))\r\naw_comments = list()\r\n# the first two posts aren't related to r/aww\r\ndel aw_subs[0] # unrelated megathread\r\ndel aw_subs[1] # unrelated megathread\r\nfor post in aw_subs:\r\n for comment in post.comments.list():\r\n if not isinstance(comment, MoreComments):\r\n aw_comments.append(comment.body)\r\n\r\n\r\nsci_subs = list(reddit.subreddit(\"science\").hot(limit=20))\r\nsci_comments = list()\r\n\r\nfor post in sci_subs:\r\n for comment in post.comments.list():\r\n if not isinstance(comment, MoreComments):\r\n sci_comments.append(comment.body)\r\n\r\n# 0 is aww comment\r\n# 1 is science comment\r\ncorpus = aw_comments + sci_comments\r\ny_train = y_train = [0] * len(aw_comments) + [1] * len(sci_comments)\r\n\r\n\r\nvectorizer = CountVectorizer()\r\nvectorizer.fit(corpus)\r\nx_train = vectorizer.transform(corpus)\r\n\r\nclassifier = MultinomialNB()\r\nclassifier.fit(x_train, y_train)\r\n\r\n\r\nprint('Enter text to determine if it should belong to r/aww or r/science')\r\ntemp = input() \r\nx_test = vectorizer.transform(list(temp)) \r\nprint(classifier.predict(x_test))\r\n ","sub_path":"comment_detector.py","file_name":"comment_detector.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"262903871","text":"import requests\nimport json\n\nclass SimpleCrawler:\n def 
crawl(self, params=None):\n url = \"https://www.zhihu.com/api/v4/columns/pythoneer/followers\"\n params = {\n \"limit\": 20,\n \"offset\": 0,\n \"include\": \"data[*].follower_count, gender, is_followed, is_following\"\n }\n headers = {\n \"authority\": \"www.zhihu.com\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36\",\n }\n response = requests.get(url, headers=headers, params=params)\n # print(\"请求URL:\", response.url)\n # print(\"返回数据:\", response.text)\n # print(response.json())\n for follower in response.json().get(\"data\"):\n print(follower)\n with open('./followers.json', \"w\") as f:\n json.dump(response.json().get(\"data\"), f, ensure_ascii=False)\n print(\"加载文件完成...\")\n \nif __name__ == '__main__':\n SimpleCrawler().crawl()","sub_path":"python/gzh/zhua/zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"346831246","text":"import pygame\nfrom random import random, randint\n\n# Game classes\nfrom utils import *\nfrom object import Object\nfrom player import Player\nfrom solid import Solid\n\nwin = pygame.display.set_mode((600, 400))\npygame.display.set_caption(\"Nowa Gra\")\n\nWIDTH, HEIGHT = pygame.display.get_surface().get_size()\n\ncameraPos = V2(0, 0)\n\ndef draw(*args):\n for object in args:\n shape = (object.pos.x-cameraPos.x, object.pos.y-cameraPos.y, object.size.x, object.size.y)\n shape = tuple(shape)\n object.draw(win, shape)\n\nclock = pygame.time.Clock()\n\nframerate = 120\n\nplayer = Player(V2(100, 0), V2(26*2, 24*3), None)\n# player.color = (255, 255, 255)\nplayer.image = pygame.transform.scale(pygame.image.load(\"images/witch.png\"), (26*3, 24*3))\nplayer.velocity.x = 170\nplayer.alive = True\n\ntopSolid = []\nbottomSolid = []\nfor i in range(4):\n y = 0-randint(0, HEIGHT//2)\n topSolid.append(Solid(V2(WIDTH+230*i, y), V2(30, 396/2)))\n topSolid[-1].image = pygame.transform.scale(pygame.image.load(\"images/obstacleTop.png\"), (30, 396//2))\n bottomSolid.append(Solid(V2(WIDTH+230*i, y+HEIGHT), V2(30, 396/2)))\n bottomSolid[-1].image = pygame.transform.scale(pygame.image.load(\"images/obstacleBottom.png\"), (30, 396//2))\n\non = True\nwhile on:\n delta = clock.tick(framerate)/1000.0 # seconds after last tick\n\n # pygame.display.set_caption(str(int(1/delta))+\" FPS\")\n \n pygame.display.set_caption(\"Flappy Witch\")\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n on = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and player.alive:\n player.velocity.y = -240\n \n # Moving\n player.velocity.y += 800 * delta\n \n movement = 0\n \n if player.alive:\n normal = player.move(delta, topSolid, bottomSolid)\n if normal:\n player.alive = False\n player.velocity.x = 0\n player.velocity.y = -350\n else:\n player.move(delta)\n \n # Display\n cameraPos.x = player.pos.x - 100\n \n win.fill((45, 65, 96))\n \n for solid in topSolid + bottomSolid:\n draw(solid)\n \n newTop = topSolid[:]\n newBottom = bottomSolid[:]\n for solid in topSolid:\n if cameraPos.x > solid.pos.x + solid.size.x:\n newTop.pop(0)\n newBottom.pop(0)\n \n y = 0-randint(0, HEIGHT//2)\n newTop.append(Solid(V2(WIDTH+230+cameraPos.x, y), V2(30, HEIGHT//2)))\n topSolid[-1].image = pygame.transform.scale(pygame.image.load(\"images/obstacleTop.png\"), (30, 396//2))\n newBottom.append(Solid(V2(WIDTH+230+cameraPos.x, y+HEIGHT), V2(30, HEIGHT//2)))\n 
bottomSolid[-1].image = pygame.transform.scale(pygame.image.load(\"images/obstacleBottom.png\"), (30, 396//2))\n \n topSolid = newTop\n bottomSolid = newBottom\n del newTop, newBottom\n \n draw(player)\n \n pygame.display.update()\n\npygame.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602525523","text":"#!/usr/bin/env python\n\"\"\"Guide monitor\n\nHistory:\n2010-09-27 ROwen Initial version.\n2010-09-28 ROwen Modified to use new showY method, e.g. to always show the 1\" line.\n2010-09-30 ROwen Fixed FWHM units: they are pixels, not arcsec. Thanks to Joe F.\n2010-10-01 ROwen Modified to use TUI.Base.StripChartWdg.\n Turned off frame on legend.\n2010-10-18 ROwen Changed timespan to 1 hour (at Russet's request).\n Combined guide and seeing monitor (since they now have the same timespan).\n Changed guide star brightness chart to only include star data of type \"g\"\n (guider-reported star data); formerly it also showed \"c\" (manually centroided stars).\n2010-12-10 ROwen Reduced the memory leak by increasing updateInterval from its default value of 1.8 sec\n to 20 seconds. Return to the default value again once the matplotlib bug is fixed.\n2010-12-23 ROwen Modified to use new version of StripChartWdg.\n2014-06-06 ROwen Fix a traceback caused by unknown tccModel.guideOff.\n\"\"\"\nimport math\nimport Tkinter\nimport matplotlib\nimport RO.Wdg\nimport TUI.Base.StripChartWdg\nimport TUI.Guide.GuideModel\nimport TUI.TCC.TCCModel\n\nWindowName = \"Guide.Guide Monitor\"\n\ndef addWindow(tlSet):\n \"\"\"Create the window for TUI.\n \"\"\"\n tlSet.createToplevel(\n name = WindowName,\n defGeom = \"+434+22\",\n visible = False,\n resizable = True,\n wdgFunc = GuideMonitorWdg,\n )\n\nclass GuideMonitorWdg(Tkinter.Frame):\n \"\"\"Monitor guide star FWHM, focus and guide corrections.\n \"\"\"\n def __init__(self, master, timeRange=3600, width=10, height=6):\n \"\"\"Create a GuideMonitorWdg\n \n Inputs:\n - master: parent Tk widget\n - timeRange: range of time displayed (seconds)\n - width: width of plot (inches)\n - hiehgt: height of plot (inches)\n \"\"\"\n Tkinter.Frame.__init__(self, master)\n \n self.tccModel = TUI.TCC.TCCModel.getModel()\n \n self.stripChartWdg = TUI.Base.StripChartWdg.StripChartWdg(\n master = self,\n timeRange = timeRange,\n updateInterval = 20,\n numSubplots = 4,\n width = width,\n height = height,\n cnvTimeFunc = TUI.Base.StripChartWdg.TimeConverter(useUTC=True),\n )\n self.stripChartWdg.grid(row=0, column=0, sticky=\"nwes\")\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(0, weight=1)\n\n # the default ticks are not nice, so be explicit\n self.stripChartWdg.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=range(0, 60, 15)))\n\n spInd = 0\n \n # FWHM\n self.stripChartWdg.subplotArr[spInd].yaxis.set_label_text(\"FWHM (pix)\")\n self.fwhmLine = self.stripChartWdg.addLine(label=\"FWHM\", subplotInd=spInd, color=\"green\")\n self.stripChartWdg.addConstantLine(1.0, subplotInd=spInd, color=\"purple\")\n self.stripChartWdg.showY(0, 1.2, subplotInd=spInd)\n spInd += 1\n \n self.guideModelDict = {} # guide camera name: guide model\n for guideModel in TUI.Guide.GuideModel.modelIter():\n gcamName = guideModel.gcamName\n if gcamName.endswith(\"focus\"):\n continue\n self.guideModelDict[guideModel.gcamName] = guideModel\n guideModel.star.addCallback(self._updStar, callNow=False)\n \n # Brightness\n 
self.stripChartWdg.subplotArr[spInd].yaxis.set_label_text(\"Bright (ADU)\")\n self.brightnessLine = self.stripChartWdg.addLine(label=\"Brightness\", subplotInd=spInd, color=\"green\")\n self.stripChartWdg.showY(0, 100, subplotInd=spInd)\n spInd += 1\n\n # Focus\n self.stripChartWdg.subplotArr[spInd].yaxis.set_label_text(\"Focus (um)\")\n self.stripChartWdg.plotKeyVar(label=\"Sec Piston\", subplotInd=spInd, keyVar=self.tccModel.secOrient, color=\"green\")\n self.stripChartWdg.plotKeyVar(label=\"User Focus\", subplotInd=spInd, keyVar=self.tccModel.secFocus, color=\"blue\")\n self.stripChartWdg.showY(0, subplotInd=spInd)\n self.stripChartWdg.subplotArr[spInd].legend(loc=3, frameon=False)\n spInd += 1\n\n # Guide correction\n self.stripChartWdg.subplotArr[spInd].yaxis.set_label_text(\"Guide Off (\\\")\")\n self.azOffLine = self.stripChartWdg.addLine(label=\"Az (on sky)\", subplotInd=spInd, color=\"green\")\n self.altOffLine = self.stripChartWdg.addLine(label=\"Alt\", subplotInd=spInd, color=\"blue\")\n self.stripChartWdg.showY(-3.0, 3.0, subplotInd=spInd)\n self.stripChartWdg.subplotArr[spInd].legend(loc=3, frameon=False)\n spInd += 1\n\n self.tccModel.guideOff.addCallback(self._updGuideOff, callNow=False)\n \n def _updGuideOff(self, *args, **kargs):\n \"\"\"Updated actual guide offset in az, alt (\")\n \"\"\"\n if not self.tccModel.guideOff.isCurrent():\n return\n if not self.tccModel.guideOff.isGenuine():\n return\n\n guideOffPVTList = self.tccModel.guideOff.get()[0]\n if None in guideOffPVTList:\n return\n guideOffArcSecList = [pvt.getPos() * RO.PhysConst.ArcSecPerDeg for pvt in guideOffPVTList]\n currAlt = self.tccModel.axePos.getInd(1)[0]\n if currAlt is None:\n return\n azOffsetOnSky = guideOffArcSecList[0] * math.cos(currAlt * RO.PhysConst.RadPerDeg)\n \n self.azOffLine.addPoint(azOffsetOnSky)\n self.altOffLine.addPoint(guideOffArcSecList[1])\n \n def _updStar(self, valList, isCurrent=True, keyVar=None):\n \"\"\"Updated star data\n\n The fields are as follows, where lengths and positions are in binned pixels\n and intensities are in ADUs:\n 0 type characer: c = centroid, f = findstars, g = guide star\n 1 index: an index identifying the star within the list of stars returned by the command.\n 2,3 x,yCenter: centroid\n 4,5 x,yError: estimated standard deviation of x,yCenter\n 6 radius: radius of centroid region\n 7 asymmetry: a measure of the asymmetry of the object;\n the value minimized by PyGuide.centroid.\n Warning: not normalized, so probably not much use.\n 8 FWHM major\n 9 FWHM minor\n 10 ellMajAng: angle of ellipse major axis in x,y frame (deg)\n 11 chiSq: goodness of fit to model star (a double gaussian). From PyGuide.starShape.\n 12 counts: sum of all unmasked pixels within the centroid radius. From PyGuide.centroid\n 13 background: background level of fit to model star. From PyGuide.starShape\n 14 amplitude: amplitude of fit to model star. 
From PyGuide.starShape\n For \"g\" stars, the two following fields are added:\n 15,16 predicted x,y position\n \"\"\"\n if not isCurrent:\n return\n if valList[0] != \"g\":\n return\n self.fwhmLine.addPoint(valList[8])\n self.brightnessLine.addPoint(valList[12])\n\n\nif __name__ == \"__main__\":\n import TestData\n\n addWindow(TestData.tuiModel.tlSet)\n TestData.tuiModel.tlSet.makeVisible(WindowName)\n \n TestData.runTest()\n \n TestData.tuiModel.tkRoot.mainloop()\n","sub_path":"TUI/Guide/GuideMonitor/GuideMonitorWindow.py","file_name":"GuideMonitorWindow.py","file_ext":"py","file_size_in_byte":7320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"470136215","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 4 15:54:03 2018\n\n@author: gilbe\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom utils.utils import PROJECT_DATA_DIR\nimport os\nimport mxnet as mx\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom sklearn.preprocessing import (MinMaxScaler,\n StandardScaler,\n Imputer,\n QuantileTransformer)\nimport matplotlib.pyplot as plt\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout, BatchNormalization\nfrom keras import optimizers\nfrom sklearn.metrics import f1_score\n\n\ndef load_data(file='xab.csv'):\n \"\"\" Data is supposed to be preprocessed\n and space separated \"\"\"\n data = pd.read_csv(os.path.join(PROJECT_DATA_DIR, file),\n sep='\\s+', header=None)\n return data\n\n\ndef get_xy(data):\n \"\"\" We suppose the y labels are in the\n last column in the data set \"\"\"\n y = data[data.columns[-1]]\n x = data.drop(data.columns[-1], axis=1)\n\n return x, y\n\n\ndef scale_data(xtrain, xtest, scaler_mode=None):\n \"\"\" Args\n ----\n xtrain: training dataframe with features\n\n xtest: test dataframe with features\n\n scaler: a scikit-learn scaler, either minmax,\n or normalization\"\"\"\n\n if scaler_mode == 'minmax':\n scaler = MinMaxScaler()\n\n elif scaler_mode in ['normal', 'Normal', 'standardscaler']:\n scaler = StandardScaler()\n\n else:\n scaler = QuantileTransformer(output_distribution='normal')\n\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n return xtrain, xtest\n\n\ndef binarize_y(y, arg_list=[12, 13, 17, 20]):\n \"\"\" pass list to take values in one of 2 categories \"\"\"\n\n # Convert labels to categorical one-hot encoding\n # one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)\n return y.map(lambda x: 1 if x in arg_list else 0)\n\n\ndef prepare_data(train, test, arg_list, binary_class=False):\n \"\"\" return xtran, ytrain, xtest, ytest\"\"\"\n xtrain, ytrain = get_xy(train)\n xtest, ytest = get_xy(test)\n unique_classes = len(np.unique(ytrain))\n if binary_class:\n ytrain = binarize_y(ytrain, arg_list)\n ytest = binarize_y(ytest, arg_list)\n else:\n ytrain = keras.utils.to_categorical(\n ytrain + 1,\n num_classes=unique_classes)\n ytest = keras.utils.to_categorical(\n ytest + 1,\n num_classes=unique_classes)\n\n return xtrain, ytrain, xtest, ytest\n\n\nif __name__ == '__main__':\n print('Loading data...')\n train = load_data(file='all_training_400_minisensor_1.csv')\n test = load_data(file='all_test_400_minisensor.csv')\n print('Done with loading data.')\n\n xtrain, ytrain = get_xy(train)\n xtest, ytest = get_xy(test)\n\n ytrain_bin = binarize_y(ytrain)\n ytest_bin = binarize_y(ytest)\n print('info xtrain:')\n print('xtrain.shape:', xtrain.shape, 'ytrain.shape:', ytrain_bin.shape)\n 
print('xtest.shape:', xtest.shape, 'ytest.shape:', ytest_bin.shape)\n\n print('unique ytrain values:', ytrain.unique())\n print('unique ytest values:', ytest.unique())\n\n print('unique ytrain values:', ytrain_bin.unique())\n print('unique ytest values:', ytest_bin.unique())\n\n print('')\n print('Look at both training and test sets.')\n print(xtrain.head())\n print(xtest.head())\n\n","sub_path":"code/load_preprocess.py","file_name":"load_preprocess.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"376863573","text":"import psutil\n\ndef hardDisks():\n drps = psutil.disk_partitions()\n drives = [dp.mountpoint for dp in drps if dp.fstype in ('NTFS','EXT4')]\n partitions = []\n\n for drive in drives:\n usage = psutil.disk_usage(drive)\n particao = drive.replace('\\\\', '').replace(':', '')\n total = (usage.total // 2**30)\n utilizado = (usage.used // 2**30)\n livre = (usage.free // 2**30)\n \n partitions.append([particao, total, utilizado, livre])\n\n return partitions","sub_path":"harddisks.py","file_name":"harddisks.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413163858","text":"from wsgiref.simple_server import make_server\nfrom fanteweb import FanteWeb, jsonify\n\nidx = FanteWeb.Router()\npy = FanteWeb.Router('/python')\n\nFanteWeb.register(idx)\nFanteWeb.register(py)\n\n@idx.get('^/$')\ndef index(request:FanteWeb.Request):\n print(request)\n res = FanteWeb.Response()\n res.body = \"
爱学习的fante\".encode()\n    return res\n\n@py.get('/{name:str}/{id:int}')\ndef index(request:FanteWeb.Request):\n    res = FanteWeb.Response()\n    res.body = \"爱学习的fante写Python {}
\".format(request.vars.id).encode()\n return res\n\n# 定义一个拦截器\n@idx.reg_postinterceptor\ndef showjson(ctx, request, response):\n body = response.body.decode()\n return jsonify(body=body)\n\nif __name__ == \"__main__\":\n ip = \"127.0.0.1\"\n port = 9999\n server = make_server(ip, port, FanteWeb())\n\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n pass\n finally:\n server.server_close()\n","sub_path":"fanteweb/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263804841","text":"import Levenshtein \n\nclass Classifier:\n\n municipio = []\n departamento = []\n categoria = []\n\n def __init__(self):\n self.municipio = []\n self.departamento = []\n self.categoria = []\n\n with open('Catalogs/municipios.txt') as munf:\n self.municipio = munf.readlines()\n with open('Catalogs/departamentos.txt') as depf:\n self.departamento = depf.readlines()\n with open('Catalogs/categorias.txt') as catf:\n self.categoria = catf.readlines()\n\n\n self.municipio = [x.decode('latin-1').upper().strip() for x in self.municipio]\n self.departamento = [x.decode('latin-1').upper().strip() for x in self.departamento]\n self.categoria = [x.decode('latin-1').upper().strip() for x in self.categoria]\n\n def byMunicipio(self, value):\n minratio = 0\n rvalue = value\n c = 0\n for x in self.municipio:\n val = Levenshtein.ratio(value,x.encode('utf-8'))\n if (val > minratio):\n rvalue = x.strip()\n minratio = val\n if (val == 1):\n return x.strip()\n return rvalue\n\n def byDepartamento(self, value):\n minratio = 0\n rvalue = value\n c = 0\n for x in self.departamento:\n val = Levenshtein.ratio(value,x.encode('utf-8'))\n if (val > minratio):\n rvalue = x.strip()\n minratio = val\n if (val == 1):\n return x.strip()\n return rvalue\n\n def byCategoria(self, value):\n minratio = 0\n rvalue = value\n c = 0\n for x in self.categoria:\n val = Levenshtein.ratio(value,x.encode('utf-8'))\n if (val > minratio):\n rvalue = x.strip()\n minratio = val\n if (val == 1):\n return x.strip()\n return rvalue\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"533673685","text":"import pygame\r\nfrom pygame.locals import *\r\nfrom random import randint\r\npygame.init()\r\n\r\n#COLOUR Variables\r\nBLACK=(0,0,0)\r\nWHITE=(255,255,255)\r\nRED=(255,0,0)\r\nGREEN=(0,255,0)\r\nBLUE=(50,50,225)\r\n\r\nmy_clock=pygame.time.Clock()\r\n\r\nscreen=pygame.display.set_mode((640, 480), 0, 32)\r\nscreen.fill(WHITE)\r\n\r\n#Getting the word from the text file\r\ntext_file=open(\"the_words.txt\")\r\nword=[]\r\nfor a in text_file:\r\n word += [a]\r\nword_length=len(word)\r\n\r\ndef randomizer(x):\r\n number=randint(0,x-1)\r\n return number\r\n\r\nword_number=randomizer(word_length)\r\nthe_special_word=word[word_number]\r\n\r\n\r\n#DEFAULT (COLOUR + TIME) \r\nx=10\r\ny=10\r\ncolour=BLACK\r\nfont=pygame.font.Font(\"Vera.ttf\",16)\r\n\r\n#PICTURES\r\n#palette= pygame.image.load('panel.png')\r\n\r\nwhile True:\r\n\r\n for event in pygame.event.get():\r\n if event.type ==pygame.QUIT:\r\n False\r\n #Draw where ever the mouse is clicked \r\n if pygame.mouse.get_pressed()[0]:\r\n mouse_position=pygame.mouse.get_pos()\r\n pygame.draw.circle(screen,colour,(mouse_position),10)\r\n if pygame.mouse.get_pressed()[2]:\r\n mouse_position=pygame.mouse.get_pos()\r\n 
pygame.draw.circle(screen,WHITE,(mouse_position),10)\r\n\r\n    #Checks to see what keys are being pressed. Corresponding Action\r\n    keys=pygame.key.get_pressed() \r\n    #colour changing\r\n    if keys[pygame.K_1]:\r\n        colour=BLUE\r\n    elif keys[pygame.K_2]:\r\n        colour=RED\r\n    elif keys[pygame.K_3]:\r\n        colour=GREEN\r\n    elif keys[pygame.K_4]:\r\n        colour=BLACK\r\n    elif keys[pygame.K_5]:\r\n        colour=WHITE\r\n    elif keys[pygame.K_7]:\r\n        screen.fill(WHITE)\r\n        word_number=randomizer(word_length)\r\n        the_special_word=word[word_number]\r\n    elif keys[pygame.K_s]: \r\n        pygame.image.save(screen,\"latest_drawing.jpg\")\r\n    \r\n    \r\n    pygame.display.update()\r\n    my_clock.tick(120) #120 FPS\r\n\r\n    #Prints out the word to draw \r\n    time_surface=font.render(\"Draw a(n) \"+the_special_word.strip(),True, (0,192,0))\r\n    screen.blit(time_surface,(230,20))\r\n\r\n    #screen.blit(palette, (500,340))\r\n    \r\n\r\npygame.quit()\r\n","sub_path":"Pictionary Pygame Game/old game versions/v6.py","file_name":"v6.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"624380259","text":"import socket\nimport threading\nimport time\n\nclients = []\nPORT = 4567\nBUF_SIZE = 1024\n\n\ndef server_task(sock, addr):\n    while True:\n        data = sock.recv(BUF_SIZE)\n\n        # Disconnect the connection that requested to quit\n        if 'quit' in data.decode() and sock in clients:\n            print(addr, ': exited')\n            clients.remove(sock)\n            continue\n\n        print(time.asctime() + str(addr) + ':' + data.decode())\n\n        # Relay the message to every client except the sending socket\n        for x in clients:\n            if x != sock:\n                x.send(data)\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('', PORT))\ns.listen(50)\n\nprint('=' * 20 + 'Server Started' + '=' * 20)\n\nwhile True:\n    conn, addr = s.accept()\n    clients.append(conn)\n    print(conn, ': connected')\n    threading.Thread(target=server_task, args=(conn, addr)).start()\n","sub_path":"HW10/tcp_multi_chat_server.py","file_name":"tcp_multi_chat_server.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"505403554","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Usuario(models.Model):\n    \"\"\"\n    Nome e email são as informações mínimas\n    para alguém se inscrever em um evento.\n    \"\"\"\n    nome = models.CharField(max_length=255, blank=False)\n    email = models.EmailField(max_length=255, blank=False)\n    cpf = models.CharField(max_length=20, blank=True)\n\n    def __str__(self):\n        return self.nome\n\n    def inscrever_em_evento(self, evento, tipo):\n        evento.realizar_inscricao(evento, self, tipo)\n\n    class Meta:\n        verbose_name_plural = 'Usuários'\n\n\nclass Evento(models.Model):\n    nome = models.CharField(max_length=255)\n    descricao = models.TextField()\n    local = models.CharField(max_length=255)\n    data_hora = models.DateTimeField()\n    organizador = models.ForeignKey(\n        Usuario,\n        on_delete=models.CASCADE,\n        related_name=\"meus_eventos\")\n\n    def __str__(self):\n        return self.nome\n    \n    def add_inscricao(self, tipo_evento, usuario_inscrito):\n        \"\"\"Inscrever um usuário no Evento e retornar a Inscricao\"\"\"\n        nova_inscricao = Inscricao.objects.create(\n            evento=self, tipo=tipo_evento, inscrito=usuario_inscrito)\n        return nova_inscricao\n\n    def add_tipo_inscricao(self, nome, descricao, preco, quantidade, data_inicio_vendas, data_fim_vendas):\n        \"\"\"Adiciona um tipo de Inscrição ao evento. 
Exemplo: Gratuita.\"\"\"\n novo_tipo_inscricao = TipoInscricao.objects.create(\n evento=self, nome=nome,\n descricao=descricao, preco=preco,\n quantidade=quantidade,\n data_inicio_vendas=data_inicio_vendas,\n data_fim_vendas=data_fim_vendas\n )\n return novo_tipo_inscricao\n \n class Meta:\n verbose_name_plural = 'Eventos'\n\n\nclass TipoInscricao(models.Model):\n \"\"\"\n Quanto custa, qual período, o que está embutido no preço?\n \"\"\"\n nome = models.CharField(max_length=255)\n descricao = models.TextField()\n preco = models.DecimalField(max_digits=12, decimal_places=2)\n quantidade = models.IntegerField()\n data_inicio_vendas = models.DateTimeField()\n data_fim_vendas = models.DateTimeField()\n\n evento = models.ForeignKey(\n Evento,\n on_delete=models.CASCADE,\n related_name='evento')\n\n def __str__(self):\n return f'{self.nome} - {self.preco}'\n\n class Meta:\n verbose_name_plural = 'Tipos de Inscrições'\n\n\nclass Inscricao(models.Model):\n data = models.DateTimeField(auto_now_add=True)\n\n tipo = models.ForeignKey(\n TipoInscricao,\n on_delete=models.DO_NOTHING)\n\n evento = models.ForeignKey(\n Evento,\n on_delete=models.DO_NOTHING)\n\n inscrito = models.ForeignKey(\n Usuario,\n on_delete=models.DO_NOTHING,\n related_name='minhas_inscricoes')\n\n def __str__(self):\n return self.data\n\n class Meta:\n verbose_name_plural = 'Inscrições'\n\n","sub_path":"inscricoes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"425172826","text":"class RoverControl:\n\n def __init__(self):\n self.frontRW = 0\n self.frontLW = 0\n self.backRW = 0\n self.backLW = 0\n self.digArmActuator = 0\n self.digBelt = 'off'\n self.digBeltActuator = 0\n \n def move_forward (self, speed):\n self.frontRW = speed\n self.frontLW = speed\n self.backRW = speed\n self.backLW = speed\n\n def move_backward (self, speed):\n self.frontRW = (-1*speed)\n self.frontLW = (-1*speed)\n self.backLW = (-1*speed)\n self.backRW = (-1*speed)\n\n def turn_left (self, speed):\n self.frontLW = speed\n self.backLW = speed\n self.frontRW = (-1*speed)\n self.backRW = (-1*speed)\n\n def turn_right (self, speed):\n self.frontRW = speed\n self.backRW = speed\n self.frontLW = (-1*speed)\n self.backLW = (-1*speed)\n\n def start_dig (self):\n self.digBelt = 'on'\n self.digBeltActuator = 'on'\n\n def stop_dig (self):\n self.digBelt = 'off'\n self.digBeltActuator = 'off'\n\n def lower_arm_belt (self, speed):\n self.digArmActuator = (-1*speed)\n\n def raise_arm_belt (self, speed):\n self.digArmActuator = speed\n\n def display_values (self):\n print(f'front left wheel: {self.frontLW}')\n print(f'back left wheel: {self.backLW}')\n print(f'front right wheel: {self.frontRW}')\n print(f'back right wheel: {self.backRW}')\n print(f'dig arm actuator: {self.digArmActuator}')\n print(f'dig belt: {self.digBelt}')\n print(f'dig belt actuator: {self.digBeltActuator}')\n\ndef start_my_rover_repl():\n '''starts my rover repl so that i can send commands to it'''\n not_done = True\n my_rov = RoverControl() # the rover control class calls its __init__function here.\n while not_done:\n #expect method then atrributes seperated by spaces\n control = input('Control: ')\n method_and_params = control.split(' ')\n if method_and_params[0] == 'exit':\n not_done = False\n continue\n try:\n # calls the method with its params\n getattr(my_rov, method_and_params[0])(*method_and_params[1:])\n except AttributeError:\n print('Incorrect 
Control')\n\nif __name__ == '__main__':\n start_my_rover_repl()\n\n","sub_path":"practice/gitlab4/DavidPippenGitlab4.py","file_name":"DavidPippenGitlab4.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246251221","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport requests\n\nif \"win\" in sys.platform or \"darwin\" in sys.platform:\n pass\nelse:\n print(\"This tool is for Windows and Mac only.\")\n\nclass Setup:\n def __init__(self):\n self.base_url = \"https://raw.githubusercontent.com/kai-dg/ffxiv-crafter-gather-automation/master/\"\n self.files = [\"crafter.py\", \"gatherer.py\", \"json_editor.py\", \"process.py\", \"help.txt\"]\n\n def writer(self, name, mode=None):\n if mode == \"au\":\n with open((\"au_\" + name), \"w+\") as f:\n r = requests.get(self.base_url + name)\n f.write(r.content.decode(\"utf-8\"))\n else:\n with open(name, \"w+\") as f:\n r = requests.get(self.base_url + name)\n f.write(r.content.decode(\"utf-8\"))\n\n def requirements(self):\n self.writer(\"requirements.txt\", \"au\")\n os.system(\"pip install -r au_requirements.txt\")\n\n def setuper(self):\n self.writer(\"setup.py\", \"au\")\n\n def automation_files(self):\n for f in self.files:\n self.writer(f)\n\nif __name__ == \"__main__\":\n args = set(sys.argv[1:])\n s = Setup()\n s.automation_files()\n s.setuper()\n if \"setup\" in args:\n s.requirements()\n","sub_path":"automation_setup.py","file_name":"automation_setup.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"629557230","text":"#!/usr/bin/env python \n\n# #########################################################################\n# Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2015. UChicago Argonne, LLC. This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. 
#\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. #\n# #########################################################################\n\n'''\nThis module segments an image into as many as types defined in the trained data.\n'''\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport pdb\nimport numpy as np\nimport h5py\nimport os.path\nfrom glob import glob\nimport time\nfrom segmentation_param import *\n\n__author__ = \"Mehdi Tondravi\"\n__copyright__ = \"Copyright (c) 2017, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = ['segment_pixels']\n\ndef create_segmented_subvol(subvol_im, pixel_masks, filename, orig_idx_data, rightoverlap_data, leftoverlap_data, seg_output):\n \"\"\" \n Separates pixels in an input sub-volume image array and creates an hdf5 for each input array.\n Pixel mask for each class defined in the trained data is an input to this script.\n This script creates segmented image for each sub-volume by element by element multiplication \n of the mask and the corresponding composite sub-volume image. Segmented pixels for each class of \n the sub-volume is written into a separated dataset of the segmented output hdf5 file/sub-volume.\n \n Inputs: \n subvol_im - Composite sub-volumes image array\n pixel_mask - sub-volume mask array\n filename - output file name\n orig_idx_data - whole volume array indices\n rightoverlap_data - number of overlapped pixels from the right side of the sub-volume.\n leftoverlap_data - number of overlapped pixels from the left side of the sub-volume.\n seg_output - whether or not to save segmented output as binary or pixel intensity.\n \n Ouputs:\n a hdf5 file per sub-volume with a dataset for each defined segmented class.\n \"\"\"\n \n start_time = time.time()\n ilastik_classes = get_ilastik_labels()\n print(\"Ilastik classes are \", ilastik_classes)\n im_out_filename = outimage_file_location + '/subvol_' + filename + '.h5'\n seg_im_file = h5py.File(im_out_filename, 'w')\n # Save sub-volume indices \n subvol_indx = seg_im_file.create_dataset('orig_indices', (6,), dtype='uint64')\n subvol_indx[...] = orig_idx_data\n # Save sub-volume right and left side overlaps.\n subvol_rightoverlap = seg_im_file.create_dataset('right_overlap', (3,), dtype='uint8')\n subvol_rightoverlap[...] = rightoverlap_data\n subvol_leftoverlap = seg_im_file.create_dataset('left_overlap', (3,), dtype='uint8')\n subvol_leftoverlap[...] = leftoverlap_data\n \n for label in range(len(ilastik_classes)):\n seg_im_ds = seg_im_file.create_dataset(ilastik_classes[label], subvol_im.shape, subvol_im.dtype)\n multiply_time = time.time()\n if seg_output == True:\n seg_im_ds[...] = pixel_masks[..., label]\n else:\n seg_im_ds[...] 
= subvol_im * pixel_masks[..., label]\n print(\"Multiply time for one dataset is %d Sec\" % (time.time() - multiply_time))\n \n seg_im_file.close()\n end_time = time.time()\n print(\"Exec time for create_segmented_subvol is %d Sec\" % ((end_time - start_time)))\n return\n","sub_path":"create_segmented_subvol.py","file_name":"create_segmented_subvol.py","file_ext":"py","file_size_in_byte":6372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"461000951","text":"__author__ = 'Sasha'\r\n\r\n'''\r\n1. Создать сокет, ожидать подлкючений\r\n2. При новом подлючении создать поток ThreadForExchange\r\n3. В потоке:\r\n - создать файл лога с именем ИмяПрограммы_Дата_ВремяСозданияЛога.log\r\n - цикл приёма:\r\n - прочитать принятые данные\r\n - записать в лог\r\n'''\r\n\r\n'''\r\nФормат команды:\r\n0xff, 0xff, 0xff, 0xff, 0xff\r\n0xXX - номер команды\r\n0xXX - количество байтов параметров (между номером команды и завершающими пятью байтами)\r\n...\r\n... - байты параметров\r\n...\r\n0xfe, 0xfe, 0xfe, 0xfe, 0xfe\r\n'''\r\n\r\n'''\r\n0 - указать параметры для создания файла лога (разделяются нулём):\r\n - каталог;\r\n - имя программы\r\n\r\n1 - записать сообщение:\r\n - уровень (1 байт);\r\n - текст сообщения\r\n\r\n2 - завершить сеанс\r\n'''\r\n\r\nfrom socket import *\r\nfrom datetime import datetime\r\nfrom _thread import start_new_thread\r\n\r\nimport sys\r\nimport os\r\n\r\nSTART_MSG = '\\xff\\xff\\xff\\xff\\xff'\r\nEND_MSG = '\\xfe\\xfe\\xfe\\xfe\\xfe'\r\nCOM_CONNECT = '\\x00'\r\nCOM_WRITE = '\\x01'\r\nCOM_DISCONNECT = '\\x02'\r\n\r\ndef ProcessingBuffer(file, buffer):\r\n posEnd = -1\r\n if len(buffer) > len(START_MSG) + len(END_MSG):\r\n if buffer[:len(START_MSG)] == START_MSG:\r\n posEnd = buffer.find(END_MSG)\r\n if posEnd != -1:\r\n command = buffer[len(END_MSG)]\r\n if command == COM_CONNECT:\r\n text = buffer[len(START_MSG) + 1 : posEnd]\r\n dt = datetime.now()\r\n nameFile = text + dt.date().strftime(\"_%y_%m_%d\") + dt.time().strftime(\"_%H_%M_%S\") + \".log\"\r\n file = open(nameFile, \"w\")\r\n else:\r\n if command == COM_WRITE:\r\n text = buffer[len(START_MSG) + 1 : posEnd]\r\n file.write(text + '\\n')\r\n else:\r\n if command == COM_DISCONNECT:\r\n file.close()\r\n else:\r\n print('ERROR!!! 
Unknown command ', command)\r\n if posEnd != -1:\r\n buffer = buffer[posEnd + len(END_MSG):]\r\n return (file, buffer)\r\n\r\ndef PrintByteArray(array):\r\n for byte in array:\r\n print(str(byte) + ' ')\r\n\r\ndef ThreadForExchange(connection, address):\r\n try:\r\n file = 0\r\n buffer = ''\r\n while True:\r\n buffer += connection.recv(1024).decode()\r\n file, buffer = ProcessingBuffer(file, buffer)\r\n except:\r\n file.close()\r\n return 0\r\n\r\ndef ThreadForControl():\r\n while True:\r\n msg = input()\r\n if msg == 'exit':\r\n os._exit(0)\r\n\r\n'''\r\nНачало здесь\r\n'''\r\nsockobj = socket(AF_INET, SOCK_STREAM)\r\nsockobj.bind(('localhost', 50000))\r\nsockobj.listen(5)\r\nstart_new_thread(ThreadForControl, ())\r\nwhile True:\r\n connection, address = sockobj.accept()\r\n start_new_thread(ThreadForExchange, (connection, address, ))","sub_path":"Logger/src/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143670259","text":"import taskplan\nimport pickle\n\nfrom taskconf.config.Configuration import Configuration\n\ntry:\n from pathlib2 import Path\nexcept ImportError:\n from pathlib import Path\n\napi = taskplan.Api()\npath, config = api.load_task('38a0dec1-3d08-463d-9c58-2bff1cba0f54')\n\nwith open(str(path / Path(\"model.pk\")), 'rb') as handle:\n sum = pickle.load(handle)\n\nprint(sum, config.get_int('step'))\n\ncustom_config = Configuration({\"config\": {\"step\": 10}})\nprint(custom_config.get_int(\"step\"))\n\npath, config = taskplan.Api.load_task_from_folder(\"tasks/TestTask/38a0dec1-3d08-463d-9c58-2bff1cba0f54\")\n\nwith open(str(path / Path(\"model.pk\")), 'rb') as handle:\n sum = pickle.load(handle)\n\nprint(sum, config.get_int('step'))\n\nconfig = api.build_config(\"TestTask\", {\n \"step\": -1\n})\nprint(config.get_int(\"offset\"), config.get_int(\"step\"))","sub_path":"example/check_state.py","file_name":"check_state.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81471380","text":"import copy\nimport logging\n\nfrom torch import nn\n\nlogger = logging.getLogger('global')\n\n\ndef init_weights_normal(module, std=0.01):\n for m in module.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(\n m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight.data, std=std)\n if m.bias is not None:\n m.bias.data.zero_()\n\n\ndef init_weights_xavier(module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(\n m, nn.ConvTranspose2d):\n nn.init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n\ndef initialize(model, method, **kwargs):\n if method == 'xavier':\n init_weights_xavier(model)\n elif method == 'normal':\n init_weights_normal(model, **kwargs)\n else:\n raise NotImplementedError(f'{method} not supported')\n\n\ndef initialize_from_cfg(model, cfg):\n if cfg is None:\n initialize(model, 'normal', std=0.01)\n return\n\n cfg = copy.deepcopy(cfg)\n method = cfg.pop('method')\n initialize(model, method, **cfg)\n","sub_path":"Data19/workspace/FoldTest/pos/models/initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280626153","text":"from isis.main_window import Main_Window\nfrom isis.push_button import Push_Button\nfrom 
isis.v_box_layout import V_Box_Layout\nfrom isis.data_model.table import Table\nfrom isis.table_view import Table_View\nfrom isis.itzamara.item_list import Item_List\nfrom decimal import Decimal\nfrom sarah.acp_bson import Client\nfrom isis.widget import Widget\n\n\nclass Model_Inventory(Table):\n def __init__(self):\n Table.__init__(self, 'inventory')\n self.columns.add('sku', str)\n self.columns.add('description', str)\n self.columns.add('inventory_absolut', Decimal, '#,##0.###')\n self.columns.add('updated', str)\n self.columns['inventory_absolut'].name = 'inventory'\n self.with_new_empty_row = False\n self.readonly = True\n\n\nclass Table_View_Inventory(Table_View):\n def __init__(self, *args, **kwargs):\n Table_View.__init__(self, *args, **kwargs)\n self.setSelectionBehavior(self.SelectRows)\n self.setSelectionMode(self.SingleSelection)\n\n\nclass Inventoring:\n def __init__(self, gui_parent=None):\n self.model = Model_Inventory()\n self.gui_parent = gui_parent\n self._tableview = Table_View_Inventory(self.gui_parent)\n self._tableview.setModel(self.model)\n # self.inventoring = list()\n self.model.datasource = list()\n\n def change_inventoring(self):\n item_list_manager = Item_List(self.gui_parent)\n item_list_manager.items = self.model.datasource\n item_list_manager.exec_()\n self.inventoring = item_list_manager.items\n\n @property\n def tableview(self):\n return self._tableview\n\n @tableview.setter\n def tableview(self, tableview):\n old_table = self._tableview\n self.tableview = tableview\n tableview.setModel(self.model)\n if old_table is not None:\n old_table.deleteLater()\n del old_table\n\n @property\n def inventoring(self):\n return self.model.datasource\n\n @inventoring.setter\n def inventoring(self, inventoring):\n agent_valentine = Client('inventory', 'valentine')\n from dict import Dict\n msg = Dict({'type_message': 'request', 'request_type': 'get', 'get': 'valentine/inventory_absolut',\n 'items': inventoring, 'projection': {'updated': True}})\n try:\n storage = self.storage\n if storage is not None:\n msg.storage = storage\n except:\n pass\n answer = agent_valentine(msg)\n self.model.datasource = answer['items']\n\n def storage_changed(self, storage):\n self.storage = storage\n agent_valentine = Client('inventory', 'valentine')\n from dict import Dict\n msg = Dict({'type_message': 'request', 'request_type': 'get', 'get': 'valentine/inventory_absolut',\n 'items': self.model.datasource})\n try:\n storage = self.storage\n if storage is not None:\n msg.storage = storage\n except:\n pass\n answer = agent_valentine(msg)\n self.model.datasource = answer['items']\n\n\nclass Inventory(Main_Window):\n def __init__(self):\n Main_Window.__init__(self)\n self.setWindowTitle('Inventory')\n self.resize(500, 600)\n\n self.cwidget = Widget(self)\n self.setCentralWidget(self.cwidget)\n\n self._inventoring = Inventoring(gui_parent=self.cwidget)\n from isis.valentine.widget_viewer_storage import Widget_Viewer_Storage\n self.widget_storage = Widget_Viewer_Storage(self.cwidget)\n self.tableview = self.inventoring.tableview\n\n btn_inventoring = Push_Button('inventoring', self.cwidget)\n self.widget_storage.with_button_change = True\n\n layout_main = V_Box_Layout(self.cwidget)\n layout_main.addWidget(self.widget_storage)\n layout_main.addWidget(self.tableview)\n layout_main.addWidget(btn_inventoring)\n self.cwidget.setLayout(layout_main)\n\n btn_inventoring.clicked.connect(self.inventoring.change_inventoring)\n self.widget_storage.storage_changed.suscribe(self.inventoring.storage_changed)\n\n 
@property\n def inventoring(self):\n return self._inventoring\n\n @inventoring.setter\n def inventoring(self, inventoring):\n if isinstance(inventoring, list):\n self._inventoring.inventoring = inventoring\n\n\nif __name__ == '__main__':\n import sys\n from isis.application import Application\n app = Application()\n vv = Inventory()\n vv.show()\n sys.exit(app.exec_())\n","sub_path":"isis/valentine/inventory.pyw","file_name":"inventory.pyw","file_ext":"pyw","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"395330773","text":"import os\n\nenv = Environment(CXXFLAGS=['-g3', '-std=c++14', '-stdlib=libc++',\n '-Weverything', '-Wno-c++98-compat',\n '-Wno-missing-prototypes',\n '-Wno-padded', '-isystem./catch'])\n\nenv['CXX'] = 'clang++'\nenv['ENV']['TERM'] = os.environ['TERM']\nenv['LIBS'] = 'c++'\nenv.Program('app', Glob(\"*.cpp\"))\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"376236634","text":"\r\n# Mystery bug\r\n\r\n# This program should implement two independent timers\r\n# each having their own start and stop buttons.\r\n# Find and correct the error in the code below.\r\n\r\nimport simplegui\r\n\r\n# Initialize two counters.\r\ncounter1 = [0, 0]\r\ncounter2 = [0, 0]\r\n\r\n\r\n# Define event handlers.\r\ndef start1():\r\n timer1.start()\r\n \r\ndef stop1():\r\n timer1.stop()\r\n \r\ndef start2():\r\n timer2.start()\r\n \r\ndef stop2():\r\n timer2.stop()\r\n \r\ndef tick1():\r\n global counter1\r\n if counter1[1] == 9:\r\n counter1[0] += 1\r\n counter1[1] = 0\r\n else:\r\n counter1[1] += 1\r\n\r\ndef tick2():\r\n global counter2\r\n if counter2[1] == 9:\r\n counter2[0] += 1\r\n counter2[1] = 0\r\n else:\r\n counter2[1] += 1\r\n \r\n \r\n# Define draw handler.\r\ndef draw(canvas):\r\n canvas.draw_text(\"Timer 1: \" + str(counter1[0] % 10) + \".\" + str(counter1[1]), [50, 100], 24, \"White\")\r\n canvas.draw_text(\"Timer 2: \" + str(counter2[0] % 10) + \".\" + str(counter2[1]), [50, 200], 24, \"White\")\r\n\r\n# Register event handlers.\r\nframe = simplegui.create_frame(\"Mystery bug\", 300, 300)\r\nframe.add_button(\"Start timer1\", start1, 200)\r\nframe.add_button(\"Stop timer1\", stop1, 200)\r\nframe.add_button(\"Start timer2\", start2, 200)\r\nframe.add_button(\"Stop timer2\", stop2, 200)\r\nframe.set_draw_handler(draw)\r\n\r\ntimer1 = simplegui.create_timer(100, tick1)\r\ntimer2 = simplegui.create_timer(100, tick2)\r\n\r\n\r\n# Start frame.\r\nframe.start()\r\n","sub_path":"user43_tOTAAA3MNm_0.py","file_name":"user43_tOTAAA3MNm_0.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387890082","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2019 Cesar Sinchiguano \n#\n# Distributed under terms of the MIT License license.\n\n\"\"\"\n\n\"\"\"\nimport sys\nsys.path.insert(0, '/home/casch/catkin_ws/src/yumi_cobot/src/project')\nfrom thesis_library import *\nfrom yumipy import YuMiRobot\nfrom yumipy import YuMiState\nfrom autolab_core import RigidTransform\n\n# with aruco marker and easy_eye calibration eye in hand\n\nmoves_deg = np.array([[117.45, -56.45, -4.88, -119.77, 132.21, 138.14, -41.45],\n [126.78, -54.86, -7.94, -119.18, 130.6, 144.74, -50.02],\n [137.2, -53.75, -13.63, -119.76, 128.31, 149.09, -59.0],\n [145.71, -53.4, -19.64, -120.79, 126.16, 150.95, -65.92],\n [150.89, -53.42, -23.83, -121.59, 124.73, 151.48, -69.93],\n [144.46, -56.13, -11.15, -117.47, 127.59, 159.16, -66.53],\n [135.43, -56.59, -4.57, -116.02, 129.92, 157.27, -59.2],\n [117.18, -59.2, 4.53, -114.48, 133.5, 148.31, -43.44],\n [107.24, -61.67, 13.28, -108.29, 133.15, 150.3, -38.65]])\n\n# tool_cesar_cal = RigidTransform(np.array([ [1, 0, 0],\n# [0,1, 0],\n# [0, 0, 1]]),np.array([0, 0, 0]))\n\n\n#with chessboard with a correction \ntool_cesar_cal = RigidTransform(np.array([[ -0.9922444, -0.0046806, -0.1242145],\n [ -0.0882110, 0.7305723, 0.6771137],\n [ 0.0875783, 0.6828192, -0.7253191]]), np.array([0.0, 0.0, 0.0])) \n\n\n# tool_cesar_cal = RigidTransform(np.array([[0.0887406, 0.0865996, -0.9922830],\n# [-0.6828846, 0.7305213, 0.0026840],\n# [0.7251164, 0.6773766, 0.1239644 ] ]), np.array([0, 0.0, 0.0])) \n\n \n\n#Home position for the camera in the following setup: Eye in hand\nhome_camera=[96.01, -57.26, 70.18, 162.37, 114.49, 180.36, -97.91]\n\n\ng_timestamp_last_move = 0\ng_index_last_move = 0\n\ndef move(yumi_robot):\n import time\n global g_index_last_move\n global g_timestamp_last_move\n\n if (time.time() - g_timestamp_last_move) < 8:\n print('Remained timeL {}'.format((time.time() - g_timestamp_last_move)))\n return\n\n #Object that encapsulates a yumi arm joint angle configuration.\n moves = [YuMiState(p) for p in moves_deg]\n\n g_index_last_move = (g_index_last_move + 1) % len(moves)\n\n yumi_robot.right.goto_state(moves[g_index_last_move])\n g_timestamp_last_move = time.time()\n\n\ndef pose_to_tf(br,pose_translation,pose_quaternion):\n\n t = geometry_msgs.msg.TransformStamped()\n t.header.stamp = rospy.Time.now()\n t.header.frame_id = 'base_link'\n t.child_frame_id = 'ee_link_r'\n t.transform.translation.x = pose_translation[0]\n t.transform.translation.y = pose_translation[1]\n t.transform.translation.z = pose_translation[2]\n t.transform.rotation.w = pose_quaternion[0]\n t.transform.rotation.x = pose_quaternion[1]\n t.transform.rotation.y = pose_quaternion[2]\n t.transform.rotation.z = pose_quaternion[3]\n br.sendTransform(t)\n\ndef main():\n global tool_cesar_cal\n rospy.init_node('base_link_tcp_link_r', anonymous=True)\n br = tf2_ros.TransformBroadcaster()\n rate = rospy.Rate(10)\n\n # starting the robot interface\n y = YuMiRobot(arm_type='remote')\n\n\n #y.right.set_tool(tool_cesar_cal)\n state = y.right.get_state(raw_res=False)\n print('State: {}'.format(state))\n\n counter=0.0\n while (True):\n counter+=1\n pose = y.right.get_pose(raw_res=False)\n print('moving!!!',counter)\n print('translation {}'.format(pose.translation))\n # print('quaternion {}'.format(pose.quaternion))\n # print('rotation matrix \\n{}'.format(pose.rotation))\n print('publishing right TF (robot base -->> ee_link_r)!!!')\n #move(y)\n 
pose_to_tf(br,pose.translation,pose.quaternion)\n\n\n # we should expect to go through the loop 10 times per second\n rate.sleep()\nif __name__ == '__main__':\n main()\n","sub_path":"src/yumi_cobot/src/tmp/1/publishingTF_right_arm.py","file_name":"publishingTF_right_arm.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"394262524","text":"from django.shortcuts import render, reverse, HttpResponse, get_object_or_404\nfrom .forms import OrderForm, PaymentForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.utils import timezone\n\n#import settings so that we can access the public stripe key\nfrom django.conf import settings\nimport stripe\n\n# function available for logged in users only\n@login_required \ndef charge(request):\n # amount is getting from name of input in charge.html\n amount = request.GET['amount']\n \n if request.method == 'GET':\n order_form = OrderForm()\n payment_form = PaymentForm()\n \n #show form\n return render(request, 'checkout/charge.html', {\n 'publishable' : settings.STRIPE_PUBLISHABLE_KEY,\n 'order_form' : OrderForm,\n 'payment_form' : PaymentForm,\n 'amount': amount\n })\n \n else:\n # set secret key for Stripe API\n stripeToken = request.POST['stripe_id']\n stripe.api_key = settings.STRIPE_SECRET_KEY\n \n order_form = OrderForm(request.POST)\n payment_form = PaymentForm(request.POST)\n \n if order_form.is_valid() and payment_form.is_valid():\n try:\n customer = stripe.Charge.create(\n # stripe only accepts cents hence *100 and integer\n amount=int(float(request.POST['amount'])*100),\n currency='sgd',\n description='Payment',\n card=stripeToken\n )\n \n if customer.paid:\n order = order_form.save(commit=False)\n order.date = timezone.now()\n order.save()\n request.session['shopping_cart'] = {}\n return render(request, 'checkout/checkout_success.template.html')\n \n else:\n messages.error(request, \"Your card has been declined!\")\n \n except stripe.error.CardError:\n messages.error(request, \"Your card was declined!\")\n \n else:\n return render(request, 'checkout/charge.html', {\n 'order_form' : OrderForm,\n 'payment_form' : PaymentForm,\n 'amount': amount,\n 'publishable' : settings.STRIPE_PUBLISHABLE_KEY\n })","sub_path":"checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125694907","text":"from django.conf.urls import url\n\nfrom . 
import views\n\n\nurlpatterns = [\n url(r'^$', views.CapturaList.as_view(), name='captura-list'),\n url(r'^pedir-captura/$', views.PedirCaptura.as_view(), name='pedir-captura'),\n url(r'^registrar/$', views.Registrar.as_view(), name='registrar'),\n]\n","sub_path":"moviles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"380794394","text":"\"\"\"\nVanilla VAE model\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\nfrom torchlib.common import FloatTensor\nfrom torchlib.common import enable_cuda\nfrom tqdm import tqdm\n\n\nclass VAE(object):\n def __init__(self, encoder: nn.Module, decoder: nn.Module, code_size: int, optimizer: torch.optim.Optimizer):\n self.encoder = encoder\n self.decoder = decoder\n self.code_size = code_size\n self.optimizer = optimizer\n if enable_cuda:\n self.encoder.cuda()\n self.decoder.cuda()\n\n def _set_to_train(self):\n self.encoder.train()\n self.decoder.train()\n\n def _set_to_eval(self):\n self.encoder.eval()\n self.decoder.eval()\n\n def sample_latent_code(self, batch_size):\n z = torch.randn(batch_size, self.code_size).type(FloatTensor)\n return z\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu)\n\n def encode(self, x):\n mu, logvar = self.encoder.forward(x)\n return mu, logvar\n\n def encode_reparm(self, x):\n mu, logvar = self.encode(x)\n return self.reparameterize(mu, logvar)\n\n def decode(self, z):\n return self.decoder.forward(z)\n\n def reconstruct(self, data):\n self._set_to_eval()\n with torch.no_grad():\n mu, logvar = self.encode(data)\n z = self.reparameterize(mu, logvar)\n return self.decode(z)\n\n def save_checkpoint(self, checkpoint_path):\n print('Saving checkpoint to {}'.format(checkpoint_path))\n state = {\n 'net_g': self.encoder.state_dict(),\n 'net_dec': self.decoder.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n }\n torch.save(state, checkpoint_path)\n\n def load_checkpoint(self, checkpoint_path, all=True):\n print('Loading checkpoint from {}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n self.encoder.load_state_dict(checkpoint['net_g'])\n self.decoder.load_state_dict(checkpoint['net_dec'])\n if all:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n\n\"\"\"\nTrainer for VAE\n\"\"\"\n\n\nclass Trainer(object):\n def __init__(self, recon_loss_f):\n self.recon_loss_f = recon_loss_f\n\n def train(self, num_epoch, train_data_loader, model: VAE, checkpoint_path, epoch_per_save, callbacks,\n summary_writer: SummaryWriter):\n n_iter = 0\n for epoch in range(num_epoch):\n model._set_to_train()\n reconstruction_loss_train = 0.\n kl_divergence_train = 0.\n print('Epoch {}/{}'.format(epoch + 1, num_epoch))\n for input, label in tqdm(train_data_loader):\n model.optimizer.zero_grad()\n input = input.type(FloatTensor)\n mu, logvar = model.encode(input)\n z = model.reparameterize(mu, logvar)\n out = model.decode(z)\n\n reconstruction_loss = self.recon_loss_f(out, input)\n kl_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n loss = reconstruction_loss + kl_divergence\n loss.backward()\n model.optimizer.step()\n\n reconstruction_loss_train += reconstruction_loss.item()\n kl_divergence_train += kl_divergence.item()\n\n summary_writer.add_scalar('data/reconstruction_loss', reconstruction_loss.item(), n_iter)\n 
summary_writer.add_scalar('data/kl_divergence', kl_divergence.item(), n_iter)\n\n n_iter += 1\n\n reconstruction_loss_train /= len(train_data_loader.dataset)\n kl_divergence_train /= len(train_data_loader.dataset)\n\n print('Reconstruction loss: {:.4f} - KL divergence: {:.4f}'.format(reconstruction_loss_train,\n kl_divergence_train))\n\n if (epoch + 1) % epoch_per_save == 0:\n model.save_checkpoint(checkpoint_path)\n\n for callback in callbacks:\n callback(epoch, model, summary_writer)\n\n model.save_checkpoint(checkpoint_path)\n","sub_path":"torchlib/generative_model/autoencoder/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248612124","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 27 10:52:54 2018\r\n\r\n@author: backesj\r\n\"\"\"\r\n\r\nimport time\r\n\r\n\r\ndef yes_no(answer):\r\n yes = set(['yes','y'])\r\n no = set(['no','n',''])\r\n \r\n while True:\r\n choice = input(answer).lower()\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Please respond with (y/n)\")\r\n\r\nanswer = yes_no(' Would you like to run the CATS report?: ')\r\n \r\nif answer != True:\r\n raise SystemExit\r\nelse:\r\n time.sleep(5)\r\n print('program will continue')\r\n ","sub_path":"sysExit test.py","file_name":"sysExit test.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141492595","text":"import sys\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC\n\n\ndef sarcasm(file_path, max_features, kernel, max_items):\n df = pd.read_csv(file_path)\n if max_items == -1:\n max_items = df.shape[0]\n df = df[0:max_items]\n # print(list(df))\n # print(df.count())\n df.dropna(subset=['comment'], inplace=True)\n # print(df.count())\n train_texts, valid_texts, y_train, y_valid = train_test_split(df['comment'], df['label'], random_state=17)\n features_comment = TfidfVectorizer(ngram_range=(1, 2), max_features=max_features, stop_words='english')\n logit = SVC(kernel=kernel, gamma='scale')\n pipeline = Pipeline([('features_comment', features_comment), ('logit', logit)])\n pipeline.fit(train_texts, y_train)\n print(\"Training Accuracy for SVM with stopwords : \", pipeline.score(train_texts, y_train) * 100)\n prediction = pipeline.predict(valid_texts)\n print(\"Test Accuracy for SVM with stopwords : \", accuracy_score(y_valid, prediction) * 100)\n\n\nif __name__ == '__main__':\n file_path = str(sys.argv[1])\n max_features = int(sys.argv[2])\n kernel = str(sys.argv[3])\n max_items = int(sys.argv[4])\n sarcasm(file_path, max_features, kernel, max_items)\n","sub_path":"SVMSarcasmWithStopwords.py","file_name":"SVMSarcasmWithStopwords.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"253078290","text":"'''\nYou are given a string S of lowercase characters, find the rank of the string amongst its permutations when sorted lexicographically. 
\nReturn 0 if the characters are repeated in the string.\nNote: Return the rank%1000000007 as the answer as rank might overflow.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. \nEach test case consists of a string S in 'lowercase' only in a separate line.\n\nOutput:\nFor each testcase, in a new line, print the rank of the string amongst its lexicographically-sorted-permutations.\n\nYour Task:\nThis is a function problem. You only need to complete the function findRank that takes string S as a parameter and returns the rank.\n\nExpected Time Complexity: O(N).\nExpected Auxiliary Space: O(N).\n\nConstraints:\n1 ≤ T ≤ 50\n1 ≤ |S| ≤ 15\n\nExample:\nInput:\n2\nabc\nacb\nOutput:\n1\n2\n\nExplanation:\nTestcase 1: In 'abc' when we sort all the permutations in lexicographic order 'abc' will be at the first position.\nTestcase2: In 'acb' .The lexicographically-sorted permutations with letters 'a', 'c', and 'b' will be at second position. \n\nhints:\n\nLet the given string be “STRING”. In the input string, ‘S’ is the first character. There are total 6 characters and 4 of them are smaller than ‘S’. So there can be 4 * 5! smaller strings where first character is smaller than ‘S’, like following\n\nR X X X X X\nI X X X X X\nN X X X X X\nG X X X X X\n\nNow let us Fix S’ and find the smaller strings staring with ‘S’.\n\nRepeat the same process for T, rank is 4*5! + 4*4! +…\n\nNow fix T and repeat the same process for R, rank is 4*5! + 4*4! + 3*3! +…\n\nNow fix R and repeat the same process for I, rank is 4*5! + 4*4! + 3*3! + 1*2! +…\n\nNow fix I and repeat the same process for N, rank is 4*5! + 4*4! + 3*3! + 1*2! + 1*1! +…\n\nNow fix N and repeat the same process for G, rank is 4*5! + 4*4! + 3*3! + 1*2! + 1*1! + 0*0!\n\nRank = 4*5! + 4*4! + 3*3! + 1*2! + 1*1! + 0*0! = 597\n\nNote that the above computations find count of smaller strings. Therefore rank of given string is count of smaller strings plus 1. 
The final rank = 1 + 597 = 598\n\n'''\ndef findRank(s):\n \n Ispresent=[0 for i in range(256)]\n \n for char in s:\n if(Ispresent[ord(char)]):\n #return 0 if any character repeats\n return 0\n else:\n #initialize boolean value 1 for every character\n Ispresent[ord(char)] = 1\n \n #initialize rank to 0 \n rank = 0\n \n for i in range(len(s)):\n #take sum all ord value of chars which is less than current char\n #this is to find total chars which is smaller than current string\n less = sum(Ispresent[:ord(s[i])])\n #less * fact(i)\n rank += (get_fact(len(s) -i -1) * less) % 1000000007\n #initialize char value to 0 again\n Ispresent[ord(s[i])] -= 1\n #return rank + 1\n return rank + 1\n \n\ndef get_fact(n):\n if(n == 0):\n return 0\n else:\n fact = 1\n for i in range(1,n+1):\n fact *= i\n return fact % 1000000007\n\ns = \"ACB\"\nprint(findRank(s))","sub_path":"geeksforgeeks/strings/21_Lexicographic_rank_of_string_difficult.py","file_name":"21_Lexicographic_rank_of_string_difficult.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"297130693","text":"# coding: utf8\n\n\nimport sympy as sp\nfrom sympy import pi, Matrix\nimport pylab as pl\nimport numpy as np\nimport pickle\n\nfrom control_aux import symb_tools as st\nfrom IPython import embed as IPS\n\n\n\"\"\"\nSkript zum Steuerungsentwurf für das Doppelpendel auf Rädern\n\n\"\"\"\n\n\n# Systemgleichungen laden:\npfname = \"data_model_equations.pcl\"\nwith open(pfname, \"rb\") as pfile:\n pdict = pickle.load(pfile)\n print(pfname, \"geladen\")\n\n\n\n# Systemgleichungen und Symbole in den globalen Namensraum einfügen\nglobals().update(pdict)\n\nF = Matrix(sp.symbols('F0:3'))\n\n\n# Keys haben Datentyp str\nnumparams = dict(m0 = 1, m1 = 1, m2 = 1, r = 1, g = 10, l1 = 2, l2 = 0.3,\n s1 = 1, s2 = 1, J0 = 0.1, J1 = 0.1, J2 = 0.1,\n delta0 = .0, delta1 = .1, delta2 = .1)\n\n# keys: Symbole\nparams_symb_keys = dict([(par, numparams.get(par.name)) for par in pdict['params'] ])\n\n# Immutable -> Mutable\nM0 = Matrix(pdict['M0'])\nK0 = Matrix(pdict['K0'])\nBp = Matrix(pdict['Bp'])\nD0 = Matrix(pdict['D0'])\n\n# Laplace-Variable\ns = sp.Symbol('s')\n\nAp = (M0*s**2 + D0*s+K0).subs(numparams)\n\nABp = Ap.row_join(Bp)\n\n# Linksteilerfreihheit überprüfen (Spaltennummerierung beginnt bei 0) #*+\nS1 = set(st.roots(st.col_minor(ABp, 0,1,2))) # Nullstellen des OLCP\nS2 = set(st.roots(st.col_minor(ABp, 2,3,4)))\nassert len(S1.intersection(S2)) == 0 #*-\n# Schnittmenge leer -> Minoren haben keine gemeinsame Nullstelle\n# -> Linksteilerfrei\n\n\n##-> Festlegung der Basisgrößen: #*+\n# Ergänzung der System-Matrix, sodass diese quadratisch und unimodular wird\n\nk1, k2 = sp.symbols('k1, k2')\n\n# Betrachtung von drei Varianten:\n# Ansatz1: xi1 := k1*phi0 + k2*phi1, xi2 := phi2\n# Ansatz2: xi1 := k1*phi0 + k2*phi2, xi2 := phi1\n# Ansatz3: xi1 := -phi0, xi2 := k1*phi1 + k2*phi2\n\n# Randbedingungen am Anfang\nxa = Matrix( [0, 0, 0])\n# RB Ende:\nxb = Matrix( [-4, 0, 0])\n\nvariant = 1\nif variant == 1:\n Z = Matrix([[k1, k2, 0, 0, 0], [0, 0, 1, 0 ,0]])\n # res = {k1: 0.0333333333333333, k2: 0.0474178403755869}\n T_end = 4.25\nelif variant == 2:\n Z = Matrix([[k1, 0, k2, 0, 0], [0, 1, 0, 0 ,0]])\n # res = {k1: -0.100000000000000, k2: -0.0577464788732394}\n T_end = 9.5\nelif variant == 3:\n Z = Matrix([[-1, 0, 0, 0, 0], [0, k1, k2, 0 ,0]])\n # res = {k1: -0.100000000000000, k2: -0.0577464788732394}\n T_end = 17\n xb = Matrix( [2, 0, 0])\nelse:\n raise 
ValueError(\"Unerwartete Variante\")\n\nM = st.row_stack(ABp, Z) # Hyper-Zeilen zusammenfügen #*-\nM = st.clean_numbers(M).as_mutable()\n\n# Forderung: k1, k2 so wählen, dass det == 1\n# Liste aller Koeff. des Polynoms M.det()-1\ndet = st.trunc_small_values(M.berkowitz_det().expand())\neqns = list(sp.Poly(det-1, s, domain = 'EX').as_dict().values())\neqns = st.clean_numbers(eqns)\n# alle müssen identisch 0 werden\nres = sp.solve(eqns, [k1,k2])\n\nM = M.subs(res)\n\nassert M.det().expand() == 1 # Konsistenzprüfung auf Unimodularität\n\nU_R = M.adjugate() # Hier inv == adjugate (weil det == 1) #*+\nU_12R = U_R[:3, 3:]\nU_22R = U_R[3:, 3:] #*-\n\n\n\n##-> Wunschtrajektorien im Zeitbereich festlegen #*+\n\nZ1 = M[-2:, :3]\n\n# Definition der Basisgrößen Xi aus Systemgrößen X:\n# Xi := Z1 * X\n\nxi_a = Z1 * xa\nxi_b = Z1 * xb\n\n# Übergangspolynome (Trajektorien der Basissignale (Zeitbereich))\nt = sp.Symbol('t')\nxi_polys = []\n##-> Glattheitsanforderung (>=3) ist ein Entwurfsfreiheitsgrad\ncn = 3 # Glattheitsforderung (legt Anzahl der Randbed. fest)\n\nfor i in range(2):\n\n # Randbedingungen:\n left = (0,xi_a[i,0]) + (0,)*cn\n right = (T_end,xi_b[i,0]) + (0,)*cn\n\n poly = st.trans_poly(t, cn, left, right) # Polynome bestimmen\n print(\"xi_{0}(t) = \".format(i), poly.evalf())\n\n # Stückweise definierte Funktion für konstante Teile am Anfang und Ende:\n pw = sp.Piecewise((left[1], t{0}{1}'.format(k, v))\n\n body = body + '
{0}
'.format(''.join(other_params))\n\n self.save_emails(sender_name, sender_email, receiver_email, subject, body, *args, **kwargs)\n j.clients.email.send(receivers, sender, subject, body)\n\n return 'Success'\n\n def save_emails(self, sender_name, sender_email, receiver_email, subject, body, *args, **kwargs):\n system_path = j.portal.tools.server.active.getSpace('system').model.path\n emails_file = os.path.join(system_path, '.space', 'emails.json')\n try:\n emails = j.data.serializer.json.loads(open(emails_file).read())\n except IOError: # File doesn't exist yet\n emails = []\n\n emails.append({\n 'sender_name': sender_name,\n 'sender_email': sender_email,\n 'receiver_email': receiver_email,\n 'subject': subject,\n 'body': body,\n 'args': args,\n 'other_data': kwargs\n })\n\n open(emails_file, 'w').write(j.data.serializer.json.dumps(emails))\n","sub_path":"apps/portalbase/system/system__emailsender/methodclass/system_emailsender.py","file_name":"system_emailsender.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532344635","text":"import unittest\r\n\r\nfrom pony import orm\r\nfrom pony.orm.core import *\r\nfrom pony.orm.tests.testutils import raises_exception\r\n\r\ndb = Database('sqlite', ':memory:')\r\n\r\nclass Person(db.Entity):\r\n id = orm.PrimaryKey(int, auto=True)\r\n name = orm.Required(str, 40)\r\n lastName = orm.Required(str, max_len=40, unique=True)\r\n age = orm.Optional(int)\r\n groupName = orm.Optional('Group')\r\n chiefOfGroup = orm.Optional('Group')\r\n\r\nclass Group(db.Entity):\r\n name = orm.Required(str)\r\n persons = orm.Set(Person)\r\n chief = orm.Optional(Person, reverse='chiefOfGroup')\r\n\r\ndb.generate_mapping(create_tables=True)\r\n\r\nclass TestEntityInstances(unittest.TestCase):\r\n \r\n def setUp(self):\r\n rollback()\r\n db_session.__enter__()\r\n\r\n def tearDown(self):\r\n rollback()\r\n db_session.__exit__()\r\n \r\n def test_create_instance(self):\r\n with orm.db_session:\r\n Person(id=1, name='Philip', lastName='Croissan')\r\n Person(id=2, name='Philip', lastName='Parlee', age=40)\r\n Person(id=3, name='Philip', lastName='Illinois', age=50)\r\n commit()\r\n \r\n def test_getObjectByPK(self):\r\n self.assertEqual(Person[1].lastName, \"Croissan\")\r\n \r\n @raises_exception(ObjectNotFound , \"Person[666]\")\r\n def test_getObjectByPKexception(self):\r\n p = Person[666]\r\n \r\n def test_getObjectByGet(self):\r\n p = Person.get(age=40)\r\n self.assertEqual(p.lastName, \"Parlee\")\r\n \r\n def test_getObjectByGetNone(self):\r\n self.assertIsNone(Person.get(age=41))\r\n \r\n @raises_exception(MultipleObjectsFoundError , 'Multiple objects were found.'\r\n ' Use Person.select(...) 
to retrieve them')\r\n def test_getObjectByGetException(self):\r\n p = Person.get(name=\"Philip\")\r\n \r\n def test_updateObject(self):\r\n with db_session:\r\n Person[2].age=42\r\n self.assertEqual(Person[2].age, 42)\r\n commit()\r\n\r\n @raises_exception(ObjectNotFound, 'Person[2]')\r\n def test_deleteObject(self):\r\n with db_session:\r\n Person[2].delete()\r\n p = Person[2]\r\n \r\n def test_bulkDelete(self):\r\n with orm.db_session:\r\n Person(id=4, name='Klaus', lastName='Mem', age=12)\r\n Person(id=5, name='Abraham', lastName='Wrangler', age=13)\r\n Person(id=6, name='Kira', lastName='Phito', age=20)\r\n delete(p for p in Person if p.age <= 20)\r\n self.assertEqual(select(p for p in Person if p.age <= 20).count(), 0)\r\n \r\n def test_bulkDeleteV2(self):\r\n with orm.db_session:\r\n Person(id=4, name='Klaus', lastName='Mem', age=12)\r\n Person(id=5, name='Abraham', lastName='Wrangler', age=13)\r\n Person(id=6, name='Kira', lastName='Phito', age=20)\r\n Person.select(lambda p: p.id >= 4).delete(bulk=True)\r\n self.assertEqual(select(p for p in Person if p.id >= 4).count(), 0)\r\n \r\n @raises_exception(UnresolvableCyclicDependency, 'Cannot save cyclic chain: Person -> Group') \r\n def test_saveChainsException(self):\r\n with orm.db_session:\r\n claire = Person(name='Claire', lastName='Forlani')\r\n annabel = Person(name='Annabel', lastName='Fiji')\r\n Group(name='Aspen', persons=[claire, annabel], chief=claire)\r\n print('group1=', Group[1])\r\n \r\n def test_saveChainsWithFlush(self):\r\n with orm.db_session:\r\n claire = Person(name='Claire', lastName='Forlani')\r\n annabel = Person(name='Annabel', lastName='Fiji')\r\n flush()\r\n Group(name='Aspen', persons=[claire, annabel], chief=claire)\r\n self.assertEqual(Group[1].name, 'Aspen')\r\n self.assertEqual(Group[1].chief.lastName, 'Forlani')","sub_path":"Python 3/Clase 13/flask/env2/Lib/site-packages/pony/orm/tests/test_entity_instances.py","file_name":"test_entity_instances.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"412841265","text":"#! python3\n# -*- coding:utf-8 -*-\n# @Time : 2017/06/11 17:11\n# @Author : Hython.com\n# @File : test.py\n# @IDE : PyCharm\nimport matplotlib.pyplot as plt\n\nfrom random_walk import RandomWalk\n\n# 只要处于活动状态,就不断模拟随机漫步\nwhile True:\n # 创建一个 RandomWalk 实例并将其包含点都绘制出来\n rw = RandomWalk(50000)\n rw.fill_walk()\n\n # 设置绘图窗口尺寸\n plt.figure(dpi=128, figsize=(10, 6))\n\n point_numbers = list(range(rw.num_points))\n plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues, edgecolors='none', s=1)\n\n # 突出起点和终点\n plt.scatter(0, 0, c='green', edgecolors='none', s=40)\n plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none', s=40)\n\n # 隐藏坐标轴\n plt.axes().get_xaxis().set_visible(False)\n plt.axes().get_yaxis().set_visible(False)\n\n plt.show()\n\n keep_running = input(\"Make another walk? (y/n): \")\n if keep_running == 'n':\n break","sub_path":"PythonCrashCourse/ch16/matplotlib/rw_visual.py","file_name":"rw_visual.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"250612127","text":"\"\"\"\nMichael S. Emanuel\nSat Oct 22 14:34:53 2016\n\nReciprocal cycles\nProblem 26\nA unit fraction contains 1 in the numerator. 
The decimal representation of the\nunit fractions with denominators 2 to 10 are given:\n\n1/2\t = \t0.5\n1/3\t = \t0.(3)\n1/4\t = \t0.25\n1/5\t = \t0.2\n1/6\t = \t0.1(6)\n1/7\t = \t0.(142857)\n1/8\t = \t0.125\n1/9 = \t0.(1)\n1/10 = \t0.1\nWhere 0.1(6) means 0.166666..., and has a 1-digit recurring cycle.\nIt can be seen that 1/7 has a 6-digit recurring cycle.\n\nFind the value of d < 1000 for which 1/d contains the longest recurring cycle\nin its decimal fraction part.\n\"\"\"\nfrom Euler.Utility import decLongDivision\nfrom typing import List\n\n\ndef main() -> int:\n # Tabulate list of cycle length of 1/d from 2 to 999\n longestCycle: int = 0\n longestN: int = 0\n # Types\n longestAns: List[int] = []\n n: int\n ans: List[int]\n cycleLength: int\n # Compute cycle length of 1/n in decimal for each n in 2-999\n for n in range(2, 1000):\n (ans, cycleLength) = decLongDivision(1, n)\n if cycleLength > longestCycle:\n longestCycle = cycleLength\n longestN = n\n longestAns = ans\n print(f'Longest cycle length of 1 / n at n = {longestN}')\n print(f'Cycle length is {longestCycle}')\n print('Long division result is:')\n print(''.join(str(d) for d in longestAns))\n return longestN\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"Prob026_ReciprocalCycles.py","file_name":"Prob026_ReciprocalCycles.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413619622","text":"import datetime as dt\n#import pyfits\nimport astropy.io.fits as pyfits\n#import matplotlib.pyplot as plt\n#import matplotlib.dates as mdates\n#from matplotlib import cm\n#import os\n#import numpy as np\n#from matplotlib.dates import DateFormatter\n#import sys\n\n\n\ndef checkFitsCallisto(fitsfile):\n\t\"\"\"Check whether fits file has two HDUs or not\n\t\"\"\"\n\thdus = pyfits.open(fitsfile)\n\tif len(hdus) == 2:\n\t\thdus.close()\n\t\t#del hdus\n\t\treturn True\n\telse:\n\t\thdus.close()\n\t\t#del hdus\n\t\treturn False\n\ndef checkBInTable(fitsfile):\n\t\"\"\"Check whether fits file bintable is valid\n\t\"\"\"\n\thdus = pyfits.open(fitsfile)\n\tbintablehdu = hdus[1]\n\n\ttry:\n\t\tbintablehdu.data\n\t\thdus.close()\n\t\t#del hdus\n\t\treturn True\n\texcept:\n\t\thdus.close()\n\t\t#del hdus\n\t\treturn False\n\n\ndef tosec(td):\n\t\"\"\"Calculate the total seconds in a timedate.timedate object\n\t\"\"\"\n\treturn (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6\n\n\ndef toDate(string):\n\t\"\"\"Break the string with \"/\" separator\n\treturn the equivalent datetime.date object\n\t\"\"\"\n\tyr, mnth, day = string.split('/')\n\tyr, mnth, day = int(yr), int(mnth), int(day) \n\treturn dt.date(yr, mnth, day)\n\n\ndef toTime(string):\n\t\"\"\"\n\tbreak the string with \"/\" separator\n\treturn the datetime.time object\n\t\"\"\"\n\thr, mn, sec = string.split(':')\n\tsec = sec.split('.')[0] # if sec has a fractional value\n\thr, mn, sec = int(hr), int(mn), int(sec)\n\t#if second's value is 60, replace it to 59\n\t#some old data from some observatories has this \"leap second\" problem\n\tif sec == 60:\n\t\tsec = 59\n\treturn dt.time(hr, mn, sec)\n\n\ndef visualise(plt_object, show =True, outpath= 'tmp.png'):\n\t\"\"\"\n\tinput \n\t\t1) matplotlib.pyplot object (plt)\n\t\t2) outpath : path of the image to be stored\n\t\t3) show (keyword) = if True, just show, if False save instead plotting\n\t\n\treturns\n\t\treturns nothing , saves or shows plot\n\t\"\"\"\n\t\n\tif 
show:\n\t\tplt_object.show()\n\telse:\n\t\tplt_object.savefig(outpath)\n\n","sub_path":"src/pyCallistoUtils.py","file_name":"pyCallistoUtils.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"399497202","text":"import numpy as np\nimport cv2\nfrom chainercv.datasets import coco_bbox_label_names, voc_bbox_label_names\n\n\nclass Visualizer(object):\n \"\"\"Visualizer for object detection\n\n \"\"\"\n\n def __init__(self, dataset_type, thickness=2, input_type='rgb'):\n if dataset_type == 'COCO':\n self._label_names = coco_bbox_label_names\n elif dataset_type == 'VOC':\n self._label_names = voc_bbox_label_names\n else:\n raise ValueError(\n 'Not support visualization for dataset `{}`'.format(\n dataset_type))\n\n self._thickness = thickness\n assert input_type in ['rgb', 'bgr']\n self._input_type = input_type\n\n def visualize(self, img, outputs):\n if img.shape[0] == 3:\n img = np.transpose(img, (1, 2, 0))\n if img.dtype != np.uint8:\n img = img.astype(np.uint8)\n if self._input_type == 'rgb':\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n if len(outputs) == 3:\n bboxes, labels, scores = outputs\n elif len(outputs) == 2:\n bboxes, labels = outputs\n scores = [[None for _ in range(bboxes[0].shape[0])]]\n else:\n raise ValueError()\n bboxes = bboxes[0].astype(np.int)\n labels = labels[0]\n scores = scores[0]\n\n for bbox, label, score in zip(bboxes, labels, scores):\n self._add_bbox(img, bbox)\n self._add_text(img, bbox, label, score)\n return img\n\n def _add_bbox(self, img, bbox):\n pt1 = (bbox[1], bbox[0])\n pt2 = (bbox[3], bbox[2])\n cv2.rectangle(img, pt1, pt2, (50, 50, 250), self._thickness)\n\n def _add_text(self, img, bbox, label, score):\n if score is None:\n score = ''\n else:\n score = ': {:.2f}'.format(score)\n cat = self._label_names[label] + score\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n cat_size = cv2.getTextSize(cat, font, 0.5, 2)[0]\n cv2.rectangle(img, (bbox[1], bbox[0] - cat_size[1] - 2),\n (bbox[1] + cat_size[0], bbox[0] - 2), (50, 50, 250), -1)\n cv2.putText(img, cat, (bbox[1], bbox[0] - 2),\n font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)\n","sub_path":"utils/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"605925178","text":"from difflib import SequenceMatcher\nfrom cleanUP import *\n\ndef plagerised_ratio(filename1, filename2):\n tokens1 = tokenize(filename1)\n file1=toText(tokens1)\n tokens2 = tokenize(filename2)\n file2=toText(tokens2)\n SM = SequenceMatcher(None,file1,file2)\n similarity_ratio = SM.ratio()\n print(similarity_ratio) # ratio of plagiarised content\n blocks=list(SM.get_matching_blocks())\n blocks=blocks[:-1]\n f1=open(filename1, \"r\")\n for i in blocks:\n flag = 0\n for j in range(len(tokens1)):\n if tokens1[j][2] == i[0]:\n start = tokens1[j][1]\n flag = 1\n if tokens1[j][2] == (i[0] + i[2] - 1):\n end = tokens1[j][1]\n break\n if not flag == 0 and (end - start) > 100:\n f1.seek(start, 0)\n print(f1.read(end - start))\n\nplagerised_ratio(\"test2.py\", \"test3.py\")\n","sub_path":"seqMatcher.py","file_name":"seqMatcher.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"117607373","text":"from matplotlib import colors as mcolors\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import 
NearestNeighbors\nfrom sklearn.cluster import DBSCAN\n\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\n\nnp.random.seed(0)\navgPoints = 250\n\ncolors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\ncolors.pop('w')\ncolors.pop('k')\nprint(colors)\n\nC1 = [-5, -2] + .8 * np.random.randn(avgPoints*2, 2)\nC4 = [-2, 3] + .3 * np.random.randn(avgPoints//5, 2)\nC3 = [1, -2] + .2 * np.random.randn(avgPoints*5, 2)\nC5 = [3, -2] + 1.6 * np.random.randn(avgPoints, 2)\nC2 = [4, -1] + .1 * np.random.randn(avgPoints//2, 2)\nC6 = [5, 6] + 2 * np.random.randn(avgPoints, 2)\nX = np.vstack((C1, C2, C3, C4, C5, C6))\n\n\n\nfig, ((plt1, plt2, plt3), (plt4,plt5, plt6)) = plt.subplots(2, 3, figsize=(15, 9))\n\npoints = np.array([[-5, -2], [-2, 3] , [1, -2], [3, -2], [4, -1], [5, 6]])\nvor = Voronoi(points)\nvoronoi_plot_2d(vor)\n\nplt1.set_title('Original data')\nplt1.plot(C1[:, 0], C1[:, 1], 'b.', alpha=0.3)\nplt1.plot(C2[:, 0], C2[:, 1], 'r.', alpha=0.3)\nplt1.plot(C3[:, 0], C3[:, 1], 'g.', alpha=0.3)\nplt1.plot(C4[:, 0], C4[:, 1], 'c.', alpha=0.3)\nplt1.plot(C5[:, 0], C5[:, 1], 'm.', alpha=0.3)\nplt1.plot(C6[:, 0], C6[:, 1], 'y.', alpha=0.3)\n\n\n\n\nNN = NearestNeighbors(n_neighbors=np.log(X.size).astype(int)).fit(X)\ndistances, indices = NN.kneighbors(X)\n\n\nplt2.set_ylim(0, 1.25)\nplt2.set_title('eps elbow')\nplt2.plot(np.sort(distances[:, distances.shape[1]-1]), color='red', label = 'Elbow')\nplt2.legend()\n\n\n# DBSCAN at 0.25\neps = 0.25\nmin_samples=np.log(len(X))\ndb = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\nlabels = db.labels_\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint(eps)\nprint('Estimated number of clusters for eps: %d' % n_clusters_)\nunique, counts = np.unique(labels, return_counts=True)\nprint(dict(zip(unique, counts)))\n\nplt3.set_title('DBSCAN at '+str(eps))\nplt3.scatter(X[:, 0], X[:, 1], marker='o', c=labels, s=25, edgecolor='k')\n\n# DBSCAN at 0.5\neps = 0.5\nmin_samples=np.log(len(X))\ndb = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\nlabels = db.labels_\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint(eps)\nprint('Estimated number of clusters for eps: %d' % n_clusters_)\nunique, counts = np.unique(labels, return_counts=True)\nprint(dict(zip(unique, counts)))\n\nplt4.set_title('DBSCAN at '+str(eps))\nplt4.scatter(X[:, 0], X[:, 1], marker='o', c=labels, s=25, edgecolor='k')\n\n# DBSCAN at 0.75\neps = 0.8\nmin_samples=np.log(len(X))\ndb = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\nlabels = db.labels_\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint(eps)\nprint('Estimated number of clusters for eps: %d' % n_clusters_)\nunique, counts = np.unique(labels, return_counts=True)\nprint(dict(zip(unique, counts)))\n\nplt5.set_title('DBSCAN at '+str(eps))\nplt5.scatter(X[:, 0], X[:, 1], marker='o', c=labels, s=25, edgecolor='k')\n\n# DBSCAN at 1\neps = 1\nmin_samples=np.log(len(X))\ndb = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\nlabels = db.labels_\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint(eps)\nprint('Estimated number of clusters for eps: %d' % n_clusters_)\nunique, counts = np.unique(labels, return_counts=True)\nprint(dict(zip(unique, counts)))\n\nplt6.set_title('DBSCAN at '+str(eps))\nplt6.scatter(X[:, 0], X[:, 1], marker='o', c=labels, s=25, edgecolor='k')\n\nfrom sklearn.preprocessing import scale\nXs = scale(X)\n#plt8.set_title('Scaled')\n#plt8.scatter(Xs[:, 0], Xs[:, 1], marker='o', s=25, edgecolor='k')\n\neps = 0.35\nmin_samples=np.log(len(Xs))\ndb = 
DBSCAN(eps=eps, min_samples=min_samples).fit(Xs)\nlabels = db.labels_\n\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint(eps)\nprint('Estimated number of clusters for eps: %d' % n_clusters_)\nunique, counts = np.unique(labels, return_counts=True)\nprint(dict(zip(unique, counts)))\n\n#plt9.set_title('DBSCAN at '+str(eps))\n#plt9.scatter(Xs[:, 0], Xs[:, 1], marker='o', c=labels, s=25, edgecolor='k')\n\nplt.tight_layout()\nplt.show()\n\n","sub_path":"Ardelean Eugen-Richard/Meeting6/DBSCAN.py","file_name":"DBSCAN.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555225633","text":"class Solution:\n def maxProfit(self, prices: 'List[int]') -> 'int':\n #brute force \n N=len(prices)\n if not prices:return 0\n rest,hold,sold=[0]*N,[0]*N,[0]*N\n hold[0]=-prices[0]\n for i in range(1,N):\n rest[i]=max(rest[i-1],sold[i-1])\n sold[i]=hold[i-1]+prices[i]\n hold[i]=max(hold[i-1],rest[i-1]-prices[i])\n \n return max(sold[-1],rest[-1])\n\n'''\n rest(not buy)\n \\\nbuy(-price) / rest\\\n / \\\n hold --- sold\n / sell(+price)\n\n hold(not sell)\n\n https://www.youtube.com/watch?v=oL6mRyTn56M\n'''\n\n\n'''\nbecause we only care about answer right before i , space can be O(1)\n\nclass Solution:\n def maxProfit(self, prices: 'List[int]') -> 'int':\n \n N=len(prices)\n if not prices:return 0\n rest,hold,sold=0,-prices[0],0\n \n for i in range(1,N):\n temp=rest\n rest=max(rest,sold)\n sold=hold+prices[i]\n hold=max(hold,temp-prices[i])\n \n return max(sold,rest)\n\nO(N) O(1)\n'''\n","sub_path":"BestTimeBuySellStockCoolDown.py","file_name":"BestTimeBuySellStockCoolDown.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323628793","text":"from utils import *\nfrom layers import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\n\nclass CNN(object):\n def __init__(self, name, architecture_params, learning_params={'lr':1e-3, 'mu':0.99, 'reg':1e-3, 'decay':0.99999, 'eps':1e-10}):\n #architecture_params: [C,C,P,C,P,FC,FC,T]\n #learning_params: [learning_rate, momentum, regularizer, decay_rate, epsilon]\n self.name = \"CNN_\" + name #+ \"_\" + uniqueStr();\n self.architecture_params = architecture_params\n self.lr = np.float32(learning_params['lr'])\n self.mu = np.float32(learning_params['mu'])\n self.reg = np.float32(learning_params['reg'])\n self.decay = np.float32(learning_params['decay'])\n self.eps = np.float32(learning_params['eps'])\n self.layers = []\n self.K = 2\n\n def build(self, initial_shape, debug):\n N, width, height, ch = initial_shape\n mi = ch\n outw = width\n outh = height\n i = 0\n cp_first = True\n fc_first = True\n for layer_param in self.architecture_params:\n layer = None\n layer_type = layer_param['type']\n layer_param['depth'] = i\n if layer_type == 'C':\n outw =math.ceil(outw/layer_param['stride'][0])\n outh =math.ceil(outh/layer_param['stride'][0])\n layer_param['num_input'] = mi\n layer = ConvLayer(layer_param)\n cp_first = False\n mi = layer_param['num_output']\n elif layer_type == 'P':\n outw =math.ceil(outw/layer_param['stride'][0])\n outh =math.ceil(outh/layer_param['stride'][0])\n layer = PoolLayer(layer_param)\n elif layer_type == 'DO':\n layer = DOLayer(layer_param)\n elif layer_type == 'FC':\n if fc_first:\n # size must be flattened output of last convpool layer\n mi *= outw * outh\n 
fc_first = False\n layer_param['num_input'] = mi\n layer = FCLayer(layer_param)\n mi = layer_param['num_output']\n elif layer_type == 'T':\n layer_param['num_input'] = mi\n layer_param['num_output'] = self.K\n layer = TerminalLayer(layer_param)\n self.layers.append(layer)\n i+=1;\n\n if debug:\n self.printArchitecture()\n print(\"built CNN\")\n \n def printArchitecture(self):\n for layer in self.layers:\n layer.printArchitecture()\n \n def forward(self, X):\n in_ = X\n out_ = None\n fc_first = True\n for layer in self.layers:\n layer_type = layer.layer_type\n if (layer_type == 'FC') and (fc_first):\n #flatten before dense layer\n in_shape = in_.get_shape().as_list()\n in_ = tf.reshape(in_, [-1, np.prod(in_shape[1:])])\n fc_first = False\n out_ = layer.forward(in_)\n in_ = out_\n return out_\n\n def computeLoss(self, X,y):\n non_terminal_layers = self.layers[:-1]\n terminal_layer = self.layers[-1]\n params = [terminal_layer.W, terminal_layer.b]\n for layer in non_terminal_layers:\n if (layer.layer_type == 'C') or (layer.layer_type == 'FC'):\n params += [layer.W, layer.b]\n\n regularized_loss = self.reg*sum([tf.nn.l2_loss(p) for p in params])\n\n y_pred = self.forward(X)\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred,labels=y))\n loss = cross_entropy_loss + regularized_loss \n return loss\n\n def fit(self, X, Y, batch_sz=500, epochs=1, method=\"adam\",debug=False):\n self.K = len(set(Y))\n \n # make a validation set\n X, Y = shuffle(X, Y)\n X = X.astype(np.float32)\n Y = oneHotEncoder(Y).astype(np.float32)\n \n Xvalid, Yvalid = X[-1000:], Y[-1000:]\n X, Y = X[:-1000], Y[:-1000]\n Yvalid_flat = np.argmax(Yvalid, axis=1) # for calculating error rate\n \n # initialize the architecture\n self.params = self.build(X.shape, debug)\n N, width, height,ch = X.shape\n \n # set up tensorflow functions and variables\n tfX = tf.placeholder(tf.float32, shape=(None, width, height, ch), name='X')\n tfY = tf.placeholder(tf.float32, shape=(None, self.K), name='Y')\n \n loss = self.computeLoss(tfX, tfY)\n prediction = self.predict(tfX)\n \n if (method==\"rms\"):\n train_op = tf.train.RMSPropOptimizer(self.lr, decay=self.decay, momentum=self.mu).minimize(loss)\n elif (method==\"momentum\"):\n train_op = tf.train.MomentumOptimizer(self.lr, momentum=self.mu).minimize(loss)\n elif (method==\"adam\"):\n train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)\n elif (method==\"gd\"):\n train_op = tf.train.GradientDescentOptimizer(self.lr).minimize(loss)\n else:\n print(\"optimizer unkown\")\n n_batches = N // batch_sz\n losses = []\n train_errors = []\n\n model_saver = tf.train.Saver()\n init = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init)\n for i in range(epochs):\n X, Y = shuffle(X, Y)\n for j in range(n_batches):\n Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]\n Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]\n \n session.run(train_op, feed_dict={tfX: Xbatch, tfY: Ybatch})\n\n #if j == (n_batches-1):\n if j == 20:\n l = session.run(loss, feed_dict={tfX: Xvalid, tfY: Yvalid})\n losses.append(l)\n\n p = session.run(prediction, feed_dict={tfX: Xvalid})\n print(set(p), len(set(p)))\n \n e = error_rate(Yvalid_flat, p)\n train_errors.append(e)\n if debug:\n print(\"i:\", i, \"j:\", j, \"nb:\", \"loss:\", l, \"error rate:\", e)\n model_saver.save(session, \"models/\"+self.name)\n if debug:\n fig, axes = plt.subplots(nrows=1,ncols=2) \n axes[0].plot(losses)\n axes[1].plot(train_errors)\n plt.tight_layout()\n #plt.show()\n 
plt.savefig(\"imgs/\"+self.name+\".png\")\n print ('\\n training error_rate: ', train_errors[-1])\n\n def predict(self, X):\n if (str(type(X)) == \"\"):\n uninit = False\n #running previously fit model\n if len(self.layers) == 0:\n uninit = True\n print(\"running previously fit model\")\n input()\n self.params = self.build(X.shape, False)\n \n with tf.Session() as session:\n if uninit:\n session.run(tf.global_variables_initializer())\n uninit = False\n load_model = tf.train.import_meta_graph(\"models/\"+self.name+'.meta')\n load_model.restore(session, tf.train.latest_checkpoint('./models/'))\n \n N, width, height,c = X.shape\n tfX = tf.placeholder(tf.float32, shape=(None, width, height, c), name='X')\n #Y = tf.placeholder(tf.float32, shape=(None, self.K), name='Y')\n pY = self.forward(tfX)\n prediction = tf.argmax(pY, 1)\n p = session.run(prediction, feed_dict={tfX: X})\n print(set(p), len(set(p)))\n return p\n else:\n pY = self.forward(X)\n return tf.argmax(pY, 1)\n \n def score(self, X,y):\n pred = self.predict(X)\n return np.mean(y == pred)\n \n def get_params(self,deep=True):\n #hyper_params: [learning_rate, momentum, regularizer, decay_rate, epsilon]\n return {'lr':self.lr,'mu':self.mu,'reg':self.reg,'decay': self.decay,'eps':self.eps}\n #return {'architecture_params':{'pool_layer_sizes':self.pool_layer_sizes,'hidden_layer_sizes':self.hidden_layer_sizes,'dropout_rates':self.dropout_rates},'hyper_params':{'lr':self.lr,'mu':self.mu,'reg':self.reg,'decay': self.decay,'eps':self.eps}}\n \n def set_params(self,**parameters):\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n return self\n\t\n\n","sub_path":"convolutional-neural-nets/cnn-advanced-architecture/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567244304","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.colorbar as colorbar\n\n# Load detections\ndetected_centralized_closures = np.load('../results/cen_scores.npy')\ndetected_decentralized_closures = np.load('../results/dec_scores_10.npy')\ngt_scores = np.load('../results/groundtruth_scores.npy')\n\ndetected_centralized_best_idx = np.argmax(\n detected_centralized_closures,axis=1)\n\n\ndetected_decentralized_best_idx = np.argmax(\n detected_decentralized_closures, axis=1)\n\nx_cen = np.arange(len(detected_centralized_best_idx))\nx_decen = np.arange(len(detected_decentralized_best_idx))\n\ncolors_cen = cm.jet(gt_scores[x_cen, detected_centralized_best_idx])\ncolors_decen = cm.jet(gt_scores[x_decen, detected_decentralized_best_idx])\n\n\n# Filter out scores which are too low\nscore_lim = 0.25\nmax_val_cen = detected_centralized_closures[np.arange(\n len(detected_centralized_best_idx)), detected_centralized_best_idx]\nmax_val_decen = detected_decentralized_closures[np.arange(\n len(detected_decentralized_best_idx)), detected_decentralized_best_idx]\n\nrejected_arg_cen = np.argwhere(max_val_cen < score_lim)\nrejected_arg_decen = np.argwhere(max_val_decen < score_lim)\n\n# Put them at values which won't show in the plot\nx_cen[rejected_arg_cen] = -10\nx_decen[rejected_arg_decen] = -10\ndetected_centralized_best_idx[rejected_arg_cen] = -10\ndetected_decentralized_best_idx[rejected_arg_decen] = -10\n\n\nplt.rc('font', family='serif')\nplt.rc('xtick', labelsize='x-small')\nplt.rc('ytick', labelsize='x-small')\n\n\nfig = plt.figure(figsize=(6, 3))\nax0 = fig.add_subplot(1, 2, 
1)\nax1 = fig.add_subplot(1, 2, 2)\n\nax0.set_title('Centralized')\nax0.scatter(x_cen, detected_centralized_best_idx, s=2.0, c=colors_cen)\ncax = fig.add_axes([0.92, 0.1,0.008, 0.8])\ncax.set_label('test')\ncb = colorbar.ColorbarBase(cax, cmap=cm.jet,label='Ground-truth score', orientation='vertical')\ncb.ax.invert_yaxis()\n\nax1.set_title('Decentralized')\nax1.scatter(x_decen, detected_decentralized_best_idx, s=1.5, c=colors_decen)\nax0.set_xlabel('Index of the queried frame')\nax0.set_ylabel('Index of the matched frame')\nax1.set_xlabel('Index of the queried frame')\nplt.gca().invert_yaxis()\n\nplt.show()\n","sub_path":"plot_scripts/plot_confusion_matrix.py","file_name":"plot_confusion_matrix.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403991882","text":"from pwn import *\nimport time\nimport sys\nimport signal\nimport sf\n\ntarget = process(\"./Correction-FsGotShellcode-x86\")\ngdb.attach(target, execute=\"verify_exploit\")\n\nbof_payload = sf.BufferOverflow(arch=32)\n\ntarget.recvuntil(\"Tell me I was never good enough: \")\nleak = int(target.recvline().strip(b\"\\n\"), 16)\nret_address = leak + (92)\nfs = sf.WriteFmtStr(\n\t\tarch = 32,\n\t\tvalue = -0x36,\n\t\taddress = 0x804b2d8,\n\t\toffset = 0x4,\n\t\tprinted_bytes = 0x0,\n\t\talignment_bytes = 0x0,\n\t\tvalue_base = ret_address,\n\t\taddress_base = 0)\n\npayload = sf.BufferOverflow(arch=32, start=92)\npayload.add_bytes(92, fs.generate_fmt_str())\npayload.add_bytes(54, b\"\\x6a\\x46\\x58\\x31\\xdb\\x31\\xc9\\xcd\\x80\\x31\\xd2\\x6a\\x0b\\x58\\x52\\x68\\x2f\\x2f\\x73\\x68\\x68\\x2f\\x62\\x69\\x6e\\x89\\xe3\\x52\\x53\\x89\\xe1\\xcd\\x80\")\ntarget.sendline(payload.generate_payload())\n\n# Exploit Verification starts here 15935728\n\ndef handler(signum, frame):\n\traise Exception(\"Timed out\")\n\nsignal.signal(signal.SIGALRM, handler)\nsignal.alarm(2)\n\ntry:\n\twhile True:\n\t\ttarget.recvall(timeout=2)\nexcept Exception:\n\tprint(\"Exploit timed out\")\n","sub_path":"Correction-unit-tests/07_32/remenissions-work/exploit-FsGotShellcode-0.py","file_name":"exploit-FsGotShellcode-0.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231056959","text":"\nimport fire\nimport backtrace\n\nbacktrace.hook(\n reverse=False,\n align=True,\n strip_path=False,\n enable_on_envvar_only=False,\n on_tty=False,\n conservative=False,\n styles={})\n\nimport subprocess\n\nclass Machine():\n \"\"\" class description \"\"\"\n prefix = \"prefix\"\n\n\n def __init__(self,id=1,name=\"ipm_machine\",files_to_load=\"\"):\n self.name = self.prefix + \"_\" + name + \"_\" + str(id)\n self.id = id\n self.ip = id + 1\n self.files_to_load = files_to_load\n \n def __del__(self):\n y = 2\n #TODO docker rm\n\n def create(self, demo=False):\n # ~1270ms\n self.check()\n # String -> conatiner_created\n # TODO network\n # TODO boostrap node\n # TODO fix IP\n if demo:\n self.bash(f\"\\\n docker create \\\n --tty \\\n --cap-add=NET_ADMIN \\\n --name {self.name} \\\n simcoin2:v0.0.3 \\\n \")\n else:\n self.bash(f\"\\\n docker create \\\n --tty \\\n --cap-add=NET_ADMIN \\\n --net=simcoin-network \\\n --cap-add=NET_ADMIN \\\n --name {self.name} \\\n simcoin2:v0.0.3 \\\n \")\n\n self.load(self.files_to_load)\n return self\n\n def load(self, files):\n if files:\n files_str = files.split(\" \")\n for ffile in files_str:\n self.bash(f\"docker cp {ffile} {self.name}:/ \")\n\n\n def 
start(self):\n # ~780ms\n return self.bash(f\"docker start {self.name} \")\n\n def stop(self):\n # ~730ms\n self.bash(f\"docker stop {self.name} \")\n return self\n\n def rm(self):\n # ~390ms\n return self.bash(f\"docker rm {self.name}\")\n\n def nop(self):\n return NotImplemented\n\n def run_process(self,file_to_call):\n self.exec(f\"{file_to_call}\")\n return \n\n def exec(self,file=\"ls\"):\n # only on started conatiners\n # ~350ms\n self.p = subprocess.Popen( f\"docker exec -it {self.name} {file} \".split())\n return self.p\n \n def wait(self):\n self.p.wait()\n\n def check(self):\n container_id = self.bash(f\"docker ps -q -a -f name={self.name}\")\n if \"\" != container_id:\n raise Exception(\"Docker instance already exists, cleanup first\")\n else:\n return True\n\n def get_output_tape(self):\n return self.bash(f\"docker exec -it {self.name} cat output.tape\")\n\n @classmethod\n def clean(cls):\n containers = cls.bash(\"docker ps -q -a --format {{.Names}}\")\n for name in containers.splitlines():\n if (name.startswith(cls.prefix)):\n cls.bash(f\"docker rm -f {name}\")\n return containers\n\n @classmethod\n def bash(cls,cmd):\n output = subprocess.check_output(\n cmd, \n shell=True, \n executable='/bin/bash',\n stderr = subprocess.STDOUT\n )\n encoded_output = output.decode('utf-8').rstrip()\n return encoded_output\n \n @classmethod\n def image_build(cls):\n # ddocker create some base image container\n # exec installs\n cls.bash(f\"docker create --tty --name simcoin2_build_container ubuntu:xenial-20170119 \")\n cls.bash(f\"docker start simcoin2_build_container \")\n\n cls.bash(f\"docker exec -it simcoin2_build_container apt-get update \")\n\n cls.bash(f\"docker exec -it simcoin2_build_container apt-get -y install software-properties-common \")\n cls.bash(f\"docker exec -it simcoin2_build_container add-apt-repository -y ppa:bitcoin/bitcoin \")\n cls.bash(f\"docker exec -it simcoin2_build_container add-apt-repository -y ppa:deadsnakes/ppa \") # python3.6\n\n cls.bash(f\"docker exec -it simcoin2_build_container apt-get update \")\n\n\n cls.bash(\n \"\"\" \n docker exec -it simcoin2_build_container \\\n apt-get -y install \\\n build-essential \\\n libtool \\\n autotools-dev \\\n automake \\\n pkg-config \\\n libssl-dev \\\n libevent-dev\\\n bsdmainutils \\\n git \\\n \\\n libboost-system-dev \\\n libboost-filesystem-dev \\\n libboost-chrono-dev \\\n libboost-program-options-dev \\\n libboost-test-dev \\\n libboost-thread-dev \\\n \\\n libdb4.8-dev \\\n libdb4.8++-dev \\\n \\\n libssl-dev \\\n\n \"\"\"\n ## notest for the last two blocks:\n # from the ppa\n # for python-bitcoinli\n\n )\n\n cls.bash(f\"docker exec -it simcoin2_build_container git clone --branch 0.17 https://github.com/bitcoin/bitcoin.git /bitcoin\")\n cls.bash(f\"docker exec -it simcoin2_build_container sh -c ' cd /bitcoin; ./autogen.sh' \")\n cls.bash(f\"docker exec -it simcoin2_build_container sh -c ' cd /bitcoin; ./configure' \")\n cls.bash(f\"docker exec -it simcoin2_build_container sh -c ' cd /bitcoin; make -j4 ' \")\n cls.bash(f\"docker exec -it simcoin2_build_container sh -c ' cd /bitcoin; make install ' \") \n ### this install `bitcoin-cli` `/usr/local/bin/bitcoin-cli`\n\n cls.bash(f\"docker exec -it simcoin2_build_container apt-get -y install python3.6 python3-pip\")\n cls.bash(f\"docker exec -it simcoin2_build_container python3.6 -mpip install --upgrade pip\")\n cls.bash(f\"docker exec -it simcoin2_build_container pip3.6 install fire backtrace bitcoin\")\n\n cls.bash(f\"docker stop simcoin2_build_container \")\n 
cls.bash(f\"docker commit simcoin2_build_container simcoin2:v0.0.1 \")\n cls.bash(f\"docker rm simcoin2_build_container \")\n\n\n ## add missing python bitcoin rpc lib\n cls.bash(f\"docker create --tty --name simcoin2_build_container simcoin2:v0.0.1 \")\n cls.bash(f\"docker start simcoin2_build_container \")\n\n cls.bash(f\"docker exec -it simcoin2_build_container pip3.6 install python-bitcoinrpc python-bitcoinlib\")\n\n cls.bash(f\"docker stop simcoin2_build_container \")\n cls.bash(f\"docker commit simcoin2_build_container simcoin2:v0.0.2\")\n cls.bash(f\"docker rm simcoin2_build_container \")\n\n ## add iproute2 to enable tc (traffic control)\n\n cls.bash(f\"\"\"\n docker create --tty --name simcoin2_build_container simcoin2:v0.0.2\n docker start simcoin2_build_container\n\n docker exec -it simcoin2_build_container apt install iproute2\n\n docker stop simcoin2_build_container\n docker commit simcoin2_build_container simcoin2:v0.0.3\n docker rm simcoin2_build_container\n \"\"\")\n\n return True\n \n @classmethod\n def image_rm(cls):\n return cls.bash(\"docker rmi simcoin\")\n\n @classmethod\n def info(cls):\n msg = []\n msg.extend([\n \"#containers\"\n , cls.bash(f\"docker ps --quiet --all --filter name={cls.prefix} --format \" + \"\\\"({{.ID}}) {{.Names}}: {{.Status}} {{.Command}}\\\"\")\n ])\n msg.append(\"#images\")\n msg.extend( cls.bash(f\"docker images simcoin2 --quiet --all --format \" + \"\\\"({{.ID}}) {{.Repository}}: {{.Tag}}\\\"\").split('\\n'))\n return msg\n\n def debug_start(self): \n self.create(demo=True)\n self.load(\"p.py\")\n self.start()\n\n def debug_stop(self):\n self.stop()\n self.rm()\n\n def test(self):\n self.debug_start()\n self.debug_stop()\n return True\n\n\nif __name__ == '__main__':\n fire.Fire(Machine)\n","sub_path":"code/ipm/m.py","file_name":"m.py","file_ext":"py","file_size_in_byte":7884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445431414","text":"# -*- coding: utf-8 -*-\n# @Time : 5/24/2018\n# @Author : CarrieChen\n# @File : get_you_need_label_img.py\n# @Software: ZJ_AI\n# this code is for read some labels from excel and find according imgs and put the imgs into a word.\n\nimport xlrd\nimport docx\nfrom PIL import Image\nimport os\nimport xlwt\nimport io_utils\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\n#some paths\nparent_path=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\"\nexcel_path=parent_path+\"\\\\本批商品列表.xls\"\nimg_path=parent_path+\"\\\\pic+lab166\"\nrefer_166classes=parent_path+\"\\\\166_classes_list.xls\"\nthis_batch_imgs_path=parent_path+\"\\\\本批商品图例\"\n\n\ndef get_labels(input_path):\n data=xlrd.open_workbook(input_path)\n table=data.sheets()[0]\n labels=table.col_values(0)\n return labels\n\ndef get_chinese(input_path,pointlabel): #excel\n data=xlrd.open_workbook(input_path)\n table=data.sheets()[0]\n labels=table.col_values(4)\n if pointlabel in labels: #else\n row=labels.index(pointlabel)\n product=table.cell(row,0).value\n taste = table.cell(row, 1).value\n weight=table.cell(row,2).value\n package=table.cell(row,3).value\n return product,taste,weight,package\n\n\ndef find_imgs_and_write_word(labels,parent_path):\n file=docx.Document()\n for i in range(len(labels)):\n img=img_path+\"\\\\\"+labels[i]+\".jpg\"\n product,taste,weight,package=get_chinese(refer_166classes,labels[i])\n file.add_picture(img)\n file.add_paragraph(product+taste+weight+package+\" \"+labels[i])\n file.add_paragraph(\"\\n\")\n 
file.save(parent_path+\"\\\\\"+\"本批商品图例.doc\") #这是生成的word文档的名字\n\n\ndef find_imgs_and_save_as_imgs(labels, parent_path):\n io_utils.mkdir(this_batch_imgs_path)\n for i in range(len(labels)):\n background = Image.open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\data_processing_carriechen\\\\count_all_annotations\\\\pure_white_background.jpg\")\n img = img_path + \"\\\\\" + labels[i] + \".jpg\"\n product, taste, weight, package = get_chinese(refer_166classes, labels[i])\n img=Image.open(img)\n background.paste(img,[100,50])\n draw = ImageDraw.Draw(background)\n width, height = background.size\n setFont = ImageFont.truetype('C:\\Windows\\Fonts\\\\simfang.ttf', 30)\n fillColor = \"black\"\n draw.text((10, height - 100), u\"\\\"\"+labels[i]+\"\\\"\", font=setFont, fill=fillColor)\n draw.text((10, height - 50), u\"\\\"\" + product+taste+weight+package + \"\\\"\", font=setFont, fill=fillColor)\n background.save(this_batch_imgs_path+\"\\\\\"+labels[i]+\".jpg\")\n\nif __name__ ==\"__main__\":\n labels=get_labels(excel_path)\n #find_imgs_and_write_word(labels,parent_path)\n find_imgs_and_save_as_imgs(labels,parent_path)\n\n\n\n\n\n","sub_path":"development/server/algorithm/tf_faster_rcnn/data_processing/small_tools/get_you_need_label_img.py","file_name":"get_you_need_label_img.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"355038641","text":"from thinc.neural.util import get_array_module\nfrom dataclasses import dataclass\nimport numpy\n\nfrom .util import List, Array\nfrom .util import lengths2mask\n\n\n@dataclass\nclass RaggedArray:\n data: Array\n lengths: List[int]\n\n @property\n def size(self) -> int:\n return self.data.size\n\n @property\n def xp(self):\n return get_array_module(self.data)\n\n @property\n def dtype(self):\n return self.data.dtype\n\n @classmethod\n def blank(cls, xp=numpy) -> \"RaggedArray\":\n return RaggedArray(xp.zeros((0,), dtype=\"f\"), [])\n\n @classmethod\n def from_truncated(cls, square: Array, lengths: List[int]) -> \"RaggedArray\":\n if len(lengths) != square.shape[0]:\n raise ValueError(\"Truncated array must have shape[0] == len(lengths)\")\n width = square.shape[1]\n max_len = max(lengths, default=0)\n extra_dims = square.shape[2:]\n if width == max_len:\n return RaggedArray(square, lengths)\n elif width > max_len:\n raise ValueError(\"Expected width < max_len. Got {width} > {max_len}\")\n xp = get_array_module(square)\n expanded = xp.zeros((sum(lengths),) + extra_dims, dtype=square.dtype)\n # TODO: I know there's a way to do this without the loop :(. Escapes\n # me currently.\n start = 0\n for i, length in enumerate(lengths):\n # We could have a row that's actually shorter than the width,\n # if the array was padded. 
Make sure we don't get junk values.\n            row_width = min(width, length)\n            expanded[start : start + row_width] = square[i, :row_width]\n            start += length\n        return cls(expanded, lengths)\n\n    @classmethod\n    def from_padded(cls, padded: Array, lengths: List[int]) -> \"RaggedArray\":\n        if max(lengths, default=0) > padded.shape[1]:\n            return cls.from_truncated(padded, lengths)\n        mask = lengths2mask(lengths)\n        assert sum(mask) == sum(lengths)\n        all_rows = padded.reshape((-1,) + padded.shape[2:])\n        xp = get_array_module(all_rows)\n        data = xp.ascontiguousarray(all_rows[mask])\n        assert data.shape[0] == sum(lengths)\n        return cls(data, lengths)\n\n    def to_padded(self, *, value=0, to: int = -1) -> Array:\n        assert sum(self.lengths) == self.data.shape[0]\n        max_len = max(self.lengths, default=0)\n        if to >= 1 and to < max_len:\n            raise ValueError(f\"Cannot pad to {to}: Less than max length {max_len}\")\n        to = max(to, max_len)\n        # Slightly convoluted implementation here, to do the operation in one\n        # and avoid the loop\n        shape = (len(self.lengths), to) + self.data.shape[1:]\n        values = self.xp.zeros(shape, dtype=self.dtype)\n        if value != 0:\n            values.fill(value)\n        if self.data.size == 0:\n            return values\n        mask = lengths2mask(self.lengths)\n        values = values.reshape((len(self.lengths) * to,) + self.data.shape[1:])\n        values[mask] = self.data\n        values = values.reshape(shape)\n        return values\n\n    def get(self, i: int) -> Array:\n        start = sum(self.lengths[:i])\n        end = start + self.lengths[i]\n        return self.data[start:end]\n\n\n@dataclass\nclass Activations:\n    lh: RaggedArray\n    po: RaggedArray\n\n    @classmethod\n    def blank(cls, *, xp=numpy):\n        return cls(RaggedArray.blank(xp=xp), RaggedArray.blank(xp=xp))\n\n    @property\n    def xp(self):\n        return self.lh.xp\n\n    @property\n    def has_lh(self) -> bool:\n        return bool(self.lh.data.size)\n\n    @property\n    def has_po(self) -> bool:\n        return bool(self.po.data.size)\n","sub_path":"spacy_transformers/activations.py","file_name":"activations.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92371104","text":"import acm\n\ndef get_InsOverride(trade):\n    addInfo = acm.FAdditionalInfo.Select01('recaddr = %i addInf = %i' % (trade.Oid(), 1011), ' ')\n    if addInfo:\n        return addInfo.FieldValue()\n\ndef objectSatisfiesQueryFolder(object, queryFolderName):\n    queryFolder = acm.FStoredASQLQuery[queryFolderName]\n    assert queryFolder is not None, 'Cannot find query folder named %s' % queryFolderName\n    assert queryFolder.IsKindOf(acm.FStoredASQLQuery), 'Cannot find query folder with name %s' % queryFolderName\n    query = queryFolder.Query()\n    return query.IsSatisfiedBy(object)\n","sub_path":"Extensions/ABSA Specific 4.3/FPythonCode/InsOverride.py","file_name":"InsOverride.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"265234541","text":"condition = True\r\n\r\nsoma = 0\r\nnumero = []\r\n\r\nwhile condition:\r\n    num=int(input('Enter the number: '))\r\n    if (num) != 0 and 0 < num <= 1000 :\r\n        soma += (num)\r\n        numero.append(num)\r\n    else:\r\n        break\r\nprint('Sum: ' +str(soma))\r\nprint('smallest value: %d' %(min(numero)))\r\nprint('largest value: %d' %(max(numero)))\r\ninput()","sub_path":"atvd19.py","file_name":"atvd19.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241586938","text":"#$language = 
\"python\"\n#$interface = \"1.0\"\nimport os\nimport sys\nimport ConfigParser\nsys.path.append('../../../')\n\n\n# config.read_file(open(r'../'))\n\n\nRG_Prompt = \"RG]#\"\nLG_Prompt = \"login:\"\nPW_Prompt = \"Password:\"\n\nRG_Prompt = \"RG]#\"\n# IPv4_Prompt = \"inet addr:\"\nIPv4_Prompt=\"Bcast:\"\n# IPv6_Prompt = \"inet6 addr:\"\nIPv6_Prompt= \"Scope:\"\nUser_Prompt = \"\"\nPw_Prompt = \"\"\n\nMIN_COLUMNS = 1\nMAX_COLUMNS = crt.Screen.Columns\nMIN_ROW = 1\nMAX_ROW = crt.Screen.Rows\n\ncmd_ifconfig = \"ifconfig br-lan\"\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef main():\n login_rg_console()\n\n save_config('IFCONFIG', 'ipv4', str(get_IPv4_addr()))\n save_config('IFCONFIG', 'ipv6', str(get_IPv6_Link_addr()))\n save_config('IFCONFIG', 'ipv6_global', str(get_IPv6_Global_addr()))\n end()\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef login_rg_console():\n US = \"root\"\n PW = \"humax@!0416\"\n\n crt.Screen.Send('\\r')\n if (crt.Screen.WaitForString(LG_Prompt, 1) == True):\n crt.Screen.Send(US + '\\r')\n if (crt.Screen.WaitForString(PW_Prompt, 1) == True):\n crt.Screen.Send(PW + '\\r')\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef get_IPv4_addr():\n crt.Screen.Clear()\n crt.Screen.Send('\\r')\n crt.Screen.Send(cmd_ifconfig + '\\r')\n\t\n if (crt.Screen.WaitForString(IPv4_Prompt, 1) == True):\n cur_row = crt.Screen.CurrentRow\n cur_col = crt.Screen.CurrentColumn\n\n crt.Sleep(500)\n data = crt.Screen.Get(cur_row, MIN_ROW, cur_row+1, MAX_COLUMNS)\n\n data = data.split('inet addr:')[1].split('Bcast:')[0]\n data = data.strip()\n return data\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef get_IPv6_Link_addr():\n crt.Screen.Clear()\n crt.Screen.Send('\\r')\n crt.Screen.Send(cmd_ifconfig + '\\r')\n\n if (crt.Screen.WaitForString(IPv6_Prompt, 1) == True):\n cur_row = crt.Screen.CurrentRow\n cur_col = crt.Screen.CurrentColumn\n\n crt.Sleep(500)\n data = crt.Screen.Get(cur_row, MIN_ROW, cur_row +1, MAX_COLUMNS)\n data = data.split('inet6 addr: ')[1].split('Scope:Link')[0]\n data = data.strip().split('/')[0]\n\n return data\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef get_IPv6_Global_addr():\n crt.Screen.Clear()\n crt.Screen.Send('\\r')\n crt.Screen.Send(cmd_ifconfig + '\\r')\n\n if (crt.Screen.WaitForString(IPv6_Prompt, 1) == True):\n cur_row = crt.Screen.CurrentRow\n cur_col = crt.Screen.CurrentColumn\n\n crt.Sleep(500)\n\n data = crt.Screen.Get(cur_row, MIN_ROW, cur_row + 1, MAX_COLUMNS)\n data = data.split('inet6 addr: ')[1].split('Scope:Global')[0]\n data = data.strip().split('/')[0]\n\n return data\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# def get_user_pw():\n# crt.Screen.Clear()\n# crt.Screen.Send('\\r')\n# crt.Screen.Send(cmd_user_pw + '\\r')\n#\n# if (crt.Screen.WaitForString(User_Prompt, 1) == True):\n# cur_row = crt.Screen.CurrentRow\n# user = crt.Screen.Get(cur_row, MIN_ROW, cur_row, MAX_COLUMNS)\n# user = user.strip()\n# user = user.split('')[1].split('')[0]\n#\n# if (crt.Screen.WaitForString(Pw_Prompt, 1) == True):\n# cur_row = crt.Screen.CurrentRow\n# password = crt.Screen.Get(cur_row , MIN_ROW, cur_row +1 , MAX_COLUMNS)\n# password = password.strip()\n# password = password.split('')[1].split('')[0]\n#\n# return user, password\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfrom path 
import root_dir\nconfig_path = os.path.join(root_dir, \"Config\", \"ifconfig.txt\")\ndef save_config(section, option, value):\n\n config = ConfigParser.RawConfigParser()\n config.read(config_path)\n\n if not config.has_section(str(section).upper()):\n config.add_section(str(section).upper())\n\n config.set(str(section).upper(), str(option), str(value))\n\n with open(config_path, 'r+') as config_file:\n config.write(config_file)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef end():\n os.system(\"taskkill /f /im SecureCRT.exe\")\n\nmain()","sub_path":"test/networkbu/Test/WEB_UI/NOVA_UI/get_info_hgj310_1.py","file_name":"get_info_hgj310_1.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"283659006","text":"#!/usr/bin/env python\n# -*-coding:utf-8-*-\nimport os\nimport time\nimport zipfile\n\n__author__ = 'SamWoo'\n\nclass ZipManager:\n 'zip the given folder automatically'\n\n def __init__(self):\n pass\n\n @staticmethod\n def zip_dir(dir_name, zip_file_name):\n '''\n compress the directory into a compressed package\n :param dir_name: folder to be compressed\n :param zip_file_name: compressed package name\n :return:\n '''\n file_list = []\n if os.path.isfile(dir_name):\n file_list.append(dir_name)\n else:\n for root, dirs, files in os.walk(dir_name):\n for file in files:\n if file.endswith('.zip'):\n continue\n file_list.append(os.path.join(root, file))\n\n zf = zipfile.ZipFile(zip_file_name, 'w', zipfile.zlib.DEFLATED)\n for tar in file_list:\n arcname = tar[len(dir_name):]\n zf.write(tar, arcname)\n zf.close()\n print('compress succeed!!')\n\n @staticmethod\n def unzip_file(zip_file_name, unzip_to_dir):\n '''\n unzip the package to the specified directory\n :param zip_file_name: package name to be decompressed\n :param unzip_to_dir: unzip the target directory\n :return:\n '''\n if not os.path.exists(unzip_to_dir):\n os.mkdir(unzip_to_dir)\n zfobj = zipfile.ZipFile(zip_file_name)\n for name in zfobj.namelist():\n name = name.replace('\\\\', '/')\n if name.endswith('/'):\n os.mkdir(os.path.join(unzip_to_dir, name))\n else:\n ext_filename = os.path.join(unzip_to_dir, name)\n ext_dir = os.path.dirname(ext_filename)\n if not os.path.exists(ext_dir):\n os.mkdir(ext_dir)\n outfile = open(ext_filename, 'wb')\n outfile.write(zfobj.read(name))\n outfile.close()\n print('extract succeed!!')\n\n\nif __name__ == \"__main__\":\n folder = r'.\\font'\n target = r'.\\font_' + time.strftime('%Y%m%d%H%M%S') + '.zip'\n ZipManager.zip_dir(folder, target)\n ZipManager.unzip_file(target, r'.\\zipfont')\n","sub_path":"auto_send_email/zip_manager.py","file_name":"zip_manager.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"96446244","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_redis.spiders import RedisSpider\nfrom gm_work.items import GmWorkItem\nfrom tools.tools_r.header_tool import headers_todict\nimport re\n\n\nclass AmazongsSpider(RedisSpider):\n name = 'amazon_goodstoshop'\n allowed_domains = ['alibaba.com']\n start_urls = ['http://www.amazon.com/']\n redis_key = \"amazon_goodstoshop:start_url\"\n headers = headers_todict('''accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\n accept-encoding: gzip, deflate, br\n accept-language: zh-CN,zh;q=0.9\n upgrade-insecure-requests: 1\n 
user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36''')\n\n def start_requests(self):\n url = \"https://www.baidu.com/\"\n yield scrapy.Request(url=url,method=\"GET\",callback=self.seed_request,headers=self.headers,dont_filter=True)\n def seed_request(self,response):\n\n path = r\"C:/Users\\admin/Desktop/\"\n file_name = \"{6_1排行榜_offer有效}[goodsid].txt_去重 - 副本.txt\"\n with open(path+file_name,\"r\",encoding=\"utf-8\") as f:\n for i in f:\n i = i.strip()\n page_num = 0\n url = \"https://www.amazon.co.uk/gp/aw/ol/{}\".format(i)\n yield scrapy.Request(url=url, method=\"GET\", headers=self.headers,meta={\"page_num\":page_num,\"key\":i})\n\n def parse(self, response):\n youxiao = re.search(\"(olpOfferList|olpProduct)\",response.text)\n key = response.meta.get(\"key\")\n if youxiao:\n item_s = GmWorkItem()\n item_s[\"key\"] = key\n item_s[\"source_code\"] = response.text\n yield item_s\n shop_list = response.css(\".a-section.a-spacing-double-large\").xpath(\n \"./div//h3[@class='a-spacing-none olpSellerName']/a\")\n if not shop_list:\n item = GmWorkItem()\n item[\"key\"] = key\n item[\"name\"] = \"\"\n item[\"url\"] = \"\"\n item[\"seller_id\"] = \"\"\n yield item\n for i in shop_list:\n name = i.xpath(\"./text()\").get()\n if name:\n name = name.strip()\n url = i.xpath(\"./@href\").get()\n seller_id = \"\"\n match = re.search('(s|seller)=(.*?)($|[&])',url)\n if match:\n seller_id = match.group(2)\n item = GmWorkItem()\n item[\"key\"] = key\n item[\"name\"] = name\n item[\"url\"] = url\n item[\"seller_id\"] = seller_id\n yield item\n next_url = response.css(\"li.a-last\").xpath(\"./a/@href\").get()\n if next_url:\n next_url = \"https://www.amazon.co.uk\"+next_url\n yield scrapy.Request(url=next_url, method=\"GET\", headers=self.headers, meta={\"key\": key})\n else:\n try_result = self.try_again(response,key)\n yield try_result\n\n def try_again(self,rsp,key):\n max_num = 10\n meta = rsp.meta\n try_num = meta.get(\"try_num\",0)\n if try_num < max_num:\n try_num += 1\n request = rsp.request\n request.dont_filter = True\n request.meta[\"try_num\"] = try_num\n return request\n else:\n item_e = GmWorkItem()\n item_e[\"error_id\"] = 1\n item_e[\"key\"] = key\n return item_e","sub_path":"gm_work/gm_work/spiders/amazon_goodtoshop.py","file_name":"amazon_goodtoshop.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548399249","text":"from .testapp.app import app\nfrom high_templar.test import TestCase, Client, MockWebSocket\n\n\nclass TestKeepalive(TestCase):\n def test_ping_pong(self):\n ws = MockWebSocket()\n ws.mock_incoming_message('ping')\n self.client.open_connection(ws)\n\n self.assertEqual(2, len(ws.outgoing_messages))\n self.assertEqual('pong', ws.outgoing_messages[1])\n","sub_path":"tests/test_keepalive.py","file_name":"test_keepalive.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22998015","text":"from django.conf.urls import include, url\n\nurlpatterns = [\n url(r'^login/$', 'authsys.views.login'),\n url(r'^logout/$', 'authsys.views.logout'),\n url(r'^isauth/$', 'authsys.views.is_auth'),\n url(r'^userid/$', 'authsys.views.user_id'),\n url(r'^reg/$', 
'authsys.views.reg'),\n]\n\n","sub_path":"authsys/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"310545126","text":"import qt\r\nimport numpy as np\r\nimport shutil\r\nimport sys\r\nimport os\r\nimport time\r\nimport progressbar\r\nfrom constants import *\r\n\r\n\r\ndef copy_script(once):\r\n if once:\r\n shutil.copy2('%s'%sys.argv[0],'%s/%s'%(data.get_filepath()[:-(len(data.get_filename())+1)],os.path.basename(sys.argv[0])))\r\n \r\ncenter = 3.583328*GHz #VNA_Centre\r\nspan = 1*MHz\r\n\r\nstart_freq = center - span/2.0 # 6.148908*GHz\r\nstop_freq = center + span/2.0 # 6.153908*GHz\r\npower = -3 #with -20db attenuation\r\nnum_points=2001\r\nif_bw = 100\r\n \r\n# pump\r\ncenter_SMF= 3.569866*GHz #(10.101 MHz drum)\r\nspan_SMF = 1*MHz\r\npump_start = center_SMF - span_SMF/2\r\npump_stop = center_SMF + span_SMF/2\r\npump_points =51\r\npump_power = 22\r\n\r\n\r\n\r\nznb = qt.instruments.create('ZNB20', 'RhodeSchwartz_ZNB20', address='TCPIP0::192.168.1.3::INSTR')\r\nsmf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = 'TCPIP0::192.168.1.4::INSTR', reset=False)\r\nrigol = qt.instruments.create('DP832A', 'Rigol_DP832A', address='TCPIP0::192.168.1.5::INSTR')\r\n# rigol.output_on(1)\r\n\r\ns_params=['S21']\r\nznb.reset()\r\nznb.add_trace('S21')\r\nznb.set_external_reference(True)\r\nznb.set_start_frequency(start_freq)\r\nznb.set_stop_frequency(stop_freq)\r\nznb.set_source_power(power)\r\nznb.set_sweep_mode('single')\r\nznb.set_if_bandwidth(if_bw)\r\nznb.set_numpoints(num_points)\r\nznb.rf_on()\r\n\r\nqt.mstart()\r\n\r\ndata_file_name = raw_input('Enter name of data file: ')\r\ndata=qt.Data(name=data_file_name)\r\ndata.add_coordinate('pump', units='Hz')\r\ndata.add_coordinate('frequency', units='Hz')\r\nfor s_param in s_params:\r\n data.add_value('%s real' % s_param.strip().upper())\r\n data.add_value('%s imag' % s_param.strip().upper())\r\n data.add_value('%s abs' % s_param.strip().upper())\r\n data.add_value('%s phase' % s_param.strip().upper())\r\n\r\n# data.add_value('S21 phase')\r\n\r\n\r\npump_list = np.linspace(pump_start, pump_stop, pump_points)\r\nfreq_array = np.linspace(start_freq, stop_freq, num=num_points)\r\n\r\n##########\r\n# Take care of Meta\r\n\r\nin_meta = [start_freq, stop_freq, num_points, 'Probe (Hz)']\r\nout_meta = [pump_start, pump_stop, pump_points,'Pump (Hz)']\r\n\r\nonce = True\r\nsmf.rf_on()\r\nsmf.set_source_power(pump_power)\r\n\r\n\r\nprogress_bar = progressbar.ProgressBar(maxval=len(pump_list), \\\r\n widgets=['Progress: ', progressbar.Bar('.', '', ''), ' ', progressbar.Percentage(), ' (', progressbar.ETA(), ') '])\r\nprogress_bar.start()\r\n\r\n\r\n\r\nfor index, pump in enumerate(pump_list):\r\n smf.set_frequency(pump)\r\n znb.send_trigger(wait=True)\r\n trace= znb.get_data('S21')\r\n znb.autoscale()\r\n pump_array = np.linspace(pump, pump, num_points)\r\n traces = []\r\n for s_param in s_params:\r\n trace = znb.get_data(s_param)\r\n traces.append(np.real(trace))\r\n traces.append(np.imag(trace))\r\n traces.append(np.absolute(trace))\r\n traces.append(np.angle(trace))\r\n data.add_data_point(pump_array, freq_array, *traces, meta=True)\r\n #data.add_data_point(pump_array, freq_array, np.absolute(trace))\r\n data.metagen2D(in_meta, out_meta)\r\n progress_bar.update(index+1)\r\n\r\n copy_script(once); once = False\r\n # 
qt.msleep(60)\r\nprogress_bar.finish()\r\n\r\n\r\ndata.close_file()\r\nsmf.rf_off()\r\nznb.rf_off()\r\n#rigol.output_off(2)","sub_path":"scripts/Drums/S21_omit_every_quantity.py","file_name":"S21_omit_every_quantity.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"474721843","text":"#-----------------------------------------\n# agent to minimize data misfit\n#------------------------------------------\n\nimport numpy as np \nimport sys \nimport os \nfrom dolfin import *\nfrom dolfin_adjoint import * \npdir = os.path.dirname(os.getcwd()) \nsys.path.insert(0, pdir)\nfrom time import time\nimport math\nfrom util.misc import * \nfrom util.forward import *\n\n## proximal operation of data fidelity function\ndef posteriorTransProx(d_obs_, m_prior_, m_, obs_vec_, channel_params, simul_params, pnp_params): \n \n ngy = simul_params[\"ngy\"]\n ngx = simul_params[\"ngx\"] \n dx_ = simul_params[\"dx\"] \n dy_ = simul_params[\"dy\"] \n\n num_psteps = simul_params[\"num_psteps\"]\n num_wsteps = simul_params[\"num_wsteps\"]\n\n m_prior_ = m_prior_ * (channel_params[\"hperm\"] - channel_params[\"lperm\"]) + channel_params[\"lperm\"]\n m_ = m_ * (channel_params[\"hperm\"] - channel_params[\"lperm\"]) + channel_params[\"lperm\"]\n\n d_ini = simul_params[\"ini_cond\"] * np.ones((ngy * ngx, 1))\n obs_sigma = pnp_params[\"obs_sigma\"]\n reg = pnp_params[\"obs_reg\"] \n m_lb = channel_params[\"lperm\"]\n m_ub = channel_params[\"hperm\"]\n \n mesh = RectangleMesh(Point(0.0, 0.0), Point(ngx*dy_, ngy*dy_), ngy - 1, ngx - 1)\n\n V = FunctionSpace(mesh, \"Lagrange\", 1) \n m = projectFunction(V, m_) \n obs_vec = projectFunction(V, obs_vec_ ) \n m_prior = projectFunction(V, m_prior_ ) \n obs_sigma = Constant(obs_sigma) \n reg = Constant(reg) \n J = 0\n d_old = projectFunction(V, d_ini)\n \n # pre-well simulation\n for n in range(num_psteps): \n d = forwardPrevFunc( d_old, m, V, simul_params ) \n d_old = d \n d_obs = projectFunction(V, d_obs_[:, 0:1] )\n J += assemble( 0.5 * 1/(obs_sigma**2) * inner(obs_vec*(d_old - d_obs), obs_vec*(d_old - d_obs)) *dx )\n \n # after-well simulation\n for n in range(num_wsteps):\n d = forwardWellFunc(d_old, m, V, simul_params ) \n d_old = d \n d_obs = projectFunction(V, d_obs_[:, n+1:n+2] )\n J += assemble( 0.5 * 1/(obs_sigma**2) * inner(obs_vec*(d - d_obs), obs_vec*(d - d_obs)) *dx ) \n J /= (num_wsteps+1)\n\n J += assemble( 0.5 * 1/reg * inner( m - m_prior, m - m_prior) * dx ) \n \n control = Control(m) \n reduced_functional = ReducedFunctional(J, control) \n \n m_opt = minimize(reduced_functional, bounds = (m_lb, m_ub), options = {\"disp\":False}, tol=1e-4)\n\n m_out = m_opt.compute_vertex_values(mesh).reshape(ngy*ngx, 1) \n m_out = (m_out - channel_params[\"lperm\"])/(channel_params[\"hperm\"] - channel_params[\"lperm\"])\n\n return m_out\n\n","sub_path":"model/posterior.py","file_name":"posterior.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"109054000","text":"from iredis.completers import LatestUsedFirstWordCompleter\n\n\ndef test_LUF_completer_touch():\n c = LatestUsedFirstWordCompleter(3, [\"one\", \"two\"])\n c.touch(\"hello\")\n assert c.words == [\"hello\", \"one\", \"two\"]\n\n c.touch(\"foo\")\n assert c.words == [\"foo\", \"hello\", \"one\"]\n\n c.touch(\"hello\")\n assert c.words == [\"hello\", \"foo\", \"one\"]\n\n\ndef test_LUF_completer_touch_words():\n c = 
LatestUsedFirstWordCompleter(3, [])\n c.touch_words([\"hello\", \"world\", \"foo\", \"bar\"])\n assert c.words == [\"bar\", \"foo\", \"world\"]\n\n c.touch_words([\"one\", \"two\"])\n assert c.words == [\"two\", \"one\", \"bar\"]\n","sub_path":"tests/test_completers.py","file_name":"test_completers.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"40209970","text":"from importlib.machinery import SourceFileLoader\nfrom sklearn.datasets import fetch_mldata\nfrom classifier import KnnClassifier\nimport platform\nimport pandas as pd\nimport numpy as np\nimport os\n\ndef get_paths(current_platform, data_dir):\n\n if current_platform == \"Windows\":\n base_dir = os.path.join(\"D:/\", \"GoogleDrive/_cloudifier_data/09_tests\")\n else:\n base_dir = os.path.join(os.path.expanduser(\"~\"), \"Google Drive/_cloudifier_data/09_tests\")\n\n utils_path = os.path.join(base_dir, \"Utils\")\n data_path = os.path.join(base_dir, data_dir)\n\n return base_dir, utils_path, data_path\n\nif __name__=='__main__':\n\n _, utils_path, mnist_path = get_paths(platform.system(), \"_MNIST_data\")\n logger_lib = SourceFileLoader(\"logger\", os.path.join(utils_path, \"logger.py\")).load_module()\n logger = logger_lib.Logger(show = True)\n\n mnist = fetch_mldata('MNIST original', data_home = mnist_path)\n labels = [\"pixel_\" + str(i) for i in range(784)]\n mnist_df = pd.DataFrame(np.c_[mnist['target'], mnist['data']], \\\n columns = [\"Digit_label\"] + labels)\n logger.log(\"Finished fetching MNIST\")\n\n solver = KnnClassifier(mnist_df, 0.14, logger)\n solver.preprocess()\n vals, counts = solver.predict(k = 5)\n solver.compute_accuracy(vals, counts)","sub_path":"01_tests/05_andrei_repository/2017.08.23_RaportMnist/knn_classifier/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"467659258","text":"'''\n@author : kongweikun\n@file : logging.py\n@time : 18-7-19 4:03 PM\n@contact : kongwiki@163.com\n'''\nimport logging\nimport logging.config\n\n# logging.warning('Watch out!')\n# logging.info('I told you so')\n# logging.basicConfig(filename='example.log',level=logging.INFO)\n# logging.debug('This message should go to the log file')\n# logging.info('So should this')\n# logging.warning('And this, too')\n\n# logging.config.fileConfig('logging.conf')\n#\n# # create logger\n# logger = logging.getLogger('simpleExample')\n#\n# # 'application' code\n# logger.debug('debug message')\n# logger.info('info message')\n# logger.warn('warn message')\n# logger.error('error message')\n# logger.critical('critical message')\nservice_name = \"KongWiki\"\n\nlogger = logging.getLogger(\"AppName\")\nlogger.error('%s service is down!', service_name) # the logger's own %-style formatting, recommended\nlogger.error('%s service is %s!', service_name, 'down') # %-style formatting with several arguments\nlogger.error('{} service is {}!'.format(service_name, 'down')) # formatting via str.format, recommended","sub_path":"Basis/package/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"362373184","text":"from math import ceil\nn, h = map(int, input().split())\ndata1, data2 = [], []\nfor i in range(n):\n a, b = map(int, input().split())\n data1.append([a, b])\n data2.append([a, b])\n\ndata1 = sorted(data1, key = lambda x: x[0], reverse = True)\ndata2 = sorted(data2, key = lambda x: x[1], reverse = 
True)\nans = 0\nwhile h > 0 and data1[0][0] < data2[0][1]:\n h -= data2[0][1]\n ans += 1\n del data2[0]\n if len(data2) == 0:\n break\nprint(h)\nans += ceil(max(0, h) / data1[0][0])\nprint(ans)","sub_path":"boot/hard/katana.py","file_name":"katana.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"219299612","text":"import theano, scipy, sys, os.path\nimport numpy as np\nfrom PIL import Image\nfrom pylearn2.utils import serial\nfrom pylearn2.gui import patch_viewer\n\n#model_path = '/Users/kazjon/Dropbox/Documents/Research/UNCC/ComputationalCreativity/CCWorkshop/pylearn2/pylearn2/scripts/tutorials/stacked_autoencoders/dae'\n\ndef layerpath(l,model_path):\n\treturn model_path+\"_l\"+str(l)+\".pkl\"\n\ndef show_weights(model_path = \"dae\"):\n\ti = 1\n\tmodels = []\n\tweights = []\n\tXs = []\n\tYs = []\n\tencode_functs = []\n\tdecode_functs = []\n\twhile os.path.isfile(layerpath(i,model_path)):\n\t\tmodels.append(serial.load(layerpath(i,model_path)))\n\t\tI = models[i-1].get_input_space().make_theano_batch()\t\n\t\tE = models[i-1].encode(I)\n\t\tencode_functs.append(theano.function( [I], E ))\n\t\tH = models[i-1].get_output_space().make_theano_batch()\n\t\tD = models[i-1].decode(H)\n\t\tdecode_functs.append(theano.function( [H], D ))\n\t\tweights.append(models[i-1].get_weights())\n\t\ti += 1\n\n\tl1_acts = np.zeros([weights[2].shape[1],weights[0].shape[0]])\n\tfor k in range(len(weights[2].T)):\n\t\tfeature = np.zeros(len(weights[2].T))\n\t\tfeature[k] = 1.0\n\t\tl3_acts = decode_functs[2](np.atleast_2d(feature.astype(np.dtype(np.float32))))\n\t\tl2_acts = decode_functs[1](l3_acts)\n\t\tl1_acts[k] = decode_functs[0](l2_acts)\n\n\tpv = patch_viewer.make_viewer(l1_acts, patch_shape=[28,28])\n\tpv.save(\"mnist_l3_weights_decoder.png\")\n\t#scipy.misc.imsave('mnist7_l1_w0.png',l1_act.reshape([28,28]))\n\nif __name__ == \"__main__\":\n\tshow_weights()","sub_path":"scripts/sdae_l3_show_weights_decoder.py","file_name":"sdae_l3_show_weights_decoder.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"534189607","text":"\"\"\"\r\nHuba Ferenc Benzar\r\n\"\"\"\r\n#Importing packages\r\nimport os #OS\r\nimport os.path #path from OS\r\nimport gym #OpenAI Gym\r\nimport random #initially let the agent run randomly\r\nimport tflearn #tensorflow learn\r\nimport numpy as np #matrix math\r\nfrom tflearn.layers.estimator import regression #final layer\r\nfrom tflearn.layers.core import input_data, dropout, fully_connected #input layer, drop out like 20%, fully connected layer instead of CNN\r\nfrom sklearn.externals import joblib #joblit from sklearn externals\r\nfrom collections import Counter #Counter from collections\r\nfrom statistics import median, mean #illustrate how random performed\r\n\r\n#create data directory\r\nlog = 'log/'\r\nif not os.path.exists(log):\r\n os.makedirs(log)\r\nmodel = 'log/OpenAI_Model/'\r\nif not os.path.exists(model):\r\n os.makedirs(model)\r\n\r\nenv = gym.make(\"CartPole-v0\") #import game \"CartPole-v0\"\r\n\r\nlearning_rate = 1e-3 #LearningRate \r\nscore_requirement = 70 #learn from all random games that have a score of this number or greater\r\ninitial_games = 10000 #if this is too high it will brute force the methods\r\nepisodes = 200 #every frame the pole is balanced +1\r\nenv.reset() #starts environment\r\n\r\n\r\n#Creating Neural Network\r\ndef neural_network(input_size):\r\n 
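# layer-stack sketch (the author's comment below notes input_size is 4 for CartPole):\r\n    # input (None, input_size, 1) -> 128 -> 256 -> 512 -> 256 -> 128 -> softmax(2)\r\n    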
print(\"Creating Model...\")\r\n network = input_data(shape=[None, input_size, 1], name = \"input\") #input size is 4 in this case but better if i wanted to switch games\r\n #if there is an error for failed to do something with memory, make the nodes smaller\r\n #Tree layer\r\n #1 layer\r\n network = fully_connected(network, 128, activation = \"relu\") #takes input as network, 128 nodes on the layer, actication layer is rectufied linear\r\n network = dropout(network, 0.8) #0.8 is keeprate not dropout rate\r\n #2 layer\r\n network = fully_connected(network, 256, activation = \"relu\") #takes input as network, 256 nodes on the layer, actication layer is rectufied linear\r\n network = dropout(network, 0.8) #0.8 is keeprate not dropout rate\r\n #3 layer\r\n network = fully_connected(network, 512, activation = \"relu\") #takes input as network, 512 nodes on the layer, actication layer is rectufied linear\r\n network = dropout(network, 0.8) #0.8 is keeprate not dropout rate\r\n #4 layer\r\n network = fully_connected(network, 256, activation = \"relu\") #takes input as network, 256 nodes on the layer, actication layer is rectufied linear\r\n network = dropout(network, 0.8) #0.8 is keeprate not dropout rate\r\n #5 layer\r\n network = fully_connected(network, 128, activation = \"relu\") #takes input as network, 128 nodes on the layer, actication layer is rectufied linear\r\n network = dropout(network, 0.8) #0.8 is keeprate not dropout rate\r\n #output layer takes 2 unique outputs, change this number\r\n network = fully_connected(network, 2, activation=\"softmax\")\r\n #network regression with the optimizer Adam, learning rate is the default, loss is categorical crosscentropy\r\n network = regression(network, optimizer=\"Adam\", learning_rate = learning_rate,\r\n loss = \"categorical_crossentropy\", name=\"targets\")\r\n #model is tflearn deep neural network on the network creating a new directory called log\r\n model = tflearn.DNN(network, tensorboard_dir=log)\r\n \r\n #simple model not trained\r\n print(\"Model Created\")\r\n return model\r\n\r\n#Training Model\r\ndef train_model(training_data, model=False):\r\n print(\"Training Model...\")\r\n #error if there is no training data\r\n if (len(training_data) < 1):\r\n print(\"Error no training data!\")\r\n else:\r\n #x data\r\n #numpy array i 0th for i in training data which contains observations, actions. 
Reshape to -1 length of training data and to whatever shape it is\r\n x = np.array([i[0] for i in training_data]).reshape(-1,len(training_data[0][0]),1)\r\n #y data\r\n #same as x data but it's 1st in the list comprehension\r\n y = [i[1] for i in training_data]\r\n\r\n #if there is no model create a new model of the length of x\r\n if not model:\r\n model = neural_network(input_size = len(x[0]))\r\n #model fit takes input which is our x and y data, number of epochs too many will overfit too many is a problem.\r\n #snapshot step is 500 show metric is true, run id creates a folder where this model is strored\r\n model.fit({'input': x}, {'targets': y}, n_epoch=3, snapshot_step=500 , show_metric=True, run_id=\"OpenAI_Model\")\r\n \r\n #return trained model\r\n print(\"Trained Model\")\r\n return model\r\n\r\ndef population():\r\n print(\"Creating Training Data...\")\r\n #actual data to train on observation and move made append data if it's above requested #Observation, Moves\r\n training_data = [] \r\n total_reward = [] #all scores\r\n accepted_scores = [] #total_reward that met the threshold\r\n #iterate through game\r\n for i in range(initial_games):\r\n score = 0 #score is set to 0\r\n game_memory = [] #moves specifically from this environment\r\n prev_obs = [] #previous observation that we saw\r\n #game\r\n for j in range(episodes):\r\n #env.render() # Comment out if i want it to go faster, renders game\r\n action = env.action_space.sample() #takes environment and takes a random action #easy to switch games with\r\n obs, reward, done, info = env.step(action)\r\n #pixel data (pole position, cart position), reward 1 or 0 info and other info #step takes action\r\n \r\n if len(prev_obs) > 0:\r\n game_memory.append([prev_obs, action])\r\n prev_obs = obs\r\n score += reward\r\n if done: break\r\n \r\n if score >= score_requirement: #if score is equal or bigger than threshold proceeds\r\n accepted_scores.append(score) #appends score let through\r\n for data in game_memory:\r\n #convert to one-hot output layer for the neural network\r\n if data[1] == 1:\r\n output = [0,1]\r\n elif data[1] == 0:\r\n output = [1,0]\r\n \r\n training_data.append([data[0], output]) #save training data\r\n #print([data[0], output]) #testing output\r\n \r\n env.reset() #reset environment to play again\r\n total_reward.append(score) #save total score for reward\r\n training_data_save = np.array(training_data) #later referencing\r\n \r\n if (len(accepted_scores) < 1):\r\n print(\"Error no number above\",score_requirement,\"in\", initial_games, \"initiations and\",episodes ,\"games.\")\r\n else:\r\n print(\"Here are some details\")\r\n print(\"Average accepted total_reward: \", mean(accepted_scores))\r\n print(\"Median accepted total_reward: \", median(accepted_scores))\r\n print(Counter(accepted_scores))\r\n \r\n #LOGGING\r\n fhand = open(log+'Training-Data.txt', 'w')\r\n fhand.write('Training Data\\n')\r\n fhand = open(log+'Training-Data.txt', 'a')\r\n fhand.write(str(accepted_scores)+'\\n')\r\n fhand.write('\\nAverage accepted score: '+(str(mean(accepted_scores)))+'\\nMedian accepted score: '+(str(median(accepted_scores))))\r\n fhand.close()\r\n np.save(log+\"Training-Data\", training_data_save)\r\n print(\"Training Data Created. 
Check the logs directory.\")\r\n\r\n return training_data\r\n\r\n#attempt to load in model, FAILED ATTEMPT\r\ndef load():\r\n path = ('.'+'/log/OpenAI_Model/')\r\n files = os.listdir(path)\r\n if (len(files)>0):\r\n decision = input(\"Please enter (y/Y) if you wish to load a file: \") \r\n if decision.lower() == \"y\":\r\n print(\"Hello, would you like to load a model from the following?\")\r\n for name in files:\r\n print(\"\\t\"+name+\"\\t\")\r\n data = input(\"Please enter the file you wish to load:\\n\")\r\n exist = (path+data)\r\n if os.path.exists(exist) and os.access(exist, os.R_OK):\r\n neural_network(input_size)\r\n model = dnn.load(data)\r\n trainain_model()\r\n model=True\r\n print(\"Loading: \" , data)\r\n else:\r\n print(\"Error this file does not exist!\\n\")\r\n else:\r\n print(\"Skipping to next step!\\n\")\r\n else:\r\n print()\r\n\r\n#Playing Game\r\ndef game():\r\n print(\"Playing Game with Model...\")\r\n #LOGGING\r\n fhand = open(log+'Testing-Trained-Model.txt', 'w')\r\n fhand.write('Iteration\\tReward\\n')\r\n fhand.close()\r\n #total_reward and choices are empty lists\r\n total_reward = []\r\n choices = []\r\n counter = 0\r\n #number of games we want to play\r\n for each_game in range(100):\r\n counter+=1\r\n score = 0 #total reward starts at 0\r\n game_memory = [] #creating empty lists\r\n prev_obs = []\r\n env.reset() #resets environment\r\n #iterate through the number of steps we want to make\r\n for i in range(episodes):\r\n #env.render() #renders game\r\n #if there is no previous observation we do a random move\r\n if len(prev_obs) == 0:\r\n action = env.action_space.sample()\r\n else: #otherwise when a frame is seen the action\r\n #argmax of model, we preduct the previous observation, which we reshape just like before and take 0th\r\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])\r\n choices.append(action) #we append all of the actions we make shows ratio of our network on what it oes\r\n \r\n new_obs, reward, done, info = env.step(action) #new observation, reward, done, info is environemt step in action\r\n prev_obs = new_obs #previous observation is new observation\r\n game_memory.append([new_obs, action]) #for retraining the network\r\n score += reward #reward added up\r\n #ends\r\n if done: break\r\n total_reward.append(score) #appends total_reward\r\n\r\n #LOGGING\r\n fhand = open(log+'Testing-Trained-Model.txt', 'a')\r\n fhand.write(str(counter) + '\\t\\t' + str(score) + '\\n')\r\n fhand.close()\r\n\r\n #LOGGING AND PRINTING \r\n print(\"Requirment to pass:\\nOver 100 consecutive trials get over 195 reward.\")\r\n print(\"Average reward over {} games:\".format(len(total_reward)), sum(total_reward)/len(total_reward))\r\n print(\"Choice 1: {}, Choice 2: {}\".format(choices.count(1)/len(choices), choices.count(0)/len(choices)))\r\n\r\n fhand = open(log+'Testing-Trained-Model.txt', 'a')\r\n fhand.write(str(\"\\nAverage reward over {} games:\".format(len(total_reward))+ str(sum(total_reward)/len(total_reward)))+\r\n (str(\"\\n\\nChoice 1: {}\\nChoice 2: {}\".format(choices.count(1)/len(choices), choices.count(0)/len(choices)))+\r\n (str(\"\\n\\nPass\\t\\tFail\") + '\\n')))\r\n\r\n if(sum(total_reward)/len(total_reward))> 195:\r\n print(\"Pass\")\r\n fhand.write(str(\"Pass\\t\"))\r\n fhand.close()\r\n else:\r\n print(\"Fail\")\r\n fhand.write(str(\"\\t\\tFail\"))\r\n fhand.close()\r\n \r\n\r\n#Executing\r\ntraining_data = population()\r\nmodel = 
train_model(training_data)\r\ngame()\r\n","sub_path":"gameai.py","file_name":"gameai.py","file_ext":"py","file_size_in_byte":13077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420607396","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 14 14:03:50 2020\n\n@author: NBOLLIG\n\"\"\"\nimport numpy as np\n\nclass Merged2Models():\n \"\"\"\n Inputs to the constructor is a list of two YoungModel objects.\n \"\"\"\n def __init__(self, M):\n assert(len(M) == 2)\n self.model_list = M\n assert(len(self.model_list[0].dataset.fs) == len(self.model_list[1].dataset.fs))\n self.feature_num = len(self.model_list[0].dataset.fs)\n self.X_test_pools = self.get_pooled_test_sets()\n self.y_concat, self.source = self.get_concatenated_labels()\n \n \"\"\"\n Merge the two test sets for each feature set. For now, we will assume the base models have disjoint\n negative classes and so we will not worry about exclusions. We also assume there will be no duplicates in the \n merged test sets.\n \n Returns:\n X_test_pools - a list of pooled test sets parallel to the models' feature sets\n \"\"\"\n def get_pooled_test_sets(self):\n X_test_pools = []\n \n ym0 = self.model_list[0]\n ym1 = self.model_list[1]\n \n # Pool test sets\n for i in range(self.feature_num):\n fs0 = ym0.dataset.fs[i]\n fs1 = ym1.dataset.fs[i]\n \n X_tst0 = fs0.X_tst\n X_tst1 = fs1.X_tst\n \n if X_tst0.shape[1] == X_tst1.shape[1]:\n X_test_pool = np.vstack((X_tst0, X_tst1))\n else:\n X_test_pool = 'N/A'\n \n X_test_pools.append(X_test_pool) \n \n return X_test_pools\n \n \"\"\"\n Get concatenated list of test labels from model 0 and model 1.\n Returns: \n y_concat - concatenated list of test labels, kept in binary form wrt original model in whose test set the instance belonged\n source - array of length X_test_pools.shape[0] with 0 if from model 0 and 1 if from model 1\n \"\"\"\n def get_concatenated_labels(self):\n \n ym1 = self.model_list[0]\n ym2 = self.model_list[1]\n \n mask1 = ym1.dataset.ds['trn/tst']=='test'\n y_test1 = np.asarray(ym1.dataset.ds[mask1]['y'],dtype=int)\n \n mask2 = ym2.dataset.ds['trn/tst']=='test'\n y_test2 = np.asarray(ym2.dataset.ds[mask2]['y'],dtype=int)\n \n s0 = np.full((y_test1.shape[0],), 0, dtype=int) # array of zeros\n s1 = np.full((y_test2.shape[0],), 1, dtype=int) # array of ones\n \n source = np.concatenate((s0, s1)) # 0 if from model 1, 1 if from model 2\n \n return np.concatenate((y_test1, y_test2)), source\n \n \"\"\"\n Get column of the confusion matrix corresponding to the given instance.\n \n The columns are:\n || model 0 host || model 1 host || neither model host ||\n \n Inputs:\n i - index of instance in pooled test set\n \"\"\"\n def get_column(self, i):\n \n source = self.source[i]\n y = self.y_concat[i]\n \n if source==0 and y==1:\n return 0\n elif source==1 and y==1:\n return 1\n else:\n return 2\n \n \"\"\"\n Get row of the confusion matrix corresponding to the given prediction.\n \n The rows are:\n || model 0 positive ||\n || model 0 negative ||\n || model 1 positive ||\n || model 1 negative ||\n \n Inputs:\n pred - prediction\n m = model\n \"\"\"\n def get_row(self, pred, m):\n assert(m==0 or m==1)\n \n if m==0 and pred==1:\n return 0\n elif m==0 and pred==0:\n return 1\n elif m==1 and pred==1:\n return 2\n else:\n return 3\n \n \"\"\"\n Test each model on the pooled test set\n \"\"\"\n def test_merged(self):\n CM = np.zeros((4,3), dtype=int)\n \n for fs in range(self.feature_num):\n if fs>1:\n X = self.X_test_pools[fs] # loop 
through test pools defined by all feature sets\n if type(X) != str: # set to string 'N/A' if it was not possible to create pooled test set\n for i in range(X.shape[0]): # Step through test instances\n x = X[i].reshape(1, -1) # represents a single instance\n \n for m in range(2):\n pred = self.model_list[m].models[fs].predict(x).item()\n row = self.get_row(pred, m)\n col = self.get_column(i)\n \n CM[row][col] += 1\n \n return CM\n \n \n \n \n \n ","sub_path":"code/merged_models.py","file_name":"merged_models.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"609688257","text":"class Trie:\n class Node:\n def __init__(self, key=None, data=None):\n self.key = key\n self.data = data\n self.children = {}\n\n def show(self):\n if self.data is None:\n print(\"Key: \", self.key, end = \" Children: \")\n else:\n print(\"Key: \", self.key, \"Data: \", self.data, end = \"Children: \")\n\n for key, child in self.children.items():\n print(key, \"->\", child.key, end = \" \")\n\n print()\n\n def __init__(self):\n self.root = self.Node(-1)\n self.node_id = 0\n\n def insert(self, word):\n curr = self.root\n for ch in word+'$':\n if ch not in curr.children:\n curr.children[ch] = self.Node(self.node_id)\n self.node_id += 1\n curr = curr.children[ch]\n\n def query(self, word):\n curr = self.root\n for ch in word+'$':\n if ch not in curr.children:\n return False\n curr = curr.children[ch]\n\n return True\n\n def show(self):\n self._show(self.root)\n\n def _show(self, node):\n node.show()\n for key, child in node.children.items():\n self._show(child)\n\nif __name__ == '__main__':\n trie = Trie()\n trie.insert('goodduck')\n trie.insert('goose')\n trie.insert('good')\n trie.insert('acer')\n trie.insert('banana')\n trie.insert('goose')\n trie.insert('ace')\n trie.show()\n print(trie.query('goode'))\n","sub_path":"strings/tries/tries.py","file_name":"tries.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"67363903","text":"import os\nimport random\n\nimport torch\nfrom torchvision import transforms\nimport torch.utils.data as data\nimport pandas as pd\nfrom PIL import Image, ImageFile, ImageDraw\nimport numpy as np\nimport math\n\nfrom ..utils.transforms import fliplr_joints, crop, generate_target, transform_pixel, random_erasing\nfrom scipy.ndimage.morphology import grey_dilation\n\nclass NTA(data.Dataset):\n\n def __init__(self, cfg, is_train=True, transform=None):\n if is_train:\n self.csv_file = cfg.DATASET.TRAINSET\n else:\n self.csv_file = cfg.DATASET.TESTSET\n\n self.is_train = is_train\n self.transform = transform\n self.data_root = cfg.DATASET.ROOT\n self.input_size = cfg.MODEL.IMAGE_SIZE\n self.output_size = cfg.MODEL.HEATMAP_SIZE\n self.sigma = cfg.MODEL.SIGMA\n self.scale_factor = cfg.DATASET.SCALE_FACTOR\n self.rot_factor = cfg.DATASET.ROT_FACTOR\n self.label_type = cfg.MODEL.TARGET_TYPE\n self.flip = cfg.DATASET.FLIP\n\n # load annotations\n self.landmarks_frame = pd.read_csv(self.csv_file)\n\n self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)\n self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)\n self.append_size = 1 / 3\n self.NUM_PTS = 194\n self.CROP_SIZE = 256\n\n\n # enlarge the face bounding box (to capture additional context)\n self.landmarks_frame['width_bbox'] = abs(self.landmarks_frame['bottom_x'] - self.landmarks_frame['top_x'])\n self.landmarks_frame['height_bbox'] = abs(self.landmarks_frame['bottom_y'] - self.landmarks_frame['top_y'])\n self.landmarks_frame['top_x'] -= self.append_size * self.landmarks_frame['width_bbox']\n self.landmarks_frame['top_y'] -= self.append_size * self.landmarks_frame['height_bbox']\n self.landmarks_frame['bottom_x'] += self.append_size * self.landmarks_frame['width_bbox']\n self.landmarks_frame['bottom_y'] += self.append_size * self.landmarks_frame['height_bbox']\n self.landmarks_frame['width_bbox'] = abs(self.landmarks_frame['bottom_x'] - self.landmarks_frame['top_x'])\n self.landmarks_frame['height_bbox'] = abs(self.landmarks_frame['bottom_y'] - self.landmarks_frame['top_y'])\n\n if self.is_train:\n # landmark coordinates relative to the top corner of the bounding box\n for id_point in range(self.NUM_PTS):\n self.landmarks_frame[f'Point_M{id_point}_X'] -= self.landmarks_frame['top_x']\n self.landmarks_frame[f'Point_M{id_point}_Y'] -= self.landmarks_frame['top_y']\n\n self.df_landmarks = self.landmarks_frame.drop(\n ['filename', 'top_x', 'top_y', 'bottom_x', 'bottom_y', 'width_bbox', 'height_bbox'], axis=1)\n\n def __len__(self):\n return len(self.landmarks_frame['filename'])\n\n def __getitem__(self, idx):\n\n row_bbox = self.landmarks_frame.loc[idx]\n row_landmarks = np.zeros(self.NUM_PTS * 2)\n if self.is_train:\n row_landmarks = []\n row_all_landmarks = self.df_landmarks.loc[idx]\n for id_point in range(0, 194):\n row_landmarks.append(row_all_landmarks[f'Point_M{id_point}_X'])\n row_landmarks.append(row_all_landmarks[f'Point_M{id_point}_Y'])\n row_landmarks = np.array(row_landmarks)\n\n img_file = os.path.join(self.data_root, row_bbox['filename'])\n img = Image.open(img_file)\n\n # crop the face\n bbox = [row_bbox['top_x'], row_bbox['top_y'], row_bbox['bottom_x'], row_bbox['bottom_y']]\n img = img.crop(bbox)\n\n # resize the crop to CROP_SIZE while preserving the aspect ratio\n w, h = img.size\n if h > w:\n f = self.CROP_SIZE / w\n else:\n f = self.CROP_SIZE / h\n img = img.resize((int(w * f), int(h * f)))\n row_landmarks = row_landmarks * f\n\n # CropCenter\n w, h = img.size\n margin_h = (h - self.CROP_SIZE) // 2\n margin_w = (w - self.CROP_SIZE) // 2\n img = img.crop([margin_w, margin_h, self.CROP_SIZE + margin_w, self.CROP_SIZE + margin_h])\n row_landmarks = row_landmarks.astype(np.int16).reshape(-1, 2)\n row_landmarks -= np.array((margin_w, margin_h), dtype=np.int16)[None, :]\n pts = row_landmarks.astype(np.int16).reshape(-1, 2)\n\n # Random cutoff\n if self.is_train:\n img = random_erasing(img)\n\n img = np.array(img.convert('RGB'), dtype=np.float32)\n # Image.fromarray(np.uint8(img)).show()\n\n xmin = np.min(pts[:, 0])\n xmax = np.max(pts[:, 0])\n ymin = np.min(pts[:, 1])\n ymax = np.max(pts[:, 1])\n\n center_w = (math.floor(xmin) + math.ceil(xmax)) / 2.0\n center_h = (math.floor(ymin) + math.ceil(ymax)) / 2.0\n\n center_w = 256/2.0\n center_h = 256/2.0\n\n scale = max(math.ceil(xmax) - math.floor(xmin), math.ceil(ymax) - math.floor(ymin)) / 200.0\n scale = 1\n center = torch.Tensor([center_w, center_h])\n\n scale *= 1.25\n\n # if self.is_train:\n # if random.random() <= 0.5 and self.flip:\n # img = np.fliplr(img)\n # pts = fliplr_joints(pts, width=img.shape[1], dataset='300W')\n # pts[:, 0] = img.shape[1] - pts[:, 0]\n # img = Image.fromarray(np.uint8(img))\n # draw = ImageDraw.Draw(img)\n # r = 3\n # for coord in pts[:]:\n # x, y = coord\n # draw.ellipse((x - r, y - r, x + r, y + r), fill=(0, 255, 0, 0))\n # img.show()\n\n\n\n nparts = pts.shape[0]\n # print(nparts)\n target = np.zeros((nparts, 
self.output_size[0], self.output_size[1]))\n M = np.zeros((nparts, self.output_size[0], self.output_size[1]))\n tpts = pts.copy()\n tpts = tpts/4 # 256/4 =64\n # tpts = tpts/2 # 256/4 =64\n # print(tpts)\n\n\n for i in range(nparts):\n if tpts[i, 1] > 0:\n target[i], is_generate = generate_target(target[i], tpts[i] - 1, self.sigma,\n label_type=self.label_type)\n # if not is_generate:\n # print(pts[i,1], img_file)\n # print(row_landmarks[row_landmarks > 256])\n # Image.fromarray(np.uint8(img)).show()\n\n # print(target.shape)\n # tr = target.sum(axis=0)\n # tr = np.concatenate(target[:5], axis=1)\n # Image.fromarray(np.uint8(tr*255)).show()\n # Image.fromarray(np.uint8(target[0])).show()\n # RandomErasing = transforms.RandomErasing()\n # img = RandomErasing(img)\n # Image.fromarray(np.uint8(img)).show()\n\n\n img = img.astype(np.float32)\n img = (img / 255.0 - self.mean) / self.std\n img = img.transpose([2, 0, 1])\n target = torch.Tensor(target)\n tpts = torch.Tensor(tpts)\n # center = torch.Tensor(center)\n\n\n # M = np.zeros((self.NUM_PTS, self.output_size, self.output_size), dtype=np.float32)\n for i in range(len(M)):\n M[i] = grey_dilation(target[i], size=(3, 3))\n M = np.where(M >= 0.5, 1, 0)\n\n\n meta = {'index': idx,\n 'center': center,\n 'scale': scale,\n 'pts': torch.Tensor(pts),\n 'tpts': tpts,\n \"file_name\": row_bbox['filename'],\n \"crop_margin_x\": margin_w,\n \"crop_margin_y\": margin_h,\n \"scale_coef\": f,\n \"top_x\": row_bbox['top_x'],\n \"top_y\": row_bbox['top_y'],\n \"M\" : M\n }\n\n return img, target, meta\n\n\n\n","sub_path":"HRNet-Facial-Landmark-Detection/lib/datasets/NTA.py","file_name":"NTA.py","file_ext":"py","file_size_in_byte":7878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510810710","text":"params = {}\nparams['epsilon'] = 0.01 #0.01\nparams['epsdecay'] = 0.01\n\n# weight-decay for L2 penalty, weight-cost for rbms (0.01, 0.00001)\nparams['l2reg'] = 0.01\nparams['pbias'] = 0.05\nparams['pbias_lambda'] = 5\nparams['sigma_start'] = 0.02\nparams['sigma_stop'] = 0.02\nparams['std_gaussian'] = params['sigma_start']\nparams['C_sigm'] = 1\n\n\n","sub_path":"src/theano/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326370867","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nimport json\nimport re\nfrom datetime import datetime, time\nimport os\nimport sys\nfrom flask import Response\nimport urllib.request\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\n\napp = Flask(__name__)\napi = Api(app)\n\n# Object def to store account properties\nclass WM_Accounts(object):\n def __init__(self, app_name=None, guid=None, permalink=None):\n self.app_name = app_name\n self.guid = guid\n self.permalink = permalink\n\n# Object def to store intividual deployable properties\nclass WM_ContentItem(object):\n def __init__(self, item_url=None, app_name=None, title=None, keywords=None):\n self.item_url = item_url\n self.app_name = app_name\n self.title = title\n self.keywords = keywords\n # returns a set of results in the same form as a Zendesk search API response. 
Most props are ignored by WalkMe.\n def zenifiedItem(self):\n zenItem = {\n \"id\":360022174414,\n \"url\":\"https://support.zendesk.com/api/v2/help_center/en-us/articles/360022174414-Introducing-the-Zendesk-Suite-onboarding-tasks.json\",\n \"html_url\":self.item_url,\n \"author_id\":364345472387,\n \"comments_disabled\":False,\n \"draft\":False,\n \"promoted\":False,\n \"position\":1,\n \"vote_sum\":-1,\n \"vote_count\":9,\n \"section_id\":360004062094,\n \"created_at\":\"2019-04-28T15:23:11Z\",\n \"updated_at\":\"2019-09-16T16:10:22Z\",\n \"name\": self.title,\n \"title\": self.title,\n \"source_locale\":\"en-us\",\n \"locale\":\"en-us\",\n \"outdated\":False,\n \"outdated_locales\":[\n\n ],\n \"edited_at\":\"2019-09-11T23:04:07Z\",\n \"user_segment_id\":None,\n \"permission_group_id\":298067,\n \"label_names\":[\n \"wm\"\n ],\n \"body\":\"\\u003cp class=\\\"p\\\"\\u003eFrom Application: \" + self.app_name + \"\\u003c/p\\u003e\",\n \"snippet\":None,\n \"result_type\":\"article\"\n }\n return zenItem\n\n# Function inds and reads the data file based on the settings file\ndef getDataFile(settingsUrl):\n # get the settings file and check for failures\n try:\n response = urllib.request.urlopen(settingsUrl)\n except urllib.error.URLError as e: response = e.reason\n except socket.error as e: response = ''\n except socket.timeout as e: response = ''\n except UnicodeEncodeError as e: response = ''\n except http.client.BadStatusLine as e: response = ''\n except http.client.IncompleteRead as e: response = ''\n except urllib.error.HTTPError as e: response = ''\n # Assume it got a file and if it didn't, return an empty object\n try:\n data = response.read() # a `bytes` object\n except (UnicodeDecodeError, AttributeError):\n print(\"Error: \" + response)\n return None\n settings = data.decode('utf-8') # a `str`; this step can't be used if data is binary\n\n # grab the data file URL from the settings file\n query = re.search(\"'DataFiles':\\[{'url':'(.+?)',\", settings)\n dataUrl = query.group(1)\n print(\"Getting data file at \" + dataUrl)\n # get the data file and check for failures\n try:\n dataResponse = urllib.request.urlopen(dataUrl)\n except urllib.error.URLError as e: response = e.reason\n except socket.error as e: response = ''\n except socket.timeout as e: response = ''\n except UnicodeEncodeError as e: response = ''\n except http.client.BadStatusLine as e: response = ''\n except http.client.IncompleteRead as e: response = ''\n except urllib.error.HTTPError as e: response = ''\n # Assume it got a file and if it didn't, return an empty object\n try:\n data = response.read() # a `bytes` object\n except (UnicodeDecodeError, AttributeError):\n print(\"Error: \" + response)\n return None\n dataData = dataResponse.read() # a `bytes` object\n # return the file\n return dataData\n\n# function to load all valid deployables into the index\ndef loadIndex():\n contentItems = []\n global appList\n for account in appList:\n # build the settings URL based on the GUID. Assume it's .txt for now\n settingsUrl = \"https://cdn.walkme.com/users/\" + account.guid + \"/settings.txt\"\n print(\"Checking \" + account.app_name + \" settings file at \" + settingsUrl)\n\n dataContent = getDataFile(settingsUrl)\n # if nothing returned, might be using the JS settings file. Do another load attempt this time of settings.js\n if dataContent == None:\n settingsUrl = \"https://cdn.walkme.com/users/\" + account.guid + \"/settings.js\"\n print(\"Wrong data URL at settings.txt. 
Trying \" + account.app_name + \" settings file at \" + settingsUrl)\n dataContent = getDataFile(settingsUrl)\n if dataContent == None:\n print(\"No data file found for \" + account.app_name + \", skipping\")\n\n content = dataContent.decode('utf-8') # a `str`; this step can't be used if data is binary\n # regex to find all valid deployable & key props\n query = re.findall(\"\\\"KeyWords\\\": \\\"(.*?)\\\",\\\"Goals\\\": .+?,\\\"Id\\\": (?P\\d*),\\\"OrderIndex\\\": \\d*,\\\"Name\\\": \\\"(?P.+?)\\\",\", content)\n for item in query:\n # add matching deployables as content item objects\n contentItems.append(WM_ContentItem(account.permalink + \"?walkme=19-\" + item[1], account.app_name, item[2], item[0]))\n print(str(len(query)) + \" items added from \" + account.app_name)\n return contentItems\n\nclass Do_Search(Resource):\n\n def get(self):\n global wmIndex\n result = \"unhandled\"\n # get the query arg\n query = request.args.get('query')\n if len(wmIndex) != 0:\n items = []\n for item in wmIndex:\n items.append(item.title)\n # here's fuzzy fuzzywuzzy doing the search\n matches = process.extract(query, items, limit=int(os.environ.get('MAX_RESULTS')))\n else: wmIndex = loadIndex()\n\n zenItems = []\n for item in wmIndex:\n for match in matches:\n if item.title == match[0] and match[1] > int(os.environ.get('MAX_SCORE')):\n zenItems.append(item.zenifiedItem())\n result = {\n \"count\":1,\n \"next_page\":None,\n \"page\":1,\n \"page_count\":1,\n \"per_page\":25,\n \"previous_page\":None,\n \"results\": zenItems\n }\n\n result = json.dumps(result)\n\n return Response(str(result), mimetype='application/json')\n\nclass Do_Reindex(Resource):\n\n def get(self):\n result = \"unhandled\"\n wmIndex = []\n wmIndex = loadIndex()\n\n if len(wmIndex) != 0:\n result = \"\"\n for item in wmIndex:\n result += (item.app_name + \" - \" + item.title + \" (keywords: \" + item.keywords + \"): \" + item.item_url + \"\\n\")\n\n return {'status': 'ok', 'result': result}\n\napi.add_resource(Do_Search, '/api/v2/help_center/articles/search.json')\napi.add_resource(Do_Reindex, '/reindex')\nappList = []\n\n#look for any account config up to 100 configs in the format ACCOUNT_99\na = 1\nwhile a < 100:\n if str(os.environ.get('ACCOUNT_' + str(a))) != \"None\":\n account = str(os.environ.get('ACCOUNT_' + str(a)))\n print(\"Adding \" + account.split(\"|\")[0] + \" to config\")\n appList.append(WM_Accounts(account.split(\"|\")[0], account.split(\"|\")[1], account.split(\"|\")[2]))\n a += 1\nprint(str(len(appList)) + \" account configs found.\")\n\nwmIndex = []\nwmIndex = loadIndex()\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 80))\n app.run(host='0.0.0.0', port=port)\n # app.run(host='0.0.0.0', port=80)\n","sub_path":"wm-global-search.py","file_name":"wm-global-search.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520050522","text":"import math\nstr = input(\"дай строку\")\na = len(str)\ni = 0\ntimer = 1\ncout = \"\"\n\ndef mach(i, timer):\n #print(timer)\n if str[i] == ' ':\n timer = 1;\n print(\" \", \"t=\", timer, sep='', end=' ')\n return timer\n else:\n if((timer % 2 == 0) & (timer != 0)):\n print(str[i], str[i], end=' ', sep='')\n timer = timer + 1\n return timer\n else:\n timer = 1 + timer\n print(str[i], sep='', end=' ')\n return timer\n\n\n\nwhile i< len(str):\n timer = mach(i, timer)\n i = 1 + 
i;\n\n\n\n\n","sub_path":"zas3.py","file_name":"zas3.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"222444465","text":"import string\nimport re\nimport numpy as np\nimport pandas as pd\nfrom sklearn import tree\nimport time\ndf=pd.read_csv(\"~/Desktop/AI/hw2/reviews.csv\", sep=\"|\") #read in dataframe\nlabellist=[] #list of label of training sets\ntest=[] #list of text of test cases\ntestlabel=[] #list of label of test cases\ndrop_list=[] #list of test cases to be dropped from df\n\nfor i in range(0,len(df)):\n label=df.iloc[i,0]\n if (i+1)%5==0: #grab out every one of five for test case\n test.append(df.iloc[i,1]) #store text\n if label=='positive': #store label\n label=1\n else:\n label=0\n testlabel.append(label)\n drop_list.append(i) #record of cases to be dropped\n continue\n elif label=='positive': #store label of training sets\n labellist.append(1)\n else:\n labellist.append(0)\n \ndf=df.drop(drop_list) #drop test cases from df and training set remains\ndf['relabel']=labellist #add new label(numerical label) to training set\n\ntrain=df['text'] #pick out training set text\nprint('...data ready...')\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import Pipeline #pipeline for data cleaning and learning\nclf = Pipeline([('vect',CountVectorizer(max_features=5000)),\n ('tfidf',TfidfTransformer()),\n ('clf',tree.DecisionTreeClassifier(min_samples_split=5000,\n max_depth=80))]) #dt classifier\nprint('...pipeline ready...')\nstart=time.time()\nclf=clf.fit(train, labellist) #train\nend=time.time()\nprint('...trained...')\ntraintime=end-start\nprint('training time=',traintime)\nstart=time.time()\nresult=clf.predict(test) #run clf model to predict test cases\nend=time.time()\ntesttime=end-start\nprint('testing time=',testtime)\nprint('...tested...')\nprecision=np.mean(result==testlabel) #precision\nprint(precision)\nprint('DT: CV.max_features=5000, classifier.min_s_s=5000,max_d=80')\n\nfrom sklearn.externals import joblib\njoblib.dump(clf,'dt_model.m') #clf model stored as dt_model.m\nprint('...model stored...')\n","sub_path":"hw2_dt.py","file_name":"hw2_dt.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"395326089","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def dfs(self, root, num):\n tmp = 0\n if root.left:\n tmp += self.dfs(root.left, num * 10 + root.val)\n if root.right:\n tmp += self.dfs(root.right, num * 10 + root.val)\n if not root.left and not root.right:\n tmp += 10 * num + root.val\n return tmp\n\n def sumNumbers(self, root):\n if not root:\n return 0\n return self.dfs(root, 0)\n","sub_path":"python/Sum Root to Leaf Numbers.py","file_name":"Sum Root to Leaf Numbers.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166776483","text":"import copy, pyaudio, wave, os, threading\nfrom pydub import AudioSegment\n\nclass MetronomeRecorder(threading.Thread):\n def __init__(self):\n super(MetronomeRecorder, self).__init__()\n self.init_constants()\n self.init_variables()\n\n def init_constants(self):\n self.TEMP_TICKS_WAVE_FILENAME = 'temp_ticks.wav'\n 
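# SAMPLING_RATE / CHUNK_SIZE (44100 / 1024, roughly 43) is how many buffer reads happen per second; get_ticks_per_metronome() divides it by beats-per-second to turn BPM into a chunk count\n        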
self.TEMP_RECORDING_WAVE_FILENAME = 'temp_recording.wav'\n self.WAVE_FILE_FORMAT = 'wav'\n self.CHUNK_SIZE = 1024\n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = 2\n self.SAMPLING_RATE = 44100\n self.TICKS_PER_SECOND = self.SAMPLING_RATE / self.CHUNK_SIZE\n self.SILENT_BYTE = '\\x00'\n self.DUMMY_QUEUE_ITEM = 1\n\n def init_variables(self):\n self.tick_wave_filename = ''\n self.output_wave_filename = ''\n self.recording_frames = []\n self.metronome_frames = []\n self.skip_ticks = 0\n self.bpm = 60\n self.current_tick = 0\n self.is_recording = False\n self.is_playing = False\n self.is_metronome_on = False\n self.thread_running = False\n self.ready_notifier = threading.Condition()\n\n def set_play_finished_notifier_queue(self, play_finished_notifier_queue):\n self.play_finished_notifier_queue = play_finished_notifier_queue\n\n def set_bpm(self, bpm):\n self.bpm = bpm\n\n def set_skip_ticks(self, skip_ticks):\n self.skip_ticks = skip_ticks\n\n def set_output_wave_filename(self, output_wave_filename):\n self.output_wave_filename = output_wave_filename\n\n def set_tick_sound_wave_filename(self, tick_wave_filename):\n self.tick_wave_filename = tick_wave_filename\n\n def run(self):\n while self.thread_running:\n with self.ready_notifier:\n self.ready_notifier.wait()\n if self.is_recording:\n self.record()\n elif self.is_playing:\n self.play()\n\n def start_thread(self):\n self.thread_running = True\n self.start()\n\n def stop_thread(self):\n if self.is_recording:\n self.stop_recording()\n elif self.is_playing:\n self.stop_playing()\n self.thread_running = False\n with self.ready_notifier:\n self.ready_notifier.notify()\n\n def record(self):\n while self.is_recording:\n self.current_tick += 1\n record_input_data = self.record_input_stream.read(self.CHUNK_SIZE)\n self.recording_frames.append(record_input_data)\n self.handle_metronome(len(record_input_data))\n self.write_final_output_and_reset()\n\n def handle_metronome(self, record_input_data_length):\n if self.current_tick % self.get_ticks_per_metronome(self.bpm) == 0:\n self.is_metronome_on = True\n if self.is_metronome_on:\n self.handle_metronome_on(record_input_data_length)\n else:\n self.handle_metronome_off(record_input_data_length)\n\n def handle_metronome_on(self, record_input_data_length):\n tick_output_data = self.tick_sound_wave.readframes(self.CHUNK_SIZE)\n if tick_output_data == '':\n self.is_metronome_on = False\n self.tick_sound_wave.rewind()\n if self.should_play_tick_out_loud():\n self.tick_output_stream.write(tick_output_data)\n tick_output_data = self.get_identical_length_tick_output_data(tick_output_data, record_input_data_length)\n self.metronome_frames.append(tick_output_data)\n \n def get_identical_length_tick_output_data(self, tick_output_data, record_input_data_length):\n missing_bytes_length = record_input_data_length - len(tick_output_data)\n if missing_bytes_length > 0:\n tick_output_data += missing_bytes_length * self.SILENT_BYTE\n return tick_output_data\n\n def handle_metronome_off(self, record_input_data_length):\n silent_frames = record_input_data_length * self.SILENT_BYTE\n self.metronome_frames.append(silent_frames)\n\n def get_ticks_per_metronome(self, bpm):\n bps = bpm / 60.0\n return int(self.TICKS_PER_SECOND / bps)\n\n def should_play_tick_out_loud(self):\n if self.skip_ticks == 0:\n return True\n ticks_per_metronome = self.get_ticks_per_metronome(self.bpm)\n ticks_to_skip = ticks_per_metronome * self.skip_ticks\n if (self.current_tick / ticks_to_skip) % 2 == 1:\n return True\n return False\n\n def 
play(self):\n record_frames = self.record_audio_wave.readframes(self.CHUNK_SIZE)\n while self.is_playing and not record_frames == '':\n self.record_output_stream.write(record_frames)\n record_frames = self.record_audio_wave.readframes(self.CHUNK_SIZE)\n self.play_reset()\n\n def play_reset(self):\n self.is_playing = False\n self.record_output_stream.stop_stream()\n self.record_output_stream.close()\n self.record_audio_output.terminate()\n self.play_finished_notifier_queue.put(self.DUMMY_QUEUE_ITEM)\n\n def load_tick_output_stream(self):\n self.tick_sound_wave = wave.open(self.tick_wave_filename, 'rb')\n self.tick_audio_output = pyaudio.PyAudio()\n f = self.tick_audio_output.get_format_from_width(self.tick_sound_wave.getsampwidth())\n c = self.tick_sound_wave.getnchannels()\n r = self.tick_sound_wave.getframerate()\n self.tick_output_stream = self.tick_audio_output.open(format = f, channels = c, rate = r, output = True)\n\n def load_record_input_stream(self):\n self.record_audio_input = pyaudio.PyAudio()\n f = self.FORMAT\n c = self.CHANNELS\n r = self.SAMPLING_RATE\n s = self.CHUNK_SIZE\n self.record_input_stream = self.record_audio_input.open(format = f, channels = c, rate = r, input = True, frames_per_buffer = s)\n\n def load_record_output_stream(self):\n self.record_audio_wave = wave.open(self.output_wave_filename, 'rb')\n self.record_audio_output = pyaudio.PyAudio()\n f = self.record_audio_output.get_format_from_width(self.record_audio_wave.getsampwidth())\n c = self.record_audio_wave.getnchannels()\n r = self.record_audio_wave.getframerate()\n self.record_output_stream = self.record_audio_output.open(format = f, channels = c, rate = r, output = True)\n\n def start_recording(self):\n self.is_recording = True\n self.load_record_input_stream()\n self.load_tick_output_stream()\n with self.ready_notifier:\n self.ready_notifier.notify()\n\n def stop_recording(self):\n self.is_recording = False\n\n def start_playing(self):\n self.is_playing = True\n self.load_record_output_stream()\n with self.ready_notifier:\n self.ready_notifier.notify()\n\n def stop_playing(self):\n self.is_playing = False\n\n def write_final_output_and_reset(self):\n self.record_input_stream.stop_stream()\n self.record_input_stream.close()\n self.tick_output_stream.stop_stream()\n self.tick_output_stream.close()\n self.record_audio_input.terminate()\n self.tick_audio_output.terminate()\n self.write_output_wave()\n self.cleanup_temp_files()\n self.reset_state()\n\n def write_output_wave(self):\n self.write_recording_wave()\n self.write_ticks_wave()\n self.combine_recording_and_ticks_waves()\n\n def write_recording_wave(self):\n recording_wave = wave.open(self.TEMP_RECORDING_WAVE_FILENAME, 'wb')\n recording_wave.setnchannels(self.CHANNELS)\n recording_wave.setsampwidth(self.record_audio_input.get_sample_size(self.FORMAT))\n recording_wave.setframerate(self.SAMPLING_RATE)\n recording_wave.writeframes(b''.join(self.recording_frames))\n recording_wave.close()\n\n def write_ticks_wave(self):\n ticks_wave = wave.open(self.TEMP_TICKS_WAVE_FILENAME, 'wb')\n ticks_wave.setnchannels(self.CHANNELS)\n ticks_wave.setsampwidth(self.tick_audio_output.get_sample_size(self.FORMAT))\n ticks_wave.setframerate(self.SAMPLING_RATE)\n ticks_wave.writeframes(b''.join(self.metronome_frames))\n ticks_wave.close()\n\n def combine_recording_and_ticks_waves(self):\n recording_audio_segment = AudioSegment.from_file(self.TEMP_RECORDING_WAVE_FILENAME)\n ticks_audio_segment = AudioSegment.from_file(self.TEMP_TICKS_WAVE_FILENAME)\n combined_audio_segment 
= recording_audio_segment.overlay(ticks_audio_segment)\n combined_audio_segment.export(self.output_wave_filename, format=self.WAVE_FILE_FORMAT)\n\n def cleanup_temp_files(self):\n if os.path.isfile(self.TEMP_TICKS_WAVE_FILENAME):\n os.remove(self.TEMP_TICKS_WAVE_FILENAME)\n if os.path.isfile(self.TEMP_RECORDING_WAVE_FILENAME):\n os.remove(self.TEMP_RECORDING_WAVE_FILENAME)\n\n def reset_state(self):\n self.recording_frames = []\n self.metronome_frames = []\n self.is_recording = False\n self.metronome_active = False\n self.current_tick = 0\n","sub_path":"metronome_recorder.py","file_name":"metronome_recorder.py","file_ext":"py","file_size_in_byte":9061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299493967","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 21 17:50:28 2018\r\n\r\nA Forest keeps track of all of trees contained within some rows x cols grid\r\nunder some predefined biological-enviornment parameters defined in config.py\r\n\r\nThe Forest class contains state functionality of a MDP.\r\n\r\n@author: Quentin Goehrig\r\n\"\"\"\r\n\r\nfrom tree import Tree\r\nimport config\r\nimport sys\r\nimport math\r\nfrom random import random, seed, shuffle\r\nfrom copy import copy, deepcopy\r\nfrom datetime import datetime\r\n\r\nclass Forest:\r\n \r\n num_healthy, num_viru, num_hypo, = 0, 0, 0\r\n # Infect maps to be used for for future work on infect_v2\r\n # Maps a point -> list of available points\r\n hv_infect_map = None\r\n v_infect_map = None\r\n \r\n def __init__(self, rows, cols, infect_version = 1, rand_seed = datetime.now()):\r\n self.rows = rows\r\n self.cols = cols\r\n self.infect_version = infect_version\r\n self.grid = [[None] * cols for i in range(rows)]\r\n seed(rand_seed)\r\n\r\n # Generates a random Tree grid based on 2002 CDF data\r\n def set_random_grid(self):\r\n new_grid = [[None] * self.cols for i in range(self.rows)]\r\n for row in range(0, self.rows):\r\n for col in range(0, self.cols):\r\n rating = 0\r\n stage = 0\r\n if random() < config.TREE_DENSITY:\r\n i = 0\r\n while i <= len(config.POP_2002_CDF):\r\n if random() < config.POP_2002_CDF[i]:\r\n rating = int(i / config.DBH_STAGE4) + 1\r\n stage = int(i % config.DBH_STAGE4) + 1\r\n break\r\n i += 1\r\n new_tree = Tree(row, col, rating, stage, config.UNTREATED)\r\n new_grid[row][col] = new_tree\r\n self.grid = new_grid\r\n \r\n def print_forest(self):\r\n grid = self.grid\r\n for row in grid:\r\n for tree in row:\r\n print(' '.join([str(tree.stage)]))\r\n # uncomment below for full tree details\r\n #tree.print_tree()\r\n print(\"---------------------------\")\r\n \r\n # Returns a 2D list of lists of trees, i.e., the new grid \r\n def get_next_year(self):\r\n prev_year = self.grid\r\n next_year = deepcopy(self.grid)\r\n \r\n coords = [(r,c) for r in range(self.rows) for c in range(self.cols)]\r\n shuffle(coords)\r\n \r\n for coord in coords:\r\n r = coord[0]\r\n c = coord[1]\r\n tree = prev_year[r][c] # original tree\r\n t_tree = next_year[r][c] # transformed tree\r\n \r\n if tree.stage != config.DEAD:\r\n next_stage_row = int(((tree.rating - 1) * config.DBH_STAGE4)) \\\r\n + (tree.stage - 1)\r\n \r\n for i in range(0, config.DBH_STAGE4 + 1):\r\n if random() < config.NEW_STAGE_CDF[next_stage_row][i]:\r\n t_tree.stage = i\r\n break\r\n \r\n next_rating_row = int(tree.treatment * (config.HEALTHY - 1)) \\\r\n + (tree.rating - 1)\r\n \r\n for i in range(0, config.HEALTHY - 1):\r\n if random() < config.NEW_RATING_CDF[next_rating_row][i]:\r\n 
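# the first rating bucket whose CDF entry beats a fresh uniform draw wins; ratings are 1-based\r\n                        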
t_tree.rating = i + 1\r\n break\r\n \r\n if tree.rating == config.V or tree.rating == config.HV:\r\n if self.infect_version == 1:\r\n self.infect_v1(tree.rating, r, c, prev_year, next_year)\r\n elif self.infect_version == 2:\r\n self.infect_v2(tree.rating, r, c, prev_year, next_year)\r\n \r\n rep = config.REPRODUCTION[tree.rating - 1][tree.stage - 1]\r\n l = math.exp(-rep)\r\n p = random()\r\n rand_poisson = 1\r\n \r\n while p > l :\r\n p = p * random()\r\n rand_poisson += 1\r\n rand_poisson -= 1\r\n \r\n sites = copy(coords)\r\n shuffle(sites)\r\n while rand_poisson > 0 and len(sites) > 0:\r\n site = sites.pop()\r\n s_r = site[0]\r\n s_c = site[1]\r\n if prev_year[s_r][s_c].stage == config.DEAD:\r\n next_year[s_r][s_c].stage = config.DBH_STAGE1\r\n next_year[s_r][s_c].rating = config.HEALTHY\r\n# self.num_births += 1\r\n rand_poisson -= 1\r\n \r\n return next_year\r\n \r\n # State transition\r\n def set_next_year(self):\r\n next_year = self.get_next_year()\r\n self.grid = next_year\r\n self.update_stats()\r\n \r\n \"\"\"\r\n The experimental infect function. This is significantly slower than the\r\n original infect function. This is due to the naive approach the function\r\n uses to select trees for infection. However, this method incorporates\r\n distance when selecting a tree. The intention is to provide a higher\r\n likelyhood of an infection for trees that are closer in distance to the\r\n source tree while still maintaining expected simulation results.\r\n \"\"\"\r\n def infect_v2(self, r, c, rating, prev_year, next_year):\r\n max_infections = int(round(math.exp(config.NUM_INF_CDF[0] * random() \\\r\n - config.NUM_INF_CDF[1])))\r\n events = int(round( random() * max_infections ))\r\n if events < 1:\r\n events = 1\r\n if rating == config.V:\r\n distribution = config.V_INFECT_RANGE_PROB_8M_INT\r\n else:\r\n distribution = config.HV_INFECT_RANGE_PROB_8M_INT\r\n infections = 0\r\n sporings = 0\r\n max_sporings = config.SPORE_SCALAR * events\r\n while infections < events and sporings < max_sporings:\r\n if rating == config.HV and random() < config.PER_HV_TO_HV:\r\n infect_type = config.HV\r\n else:\r\n infect_type = config.V\r\n infect_range = random() * len(distribution) * config.DIST_CLASS\r\n if infect_range <= config.DIST_CLASS:\r\n infect_range = config.DIST_CLASS\r\n attempt_coord = self.get_random_point_at_range(r, c, infect_range)\r\n# point_dist = self.get_distance(r, c, attempt_coord[0], attempt_coord[1])\r\n# t1 = int(point_dist / config.DIST_CLASS)\r\n# t2 = int(infect_range / config.DIST_CLASS)\r\n# print(infect_range, point_dist)\r\n land_prob_index = int(infect_range / config.DIST_CLASS)\r\n spore_land_prob = distribution[land_prob_index]\r\n attempt_tree = prev_year[attempt_coord[0]][attempt_coord[1]]\r\n if random() < spore_land_prob and attempt_tree.stage != config.DEAD:\r\n next_year[attempt_tree.r][attempt_tree.c].rating = infect_type\r\n infections += 1\r\n sporings += 1\r\n \r\n \"\"\"\r\n The original infect function. 
This is much faster than the newer infect\r\n function, however, it does not incorporate distance to tree when selecting\r\n new trees to be infected\r\n \"\"\"\r\n def infect_v1(self, r, c, rating, prev_year, next_year):\r\n if rating == config.V:\r\n spore_prob = config.PROB_OF_SPORE_VIRU\r\n else:\r\n spore_prob = config.PROB_OF_SPORE_HYPO\r\n num_infections = 0\r\n if random() < spore_prob:\r\n i_power = config.NUM_INF_CDF[0] * random() - config.NUM_INF_CDF[1]\r\n num_infections = int(round(math.exp(i_power)))\r\n for i in range(0, num_infections):\r\n if rating == config.HV and random() < config.PER_HV_TO_HV:\r\n infect_type = config.HV\r\n dist_coefficient = config.HV_DIST_OLD\r\n else:\r\n infect_type = config.V\r\n dist_coefficient = config.V_DIST_OLD\r\n # Calculate distance incrementally for readability\r\n coefficient_power = dist_coefficient[0] * random() - dist_coefficient[1]\r\n distance = round(math.exp(coefficient_power) * config.DIST_CLASS)\r\n distance = int( distance / config.SITE_SIZE )\r\n \r\n spore_destination = self.get_random_point_in_range(r, c, distance)\r\n dest_r = spore_destination[0]\r\n dest_c = spore_destination[1]\r\n if self.is_in_grid(dest_r, dest_c):\r\n next_year[dest_r][dest_c].rating = infect_type\r\n \r\n # Returns a tuple coordinate (r', c') of a random point at\r\n # 'p_range' distance of coordinate (r, c)\r\n def get_random_point_at_range(self, r, c, p_range):\r\n p_r = r\r\n p_c = c\r\n increment_r = random() < 0.5\r\n increment_c = random() < 0.5\r\n distance = sys.float_info.max\r\n while abs(distance - p_range) > config.SITE_SIZE and distance != 0:\r\n if random() < 0.5:\r\n p_r = p_r + 1 if increment_r else p_r - 1\r\n if not self.is_in_grid(p_r, p_c):\r\n p_r = r\r\n increment_r = not increment_r\r\n else:\r\n p_c = p_c + 1 if increment_c else p_c -1\r\n if not self.is_in_grid(p_r, p_c):\r\n p_c = c\r\n increment_c = not increment_c\r\n distance = self.get_distance(r, c, p_r, p_c)\r\n return ( p_r, p_c )\r\n \r\n # Copied straight from original java program\r\n # Does not guarantee point is in grid range\r\n def get_random_point_in_range(self, r, c, p_range):\r\n if random() < 0.5:\r\n rand_r = -int((random() * p_range) - 1)\r\n else:\r\n rand_r = int((random() * p_range) + 1)\r\n if random() < 0.5:\r\n rand_c = -int((random() * p_range) - 1)\r\n else:\r\n rand_c = int((random() * p_range) + 1)\r\n return ( rand_r + r, rand_c + c )\r\n \r\n def is_in_grid(self, r, c):\r\n return r >= 0 and c >= 0 and r < self.rows and c < self.cols\r\n\r\n def get_distance(self, r1, c1, r2, c2):\r\n return math.sqrt(math.pow((r2 - r1) * config.SITE_SIZE, 2) \\\r\n + math.pow((c2 - c1) * config.SITE_SIZE, 2))\r\n \r\n def update_stats(self):\r\n self.num_healthy, self.num_hypo, self.num_viru = 0, 0, 0\r\n for r in range(0, self.rows):\r\n for c in range(0, self.cols):\r\n if self.grid[r][c].rating == config.HEALTHY:\r\n self.num_healthy += 1\r\n elif self.grid[r][c].rating == config.HV:\r\n self.num_hypo += 1\r\n elif self.grid[r][c].rating == config.V:\r\n self.num_viru += 1\r\n \r\n# Testing\r\n#forest = Forest(50,50)\r\n#forest.set_random_grid()\r\n#forest.print_forest()\r\n#for i in range( 0, 10 ):\r\n# forest.set_next_year()\r\n# print(i)\r\n#forest.print_forest()\r\n","sub_path":"src/forest.py","file_name":"forest.py","file_ext":"py","file_size_in_byte":11038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"370786935","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:X.Ning \n\n#14. 
Check whether a string is a valid identifier, i.e. usable as a variable name (note that system keywords also test as valid even though they are excluded)\nname14_1 = 'x.ning'\nname14_2 = 'User_name'\nv1 = name14_1.isidentifier()\nv2 = name14_2.isidentifier()\nprint(v1)\nprint(v2)\n\n# 15. Check whether all cased characters are lowercase\nname15_1 = 'X.Ning'\nname15_2 = 'x.ning'\nv1 = name15_1.islower()\nv2 = name15_2.islower()\nprint(v1)\nprint(v2)\n\n# 16. Check whether all cased characters are uppercase\nname16_1 = 'X.Ning'\nname16_2 = 'X.NING'\nv1 = name16_1.isupper()\nv2 = name16_2.isupper()\nprint(v1)\nprint(v2)\n\n# 17. Check whether the string contains hidden characters (\t,\n etc.)\nname17_1 = \"老子天下第一\"\nname17_2 = \"谦虚 \\n\"\nv1 = name17_1.isprintable()\nv2 = name17_2.isprintable()\nprint(v1)\nprint(v2)\n\n# 18. Check whether the string is whitespace only\nname18_1 = \"老子天下第一\"\nname18_2 = \"\\n \\t\"\nname18_3 = \" \"\nv1 = name18_1.isspace()\nv2 = name18_2.isspace()\nv3 = name18_3.isspace()\nprint(v1)\nprint(v2)\nprint(v3)\n\n# 19. String joining ***** (a five-star feature)\nname19_1 = \"老子天下第一\"\nname19_2 = \"ning\"\nv1 = ' '.join(name19_1)\nv2 = '_'.join(name19_2)\nprint(v1)\nprint(v2)\n\n# 20. Padding on the left and right\n# center,ljust,rjust\nname20 = \"ning\"\nv = name20.center(20,'*')\nv1 = name20.rjust(20,'*')\nv2 = name20.ljust(10,'.')\nprint(v)\nprint(v1)\nprint(v2)","sub_path":"day02/str2.py","file_name":"str2.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"405053089","text":"# -*- coding: utf-8 -*-\n############################\n# Peicheng Lu 20190822\n############################\n#\nimport os\nfrom os import *\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math\nimport sys\nimport imutils\n\ndef getMatchNum(matches,ratio):\n    '''number of matched features and relation'''\n    matchesMask=[[0,0] for i in range(len(matches))]\n    matchNum=0\n    for i,(m,n) in enumerate(matches):\n        if m.distance < n.distance * ratio: # Lowe ratio test; the '<' comparison was lost to extraction and is reconstructed from context\n            matchesMask[i]=[1,0]\n            matchNum+=1\n    return (matchNum,matchesMask)\n\nqueryPath='./data/' #path of database\nfiles = listdir(queryPath)\nsamplePath=sys.argv[1] #input sample\n#sift extractor\nsift = cv2.xfeatures2d.SIFT_create() \n#FLANN matching\nFLANN_INDEX_KDTREE=0\nindexParams=dict(algorithm=FLANN_INDEX_KDTREE,trees=5)\nsearchParams=dict(checks=50)\nflann=cv2.FlannBasedMatcher(indexParams,searchParams)\n\nratio_l=[]\nvis_l=[]\n\noutputPath='./Output/' #path of output folder\ntmpfiles = os.listdir(outputPath)\nfor f in tmpfiles:\n    os.remove('./Output/'+str(f))\n    \nsampleImage=cv2.imread(samplePath,0)\nsampleImage = imutils.resize(sampleImage, width = 300)\nsampleImage = cv2.GaussianBlur(sampleImage, (5, 5), 0)\n#sampleImage = cv2.Canny(sampleImage, 30, 150)\nkp1, des1 = sift.detectAndCompute(sampleImage, None) #detect the features of sample\nprint (len(files))\nfor f in files:\n    f=queryPath+f\n    print(str(f))\n    \n    queryImage=cv2.imread(f,0)\n    try:\n        queryImage = imutils.resize(queryImage, width = 300)\n    except AttributeError:\n        continue\n    else:\n        queryImage = cv2.GaussianBlur(queryImage, (5, 5), 0)\n        #queryImage = cv2.Canny(queryImage, 30, 150)\n        kp2, des2 = sift.detectAndCompute(queryImage, None) #detect the features of img in database\n        \n        matches=flann.knnMatch(des1,des2,k=2) #matched features, assign k=2 to return 2 matched features.\n\n        (matchNum,matchesMask)=getMatchNum(matches,0.9) #set ratio = 0.9 to calculate the matching level\n        matchRatio=matchNum*100/len(matches)\n        drawParams=dict(matchColor=(0,255,0), singlePointColor=(0,0,255), matchesMask=matchesMask, flags=0)\n        sampleImage=cv2.imread(samplePath) #re-read both images in colour for drawing\n        sampleImage = imutils.resize(sampleImage, width = 300)\n        queryImage=cv2.imread(f)\n        queryImage = 
imutils.resize(queryImage, width = 300)\n #(hA, wA) =sampleImage.shape[:2] \n #(hB, wB) = queryImage.shape[:2]\n comparisonImage=cv2.drawMatchesKnn(sampleImage,kp1,queryImage,kp2,matches,None,**drawParams)\n #cv2.putText(comparisonImage,str(matchRatio) + \"%\",(int(wA+wB/2.),int(3.*hB/4.)),cv2.FONT_HERSHEY_PLAIN,int(1.*hB/50.),(0,0,255),4)\n ratio_l.append(matchRatio)\n vis_l.append(comparisonImage)\n for i in range(0,len(ratio_l)-1): \n for j in range(0,len(ratio_l)-1-i): \n if ratio_l[j] < ratio_l[j+1]: \n tmp = ratio_l[j]\n ratio_l[j] = ratio_l[j+1]\n ratio_l[j+1] = tmp\n tmpv = vis_l[j]\n vis_l[j] = vis_l[j+1]\n vis_l[j+1] = tmpv\n #print (\"i = \" + str(i))\n #print (\"j= \" + str(j))\n #print (\"len(ratio_l) =\" +str(len(ratio_l)))\n if len(ratio_l) > 50:\n del ratio_l[50]\n del vis_l[50]\n\nfor k in range(0,len(ratio_l)):\n outpath = \"./Output/\" + str(k+1) + \"-\" +\"(\"+str(round(ratio_l[k],3)) + \").jpg\"\n print (\"===========================\")\n print (str(ratio_l[k]) + \"% 相似度\" )\n print (outpath)\n cv2.imwrite(outpath, vis_l[k])\n\n \n\"\"\"\ncolumn=4\nrow=5\n#绘图显示\nfigure,ax=plt.subplots(row,column)\nfor index in range(0,20):\n ax[int(index/column)][index%column].set_title('Similiarity %.2f%%' % ratio_l[index])\n ax[int(index/column)][index%column].imshow(vis_l[index])\nplt.show()\n\"\"\"","sub_path":"pic_features/_run.py","file_name":"_run.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"196214938","text":"import pandas as pd #(version 1.0.0)\r\nimport plotly #(version 4.5.4) pip install plotly==4.5.4\r\nimport plotly.express as px\r\n\r\nimport dash #(version 1.9.1) pip install dash==1.9.1\r\nimport dash_table\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\napp = dash.Dash()\r\napp\r\ndf= pd.read_excel(\"WorkingMasterdata.xlsx\")\r\ndff = df.groupby('Programme', as_index=False)[['Amount Paid','Amount Pending']].sum()\r\nprint(dff)\r\nPAGE_SIZE = 5\r\n\r\napp.layout =html.Div([\r\n\r\n\r\n\r\nhtml.Div([dash_table.DataTable(\r\n id='datatable-paging',\r\n columns=[\r\n {\"name\": i, \"id\": i} for i in sorted(df.columns)\r\n ],\r\n page_current=0,\r\n page_size=PAGE_SIZE,\r\n page_action='custom'\r\n)],className='six columns'),\r\n\r\n\r\n\r\n html.Div([\r\n html.Div([\r\n html.Div([\r\n dcc.Dropdown(id='campusdropdown',\r\n options=[\r\n {'label': 'PKD', 'value': 'PKD'},\r\n {'label': 'BBSR', 'value': 'BBSR'},\r\n {'label': 'VSKP', 'value': 'VSKP'}\r\n \r\n ],\r\n placeholder=\"Select The Campus\",\r\n value=[\"PKD\",\"BBSR\",\"VSKP\"],\r\n multi=True,\r\n style={\r\n 'height': '10%', \r\n 'width': '60%', \r\n 'font-size': \"100%\",\r\n 'display': 'inline-block', 'padding': '10px',\r\n \r\n \r\n },\r\n ),\r\n ],className='six columns'),\r\n\r\n html.Div([\r\n dcc.Dropdown(id='semdropdown',\r\n options=[\r\n {'label': 'starting semister', 'value': 'During Start of Academics'},\r\n {'label': 'end semister', 'value': 'During start of odd Semester'}\r\n ],\r\n placeholder=\"Select any Semister\",\r\n value=['During Start of Academics', 'During start of odd Semester'],\r\n multi=True,\r\n clearable=False,\r\n style={\r\n 'height': '10%', \r\n 'width': '60%', \r\n 'font-size': \"100%\",\r\n 'display': 'inline-block', 'padding': '10px',\r\n 'float': 'right','margin': '-40px',\"marginRight\": \"20px\"\r\n \r\n },\r\n ),\r\n ],className='six columns'),\r\n\r\n\r\n html.Div([\r\n 
dcc.Dropdown(id='schdropdown',\r\n options=[\r\n {'label': 'SCHOOL OF MANAGEMENT', 'value': 'SoM'},\r\n {'label': 'SCHOOL OF AGRICULTURE & BIO-ENGINEERING', 'value': 'SOABE'}\r\n ],\r\n placeholder=\"Select any School\",\r\n value=[\"SoM\",\"SOABE\"],\r\n multi=True,\r\n clearable=False,\r\n style={\r\n 'height': '10%', \r\n 'width': '60%', \r\n 'font-size': \"100%\",\r\n 'display': 'inline-block', 'padding': '10px'\r\n \r\n },\r\n ),\r\n ],className='six columns'),\r\n\r\n\r\n html.Div([\r\n dcc.Dropdown(id='Progdropdown',\r\n options=[\r\n {'label': 'BBA', 'value': 'BBA'},\r\n {'label': 'B.TECH. AG.', 'value': 'B.TECH. AG.'},\r\n {'label': 'B.TECH.Dairy', 'value': 'B.TECH.Dairy'}],\r\n placeholder=\"Select any Programme\",\r\n value=[\"BBA\",\"B.TECH. AG.\",\"B.TECH.Dairy\"],\r\n multi=True,\r\n clearable=False,style={\r\n 'height': '10%', \r\n 'width': '60%', \r\n 'font-size': \"100%\",\r\n 'display': 'inline-block', 'padding': '10px',\r\n 'display': 'inline-block', 'padding': '10px',\r\n 'float': 'right','margin': '-40px',\"marginRight\": \"20px\"\r\n \r\n },\r\n ),\r\n ],className='six columns'),\r\n\r\n html.Div([\r\n dcc.Dropdown(id='amountdropdown',\r\n options=[\r\n {'label': 'Amount Paid', 'value': 'Amount Paid'},\r\n {'label': 'Amount Pending', 'value': 'Amount Pending'}],\r\n placeholder=\"Select your Requirement\",\r\n value='Amount Paid',\r\n multi=False,\r\n clearable=False,\r\n style={\r\n 'height': '10%', \r\n 'width': '60%', \r\n 'font-size': \"100%\",\r\n 'display': 'inline-block', 'padding': '10px',\r\n \r\n },\r\n ),\r\n ],className='six columns'),\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n ],className='row'),\r\n ]),\r\n html.Div([\r\n html.Div([\r\n dcc.Graph(id='barchart'),\r\n ],className='six columns'),\r\n\r\n ],className='row')\r\n\r\n\r\n])\r\n######################################################################\r\n@app.callback(\r\n Output('barchart', 'figure'),\r\n [Input('campusdropdown', 'value'),\r\n Input('semdropdown', 'value'),\r\n Input('schdropdown', 'value'),\r\n Input('Progdropdown', 'value'),\r\n Input('amountdropdown', 'value')]\r\n)\r\ndef update_data(campusdropval,semdropval,schdropval,Progdropval,amountdropval):\r\n print(campusdropval)\r\n df_filterd = df[df[\"Campus\"].isin(campusdropval)]\r\n df_filterd = df_filterd[df_filterd[\"Fee Collection Checkpoint\"].isin(semdropval)]\r\n print( df_filterd)\r\n df_filterd = df_filterd[df_filterd[\"School\"].isin(schdropval)]\r\n df_filterd = df_filterd[df_filterd[\"Programme\"].isin(Progdropval)]\r\n\r\n## df = px.data.tips()\r\n## pie_chart=px.pie(df, values='tip', names='day')\r\n bar_chart=px.bar(\r\n data_frame=df_filterd,\r\n x=\"Programme\",\r\n y=amountdropval,\r\n labels={'Programme':'Programme'}\r\n )\r\n \r\n \r\n return (bar_chart)\r\n@app.callback(\r\n Output('datatable-paging', 'data'),\r\n [Input('datatable-paging', \"page_current\"),\r\n Input('datatable-paging', \"page_size\")])\r\ndef update_table(page_current,page_size):\r\n return df.iloc[\r\n page_current*page_size:(page_current+ 1)*page_size\r\n ].to_dict('records')\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=False,use_reloader=False)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"66851550","text":"from student.models import Student\nfrom django.contrib import admin\n\nclass StudentAdmin(admin.ModelAdmin):\n fieldsets = 
[\n\t('User Info', {'fields': ['user']}),\n\t('email', {'fields': ['email']}),\n\t('nick', {'fields': ['nick']}),\n ]\n readonly_fields = ('user',)\n\nadmin.site.register(Student, StudentAdmin)","sub_path":"bluemoon/student/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548082090","text":"\"\"\"\nMini-application: Buttons on a Tkinter GUI tell the robot to:\n - Go forward at the speed given in an entry box.\n\nAlso: responds to Beacon button-presses by beeping, speaking.\n\nThis module runs on the ROBOT.\nIt uses MQTT to RECEIVE information from a program running on the LAPTOP.\n\nAuthors: David Mutchler, his colleagues, and Alexander Harris.\n\"\"\"\n# ------------------------------------------------------------------------------\n# DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n# ------------------------------------------------------------------------------\n\n# ------------------------------------------------------------------------------\n# DONE: 2. With your instructor, review the \"big picture\" of laptop-robot\n# TODO: communication, per the comment in mqtt_sender.py.\n# TODO: Once you understand the \"big picture\", delete this TODO.\n# ------------------------------------------------------------------------------\n\nimport rosebotics_new as rb\nimport time\nimport mqtt_remote_method_calls as com\nimport ev3dev.ev3 as ev3\nimport math\n\n\ndef main():\n # --------------------------------------------------------------------------\n # DONE: 3. Construct a Snatch3rRobot. Test. When OK, delete this TODO.\n # --------------------------------------------------------------------------\n robot = rb.Snatch3rRobot()\n\n # --------------------------------------------------------------------------\n # TODO: 4. Add code that constructs a com.MqttClient that will\n # TODO: be used to receive commands sent by the laptop.\n # TODO: Connect it to this robot. Test. When OK, delete this TODO.\n # --------------------------------------------------------------------------\n rc = RemoteControlETC(robot)\n mqtt_client = com.MqttClient(rc)\n mqtt_client.connect_to_pc()\n\n # --------------------------------------------------------------------------\n # TODO: 5. Add a class for your \"delegate\" object that will handle messages\n # TODO: sent from the laptop. Construct an instance of the class and\n # TODO: pass it to the MqttClient constructor above. Augment the class\n # TODO: as needed for that, and also to handle the go_forward message.\n # TODO: Test by PRINTING, then with robot. When OK, delete this TODO.\n # --------------------------------------------------------------------------\n\n # --------------------------------------------------------------------------\n # TODO: 6. With your instructor, discuss why the following WHILE loop,\n # TODO: that appears to do nothing, is necessary.\n # TODO: When you understand this, delete this TODO.\n # --------------------------------------------------------------------------\n while True:\n # ----------------------------------------------------------------------\n # TODO: 7. Add code that makes the robot beep if the top-red button\n # TODO: on the Beacon is pressed. Add code that makes the robot\n # TODO: speak \"Hello. How are you?\" if the top-blue button on the\n # TODO: Beacon is pressed. Test. 
When done, delete this TODO.\n # ----------------------------------------------------------------------\n time.sleep(0.01) # For the delegate to do its work\n if robot.beacon_button_sensor.is_top_red_button_pressed():\n ev3.Sound.beep().wait()\n\nclass RemoteControlETC(object):\n \"\"\"\n Stores the robot\n :type robot: rb.Snatch3rRobot\n \"\"\"\n def __init__(self, robot):\n self.robot = robot\n self.direction = 0\n self.multiplier = 1\n self.speed = 100\n self.toggle = 0\n\n def speed_setup(self, speed_string):\n try:\n speed = int(speed_string)\n except:\n speed = 100\n\n self.speed = speed\n\n def multiplier_setup(self, multiplier_string):\n try:\n multiplier = int(multiplier_string)\n except:\n multiplier = 1\n\n self.multiplier = multiplier\n\n def change_value(self, value):\n try:\n value = int(value)\n except:\n value = 0\n\n self.toggle = value\n print(self.toggle)\n\n\n\n def coordinate_setup(self, coordinate_list):\n print(\"*****\")\n print(coordinate_list)\n print(\"*****\")\n\n list = []\n for k in range(0, len(coordinate_list)-1, 2):\n pointlist = []\n pointlist = pointlist + [coordinate_list[k]]\n pointlist = pointlist + [coordinate_list[k+1]]\n list = list + [pointlist]\n\n print(\"***Finished List***\")\n print(list)\n print(\"***Finished List***\")\n\n self.drive_start(list)\n\n def drive_start(self, list):\n if len(list) < 2:\n print(list)\n print(\"error\")\n return\n\n for k in range(len(list)-1):\n xpos = list[k][0]\n ypos = list[k][1]\n xfpos = list[k+1][0]\n yfpos = list[k+1][1]\n x = xfpos - xpos\n y = yfpos - ypos\n if (x == 0) or (y == 0):\n if (x == 0):\n self.robot.drive_system.go_straight_inches(((y/111)*self.multiplier), self.speed)\n else:\n if (x < 0):\n self.robot.drive_system.spin_in_place_degrees(-90)\n self.robot.drive_system.go_straight_inches(((x/111)*self.multiplier), self.speed)\n self.robot.drive_system.spin_in_place_degrees(90)\n else:\n self.robot.drive_system.spin_in_place_degrees(90)\n self.robot.drive_system.go_straight_inches(((x/111)*self.multiplier), self.speed)\n self.robot.drive_system.spin_in_place_degrees(-90)\n else:\n print(\"X move and Y move\")\n print(\"X:\", x)\n print(\"Y:\", y)\n print(\"*****************\")\n distance = math.sqrt(((x**2)+(y**2)))\n distance = ((distance/111) * self.multiplier)\n print(\"***Distance Traveling (in inches)***\")\n print(distance)\n print(\"************************************\")\n if (yfpos < ypos):\n theta = math.atan(((abs(y)) / (abs(x))))\n theta = ((theta * 180) / math.pi)\n theta = 90 - theta\n else:\n theta = math.atan(((abs(x)) / (abs(y))))\n theta = ((theta * 180) / math.pi)\n theta = 180 - theta\n print(\"***Turning Angle (in degrees)***\")\n print(theta)\n print(\"********************************\")\n dis = distance/80\n i = 0\n if (x < 0):\n self.robot.drive_system.spin_in_place_degrees(-theta)\n if self.toggle == 1:\n while True:\n if i < distance:\n self.robot.drive_system.go_straight_inches(dis,self.speed)\n i = i + dis\n else:\n break\n\n if (70 * ((self.robot.proximity_sensor.get_distance_to_nearest_object())/100)) < 10:\n self.robot.drive_system.stop_moving()\n ev3.Sound.speak('There is an object in my path!')\n return\n else:\n self.robot.drive_system.go_straight_inches(distance, self.speed)\n self.robot.drive_system.spin_in_place_degrees(theta)\n else:\n self.robot.drive_system.spin_in_place_degrees(theta)\n if self.toggle == 1:\n while True:\n if i < distance:\n self.robot.drive_system.go_straight_inches(dis, self.speed)\n i = i + dis\n else:\n break\n\n if (70 * 
((self.robot.proximity_sensor.get_distance_to_nearest_object()) / 100)) < 10:\n self.robot.drive_system.stop_moving()\n ev3.Sound.speak('There is an object in my path!')\n return\n else:\n self.robot.drive_system.go_straight_inches(distance, self.speed)\n self.robot.drive_system.spin_in_place_degrees(-theta)\n\n\n def go_forward(self, speed_string):\n try:\n speed = int(speed_string)\n except:\n speed = 100\n self.robot.drive_system.start_moving(speed,speed)\n\n\n def go_backward(self, speed_string):\n try:\n speed = int(speed_string)\n speed = -speed\n except:\n speed = -100\n\n self.robot.drive_system.start_moving(speed,speed)\n\n\n def spin_left(self, degree_string):\n try:\n degrees = int(degree_string)\n degrees = -degrees\n except:\n degrees = -90\n\n self.robot.drive_system.spin_in_place_degrees(degrees)\n\n\n def spin_right(self, degree_string):\n try:\n degrees = int(degree_string)\n degrees = degrees\n except:\n degrees = 90\n\n self.robot.drive_system.spin_in_place_degrees(degrees)\n\n\n def stop(self):\n self.robot.drive_system.stop_moving()\n\n\n def move_inches(self, inches_string):\n try:\n inches = int(inches_string)\n except:\n inches = 10\n\n self.robot.drive_system.go_straight_inches(inches)\n\n\n # def move_arm(self, position_string):\n # try:\n # position = int(position_string)\n # except:\n # position = 14.2\n #\n # if position > 14.2:\n # position = 14.2\n # elif position < 0:\n # position = 0\n #\n # self.robot.arm.move_arm_to_position(position, 100)\n\n\nmain()","sub_path":"src/capstone_1_runs_on_robot.py","file_name":"capstone_1_runs_on_robot.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529569809","text":"#:::IMPORTS:::\n\nimport os\nimport sys\nimport my_utils\nimport requests\nimport ebaysdk\nfrom ebaysdk.finding import Connection\nimport printer_library\nimport time\nfrom datetime import datetime\nfrom UTILS import ebay_data_lib\n\n#:::OBJECTS:::\n\np = printer_library.lib()\n\ndb = ebay_data_lib.db()\n\n#:::DEFINITIONS:::\n\n#: Find Nth Occurence of substring in string\n\ndef find_nth(string, substring, n):\n\tif (n == 1):\n\t\treturn string.find(substring)\n\telse:\n\t\treturn string.find(substring, find_nth(string, substring, n - 1) + 1)\n\n#: Add Request to Queue\n\ndef add_to_queue():\n\n\tqueue = db.insert_queue()\n\tfor i in range(0, 1000):\n\t\tcur_queue = db.get_queue()\n\t\tif int(cur_queue) == int(queue):\n\t\t\treturn 1\n\t\telse:\n\t\t\ttime.sleep(17.3)\n\treturn 0\n\n#: Remove Job from Queue\n\ndef remove_from_queue():\n\n\tdb.remove_queue()\n\treturn 1\n\n#: Search Class\n\nclass Search:\n\n\t#: Initialization\n\n\tdef __init__(self, _key=db.get_api_key()):\n\n\t\tself.api = Connection(appid=_key, config_file=None)\n\n\t\tself.init_all_tables()\n\n\t\tp.print_main('Search API Initialized:')\n\n\t#: Search\n\n\tdef search(self, _keywords, _condition, _min_price, _max_price, _data_collect=True, _profit=0):\n\n\t\tqueue = -1\n\n\t\tif (_data_collect == False) and _profit > 0:\n\n\t\t\tactive_search = 1\n\n\t\telse:\n\n\t\t\tactive_search = 0\n\n\t\ttry:\n\n\t\t\tp.print_bullet('Attempting to search EBay for keywords: ' + str(_keywords) + ' in ' + _condition + ' condition with a max price of $' + str(_max_price) + ':')\n\n\t\t\t#Query\n\n\t\t\tqueue = add_to_queue()\n\n\t\t\tself.api.execute('findItemsAdvanced', {\n\t\t\t\t'keywords': _keywords,\n\t\t\t\t'itemFilter': [\n\t\t\t\t\t{'name': 'Condition', 'value': _condition},\n\t\t\t\t\t{'name': 
'MaxPrice', 'value': str(_max_price), 'paramName': 'Currency', 'paramValue': 'USD'},\n\t\t\t\t\t{'name': 'MinPrice', 'value': str(_min_price), 'paramName': 'Currency', 'paramValue': 'USD'},\n\t\t\t\t\t{'name': 'ListingType', 'value': 'AuctionWithBIN'}\n\t\t\t\t],\n\t\t\t\t'paginationInput': {\n\t\t\t\t\t'entriesPerPage': 200,\n\t\t\t\t\t'pageNumber': '1'\n\t\t\t\t},\n\t\t\t\t'sortOrder': 'StartTimeNewest'\n\t\t\t})\n\n\t\t\tresp_dict = self.api.response.dict()\n\n\t\t\tremove_from_queue()\n\n\t\t\titem_no = -1\n\n\t\t\tfor item in resp_dict['searchResult']['item']:\n\n\t\t\t\titem_no = item_no + 1\n\n\t\t\t\titem_id = int(item['itemId'])\n\t\t\t\tcondition = str(item['condition']['conditionDisplayName'])\n\t\t\t\tshipping_type = str(item['shippingInfo']['shippingType'])\n\t\t\t\tshipping_locations = str(item['shippingInfo']['shipToLocations'])\n\t\t\t\treturns_accepted = str(item['returnsAccepted'])\n\t\t\t\titem_link = str(item['viewItemURL'])\n\t\t\t\tstart_time = int(self.parse_start_time(item['listingInfo']['startTime']))\n\t\t\t\tshipping_calc = 1\n\t\t\t\tmatch = 0\n\t\t\t\tignore = 0\n\t\t\t\tbuy_it_now_price = 0.00\n\t\t\t\tshipping_price = 0.00\n\n\t\t\t\tp.print_tread('Item ID: ' + str(item_id))\n\n\t\t\t\ttry:\n\t\t\t\t\tbuy_it_now_price = float(str(item['listingInfo']['buyItNowPrice']['value']))\n\t\t\t\t\tp.print_arrow('Price: $' + str(buy_it_now_price))\n\t\t\t\texcept:\n\t\t\t\t\tp.print_arrow('Buy it Now not available!', 'warning')\n\n\t\t\t\ttry:\n\t\t\t\t\tshipping_price = float(str(item['shippingInfo']['shippingServiceCost']['value']))\n\t\t\t\t\tshipping_currency = str(item['shippingInfo']['shippingServiceCost']['_currencyId'])\n\t\t\t\t\tp.print_arrow('Shipping Cost: $' + str(shipping_price) + shipping_currency)\n\t\t\t\texcept:\n\t\t\t\t\tp.print_arrow('Could not get shipping info!', 'warning')\n\t\t\t\t\tshipping_calc = 0\n\n\t\t\t\tp.print_arrow('Condition: ' + condition)\n\t\t\t\tp.print_arrow('Shipping Type: ' + shipping_type)\n\t\t\t\tp.print_arrow('Shipping Locations: ' + shipping_locations)\n\t\t\t\tp.print_arrow('Returns Accepted: ' + returns_accepted)\n\t\t\t\tp.print_arrow('Item Link: ' + item_link)\n\t\t\t\tp.print_arrow('Start Time: ' + str(start_time))\n\t\t\t\tp.print_arrow('Fetched data for ' + str(item_id) + '.', 'success')\n\n\t\t\t\tif db.get_search_result(item_id) == -1:\n\t\t\t\t\tdb.insert_search_result(item_id,\n\t\t\t\t\t\t\t\t\t\t\tbuy_it_now_price,\n\t\t\t\t\t\t\t\t\t\t\tshipping_price,\n\t\t\t\t\t\t\t\t\t\t\t(buy_it_now_price + shipping_price),\n\t\t\t\t\t\t\t\t\t\t\tshipping_calc,\n\t\t\t\t\t\t\t\t\t\t\tcondition,\n\t\t\t\t\t\t\t\t\t\t\tshipping_type,\n\t\t\t\t\t\t\t\t\t\t\tshipping_locations,\n\t\t\t\t\t\t\t\t\t\t\treturns_accepted,\n\t\t\t\t\t\t\t\t\t\t\titem_link,\n\t\t\t\t\t\t\t\t\t\t\tstart_time,\n\t\t\t\t\t\t\t\t\t\t\tstr(_keywords),\n\t\t\t\t\t\t\t\t\t\t\tmatch,\n\t\t\t\t\t\t\t\t\t\t\tignore\n\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tdb.update_search_result(item_id,\n\t\t\t\t\t\t\t\t\t\t\tbuy_it_now_price,\n\t\t\t\t\t\t\t\t\t\t\tshipping_price,\n\t\t\t\t\t\t\t\t\t\t\t(buy_it_now_price + shipping_price),\n\t\t\t\t\t\t\t\t\t\t\tshipping_calc,\n\t\t\t\t\t\t\t\t\t\t\tcondition,\n\t\t\t\t\t\t\t\t\t\t\tshipping_type,\n\t\t\t\t\t\t\t\t\t\t\tshipping_locations,\n\t\t\t\t\t\t\t\t\t\t\treturns_accepted,\n\t\t\t\t\t\t\t\t\t\t\titem_link,\n\t\t\t\t\t\t\t\t\t\t\tstart_time,\n\t\t\t\t\t\t\t\t\t\t\tstr(_keywords),\n\t\t\t\t\t\t\t\t\t\t\tmatch,\n\t\t\t\t\t\t\t\t\t\t\tignore\n\t\t\t\t\t\t\t\t\t\t\t)\n\n\t\t\t\tsaved_search_res = 
db.get_saved_search(str(_keywords))\n\n\t\t\t\tif saved_search_res == -1 and item_no == 0:\n\t\t\t\t\tdb.insert_saved_search(str(_keywords), str(condition), int(_min_price), int(_max_price), int(_profit), float(0.00), int(active_search))\n\n\t\t\t\telif saved_search_res != -1 and item_no == 0:\n\t\t\t\t\tdb.update_saved_search(str(_keywords), str(condition), int(_min_price), int(_max_price), int(_profit), float(saved_search_res), int(active_search))\n\n\t\t\tif (len(resp_dict['searchResult']['item']) > 0) and (_data_collect == False) and (_profit > 0):\n\n\t\t\t\tself.calculate_average_market_price(_keywords, True, _profit)\n\n\t\t\tp.print_bullet('Ebay Search for ' + str(_keywords) + ' complete.', 'success')\n\t\t\treturn 1\n\n\t\texcept:\n\n\t\t\tp.print_tread('Could not complete search.', 'error', True)\n\t\t\tif queue == -1:\n\t\t\t\tremove_from_queue()\n\t\t\treturn 0\n\n\t#: Initialize all Tables\n\n\tdef init_all_tables(self):\n\n\t\ttable_data = db.get_tables()\n\t\ttables = []\n\t\tfor i in range(0, len(table_data)):\n\n\t\t\ttables.append(table_data[i][0])\n\n\t\tif 'queue' not in tables:\n\t\t\tdb.initialize_queue_table()\n\t\tif 'api_key' not in tables:\n\t\t\tdb.initialize_api_key_table()\n\t\tif 'login_creds' not in tables:\n\t\t\tdb.initialize_login_creds_table()\n\t\tif 'search_results' not in tables:\n\t\t\tdb.initialize_search_results_table()\n\t\tif 'saved_searches' not in tables:\n\t\t\tdb.initialize_saved_searches_table()\n\n\t\treturn 1\n\n\t#: Calculate Average Market Price\n\n\tdef calculate_average_market_price(self, keywords, update_match=False, profit=0):\n\n\t\tp.print_bullet('Attempting to get Average Market Price for \"' + str(keywords) + '\":')\n\n\t\tdata_keys = db.get_all_item_search_results(keywords)\n\n\t\ttotal_price = 0\n\n\t\tfor i in range(0, len(data_keys)):\n\n\t\t\ttotal_price = total_price + float(data_keys[i]['total_price'])\n\n\t\taverage_price = total_price / len(data_keys) # mean of the total price over all returned items\n\n\t\tp.print_tread('Average Price: $' + format(average_price, '.2f'), 'success')\n\n\t\tif update_match == True:\n\n\t\t\tfor i in range(0, len(data_keys)):\n\n\t\t\t\tif (float(average_price) - float(data_keys[i]['total_price'])) >= profit:\n\n\t\t\t\t\tdb.set_search_match(int(data_keys[i]['item_id']), 1)\n\n\t\t\t\telif (float(average_price) - float(data_keys[i]['total_price'])) < profit:\n\n\t\t\t\t\tdb.set_search_match(int(data_keys[i]['item_id']), 0)\n\n\t\tdb.set_search_average_price(str(keywords), average_price)\n\n\t\treturn average_price\n\n\t#: Parse Ebay Start Time\n\n\tdef parse_start_time(self, _time):\n\n\t\tday, month, year, hours, minutes, seconds = 0, 0, 0, 0, 0, 0\n\n\t\tyear = int(_time[:_time.find('-')])\n\t\tmonth = int(_time[(_time.find('-') + 1):find_nth(_time, '-', 2)])\n\t\tday = int(_time[(find_nth(_time, '-', 2) + 1):_time.find('T')])\n\t\thours = int(_time[(_time.find('T') + 1):_time.find(':')])\n\t\tminutes = int(_time[(_time.find(':') + 1):find_nth(_time, ':', 2)])\n\t\tseconds = int(_time[(find_nth(_time, ':', 2) + 1):_time.find('.')])\n\n\t\ttemp_time = datetime(year, month, day, hours, minutes, seconds)\n\t\ttemp_time = int(time.mktime(temp_time.timetuple()))\n\n\t\treturn temp_time\n\n\t#: Check login credentials\n\n\tdef check_login_credentials(self, username, password):\n\n\t\treturn db.check_login_creds(username, password)\n\n#:::END 
PROGRAM:::\n\n","sub_path":"bay_ghost.py","file_name":"bay_ghost.py","file_ext":"py","file_size_in_byte":7656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238493699","text":"import PythonRegistryDiffer.base_classes as bc\n\n\nclass Key(bc.RegistryObject):\n def __init__(self, **kwargs):\n \"\"\"\n Creates a new Key object.\n :param kwargs: required: dbid, key_path, values, modified, name. A dbid of 0 indicates that the key is not in a\n database.\n \"\"\"\n self._key_path = str # key_path\n self._value_list = [] # values\n self._windows_time = int # modified\n self._name = str # name\n # self.has_values is derived from self.values' length\n self.key_path = str(kwargs.get('key_path'))\n if kwargs.get('values') is not None:\n self.values = list(kwargs.get('values'))\n self.modified = int(kwargs.get('modified'))\n self.name = str(kwargs.get('name'))\n super().__init__(**kwargs)\n\n def __eq__(self, other):\n if self.name == other.name and \\\n self.key_path == other.key_path and \\\n self.modified == other.modified:\n for val in self.values:\n if val not in other.values:\n return False\n for val in other.values:\n if val not in self.values:\n return False\n return True\n else: # Yea, it's not needed. But for clarity's sake.\n return False\n\n @property\n def key_path(self):\n return self._key_path\n\n @key_path.setter\n def key_path(self, key_path):\n self._key_path = key_path\n\n @property\n def values(self):\n return self._value_list\n\n @values.setter\n def values(self, new_list):\n self._value_list = new_list\n\n @property\n def modified(self):\n return self._windows_time\n\n @modified.setter\n def modified(self, new_time):\n self._windows_time = new_time\n\n @property\n def has_values(self):\n if len(self.values) > 0:\n return True\n else:\n return False\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, new_name):\n self._name = new_name\n","sub_path":"PythonRegistryDiffer/key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"10391118","text":"import os\nimport sys\nimport libra_py.packages.cp2k.methods as CP2K_methods\n\n\nrun_slurm = True\nsubmit_template = 'submit_template.slm'\nrun_python_file = 'run_template.py'\nistep = 0\nfstep = 3 #1402\nnjobs = 1\nsubmission_exe = 'sbatch'\n# Removing the previous folders if existed. 
You can keep them as well \n# but Libra will overwrite some of the data if their names are the same\n#os.system('rm -rf res job* all_*')\n\nprint('Distributing jobs...')\nCP2K_methods.distribute_cp2k_libint_jobs(submit_template, run_python_file, istep, fstep, njobs, run_slurm, submission_exe)\n\n","sub_path":"6_dynamics/2_nbra_workflows/7_step2_cp2k/1_DFT/2_hpc/1_example_TiO2/distribute_jobs.py","file_name":"distribute_jobs.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4557010","text":"import asyncio\nfrom datetime import datetime\n\nfrom balebot.models.base_models import Peer\nfrom balebot.models.constants.peer_type import PeerType\nfrom balebot.models.messages import TextMessage, PhotoMessage\nfrom balebot.utils.logger import Logger\nfrom bot_config import BotConfig\nfrom constant.message import ReadyMessage, LogMessage\nfrom db.db_handler import db, get_accept_content, get_category_by_id, get_logo_by_id, \\\n change_is_sent, get_type_by_id\n\n\nclass MessageSender:\n def __init__(self):\n\n self.logger = Logger.get_logger()\n self.async_loop = asyncio.get_event_loop()\n self.my_bot = None\n self.bot = None\n self.dispatcher = None\n self.updater = None\n self.database_handler = db\n\n self.check_next = True\n self.running = True\n\n self.perform_check_failure_counter = 0\n self.total_send_failure_counter = 0\n\n def check(self):\n if self.running:\n now = datetime.now().time()\n now_hour = now.hour\n if self.check_next and BotConfig.stop_publish_hour >= now_hour >= BotConfig.start_publish_hour \\\n and self.database_handler.connect() and self.updater.network_connected():\n self.check_next = False\n stmt = get_accept_content()\n if stmt:\n rows = stmt\n if len(rows) == 0:\n self.check_next = True\n\n def send_message(id_index, loop):\n if id_index >= len(rows):\n self.check_next = True\n return 0\n row = rows[id_index]\n content_to_category_obj = row.content_to_category[0]\n category = get_category_by_id(content_to_category_obj.category_id)\n logo = get_logo_by_id(row.logo_id)\n content_type = get_type_by_id(category.type_id)\n text_message = TextMessage(ReadyMessage.content_template.format(row.name,\n row.description,\n category.name.replace(\" \", \"_\"),\n content_type.name.replace(\" \",\n \"_\"),\n row.nick_name,\n row.nick_name))\n photo_message = PhotoMessage(logo.file_id, logo.access_hash, \"channel\", logo.file_size,\n \"image/jpeg\", None, 250,\n 250, file_storage_version=1, caption_text=text_message)\n user_peer = Peer(PeerType.group, peer_id=BotConfig.channel_id,\n access_hash=BotConfig.channel_access_hash)\n kwargs = {\"message\": text_message, \"content_id\": row.id, \"user_peer\": user_peer, \"try_times\": 1}\n self.bot.send_message(message=photo_message, peer=user_peer,\n success_callback=self.success_sent_message,\n failure_callback=self.failure_sent_message, kwargs=kwargs)\n id_index += 1\n loop.call_later(BotConfig.send_delay, send_message, id_index, loop)\n\n my_send_loop = asyncio.get_event_loop()\n my_send_loop.call_soon(send_message, 0, my_send_loop)\n else:\n self.check_next = True\n else:\n pass\n # self.logger.debug(\"db connected: {}\".format(\"ff\", extra={\"tag\": \"info\"}))\n # self.logger.debug(\n # \"network connected: {}\".format(\"dfd\", extra={\"tag\": \"info\"}))\n # self.logger.debug(\"check_next: {}\".format(self.check_next), extra={\"tag\": \"info\"})\n\n self.async_loop.call_later(BotConfig.check_interval, self.check)\n\n def start(self):\n 
self.database_handler.connect()\n        self.check()\n        self.logger.debug(\"start run\", extra={\"tag\": \"info\"})\n\n    def stop(self):\n        self.running = False\n        self.logger.warning(\"PollBank bot stopped\", extra={\"tag\": \"info\"})\n\n    def success_sent_message(self, response, user_data):\n        user_data = user_data['kwargs']\n        user_peer = user_data[\"user_peer\"]\n        content_id = user_data[\"content_id\"]\n        change_is_sent(content_id, \"1\")\n        self.logger.info(LogMessage.success_send_message, extra={\"user_id\": user_peer.peer_id, \"tag\": \"info\"})\n\n    def failure_sent_message(self, response, user_data):\n        user_data = user_data['kwargs']\n        user_peer = user_data[\"user_peer\"]\n        content_id = user_data[\"content_id\"]\n        try_times = int(user_data[\"try_times\"])\n        message = user_data[\"message\"]\n        if try_times < BotConfig.max_total_send_failure:\n            try_times += 1\n            self.logger.error(LogMessage.fail_send_message, extra={\"user_id\": user_peer.peer_id, \"tag\": \"error\"})\n            kwargs = {\"message\": message, \"content_id\": content_id, \"user_peer\": user_peer, \"try_times\": try_times}\n            self.bot.send_message(message, user_peer, success_callback=self.success_sent_message,\n                                  failure_callback=self.failure_sent_message, kwargs=kwargs)\n        else:\n            change_is_sent(content_id, \"2\")\n            self.logger.error(LogMessage.max_fail_retried, extra={\"tag\": \"error\"})\n","sub_path":"message_sender.py","file_name":"message_sender.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"189071499","text":"import Pyro4\nimport time\nimport json\nimport sys\nimport socket\nfrom colorama import init, Fore, Back, Style\nfrom threading import Thread\n\n\nPyro4.config.REQUIRE_EXPOSE = True\n\norder_json=None\nmy_uri = \"\"\nmy_name = \"\"\n\nclass Server(object):\n    @Pyro4.expose\n    def take_order(self):\n        global order_json\n        response_json = order_json\n        response_json['message'] = \"Hey thanks for waiting. This is your order. Have a good day.\"\n        order_json = None\n        print_title(\"Hey Thanks! 
Please take your order!\")\n return response_json\n\ndef serverCustomer():\n clear_screen()\n global my_name\n print_main_title(\"NEW SERVER \" + str(my_name).upper())\n print_success(\"Time to work!\")\n global order_json\n queue_address = \"PYRONAME:lavic.queue.readyOrders\"\n print_network(\"Connecting to \" + queue_address)\n try:\n orders = Pyro4.Proxy(queue_address)\n while True:\n try:\n if int(orders.get_size()) > 0:\n parsed_json = orders.get_order()\n if parsed_json is not None:\n message_type = parsed_json['messageType']\n if message_type == \"ORDER_COMPLETE\":\n print_info(\"I've got an order to announce\")\n token_number = parsed_json['order']['tokenNumber']\n print_title(\"Announcing token \"+ str(token_number))\n parsed_json['messageType'] = \"ORDER_PREPARED\"\n order_json = parsed_json\n broadcast_customers()\n time.sleep(6)\n except:\n print_error(\"Some problem getting json\")\n except:\n print_error(\"Could not connect to queue.\")\n\n\ndef broadcast_customers():\n global my_uri\n global order_json\n ns=Pyro4.locateNS()\n customer_list = ns.list(\"lavic.customer.\")\n shout_limit = 3\n count = 0\n for count in range(0, shout_limit):\n if not order_json is None:\n token_number = str(order_json['order']['tokenNumber'])\n print_info(\"Token number \" + token_number + \" Please take your order - Announcement: \" + str(count + 1))\n for customer in customer_list:\n pyro_address = customer_list[customer]\n print_network(\"Announcing tokens to \" + str(customer) + \" : \" + pyro_address)\n try:\n global my_uri\n customer_obj = Pyro4.Proxy(pyro_address)\n order_json['message'] = \"Announcing token \" + str(token_number)\n customer_obj.listen(\"server\", my_uri, order_json )\n print_success(\"Sent\")\n except:\n print_error(\"Some problem connecting to customers :\" + str(customer))\n else:\n order_json = None\n return\n time.sleep(1)\n print_info(\"No takers. 
Sorry, disposing order.\")\n\ndef print_success( message):\n print(Back.GREEN + Style.BRIGHT + Fore.BLACK + \" SUCCESS \" + Fore.RESET + Back.RESET + Style.RESET_ALL + Fore.WHITE + Style.BRIGHT + \" : \" + str(message))\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n\ndef print_info( message):\n print(Back.BLUE + Style.BRIGHT + Fore.WHITE + \" INFO \" + Fore.RESET + Back.RESET + Style.RESET_ALL + Fore.WHITE + Style.BRIGHT + \" : \" + str(message))\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n\ndef print_error( message):\n print( Back.RED + Style.BRIGHT + Fore.WHITE + \" ERROR \" + Fore.RESET + Back.RESET + Style.RESET_ALL + Fore.WHITE + Style.BRIGHT + \" : \" + str(message))\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n\ndef print_network( message):\n print( Back.YELLOW + Style.BRIGHT + Fore.BLACK + \" NETWORK \" + Fore.RESET + Back.RESET + Style.RESET_ALL + Fore.WHITE + Style.BRIGHT + \" : \" + str(message))\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n\ndef print_title(message):\n print(Fore.GREEN + Style.BRIGHT)\n print(\"- - - - - - - - - - - - - - - - - - - - - - - - - - - -\")\n print(\" \" + str(message))\n print(\"- - - - - - - - - - - - - - - - - - - - - - - - - - - -\")\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n\ndef print_main_title(message):\n print(Fore.CYAN + Back.WHITE + Style.BRIGHT)\n print(\"= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\")\n print(Back.RESET)\n print( \" \" + str(message).upper()+ \" \")\n print(Back.WHITE)\n print(\"= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\")\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n\ndef clear_screen():\n print(\"\\033c\")\n\n\ndef activate():\n server = Server()\n myIp = str(socket.gethostbyname(socket.gethostname()))\n daemon=Pyro4.Daemon(myIp) \n ns=Pyro4.locateNS()\n uri=daemon.register(server)\n global my_uri\n my_uri = uri\n global my_name\n print_success(\"My Url \" + str(uri))\n daemon.requestLoop()\n\nif __name__ == \"__main__\":\n try:\n global my_name\n my_name = str(sys.argv[1])\n server_activate = Thread(target=activate)\n server_start = Thread(target=serverCustomer)\n try:\n server_start.start()\n server_activate.start()\n server_activate.join()\n server_start.join()\n except:\n print_error(\"Could not connect to name server..\")\n\n\n except:\n print(Back.RED + Style.BRIGHT + Fore.WHITE + \" ERROR \" + Fore.RESET + Back.RESET + Style.RESET_ALL + Fore.MAGENTA + Style.BRIGHT + \" : Please specify a server name as an argument. 
Exiting...\")\n print(Fore.RESET + Back.RESET + Style.RESET_ALL)\n exit()\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"65732288","text":"import setuptools\n\nwith open(\"requirements.txt\", mode=\"r\") as file:\n requirements = [line.split(\"#\")[0] for line in file.read().split(\"\\n\") if not line.startswith(\"#\")]\n\nsetuptools.setup(\n name=\"statslib\",\n description=\"\",\n version=\"0.0.1\",\n packages=setuptools.find_packages(),\n install_requires=requirements\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"188109326","text":"# Создать список из десяти целых чисел.\n# Вывести на экран каждое число, увеличенное на 1.\nnumber_list = [1,3,123,42,11,77,5,-42,0,-1]\nfor number in number_list:\n print(number + 1)\n\n# Ввести с клавиатуры строку.\n# Вывести эту же строку вертикально: по одному символу на строку консоли.\ninput_str = input(\"Введите любую строку: \")\nfor letter in input_str:\n print(letter)\n \n# Создать список из словарей с оценками учеников разных классов школы вида [{'school_class': '4a', 'scores': [3,4,4,5,2]}, ...]\n# Посчитать и вывести средний балл по всей школе.\n# Посчитать и вывести средний балл по каждому классу.\ndef main(school_scores):\n school_scores_sum = 0\n for school_class in school_scores:\n class_scores_sum = 0\n for score in school_class['scores']:\n class_scores_sum += score\n class_score_ave = class_scores_sum/len(school_class['scores'])\n class_number = school_class['school_class']\n print(f'Средний балл в {class_number} классе: {round(class_score_ave,1)}')\n school_scores_sum += class_score_ave\n school_score_ave = school_scores_sum/len(school_scores)\n print(f'Средний балл в школе: {round(school_score_ave,1)}')\n\nschool1_scores = [\n {'school_class': '4А', 'scores': [4,5,3,3,4]},\n {'school_class': '4Б', 'scores': [3,4,4,3,2]},\n {'school_class': '5А', 'scores': [4,5,5,5,3,5]},\n {'school_class': '6В', 'scores': [2,3,3,2,3,4]},\n {'school_class': '6А', 'scores': [5,3,4,5,2,3]},\n {'school_class': '11Б', 'scores': [5,4,3]},\n {'school_class': '10Д', 'scores': [4,5,3,2,2,2]},\n {'school_class': '7Г', 'scores': [5,4,3,5,4,3]},\n {'school_class': '3А', 'scores': [5,5,5,4,5,5,5,4,5,3,5]},\n {'school_class': '8Б', 'scores': [4,3,5,3,4,4,3,]},\n {'school_class': '9А', 'scores': [4,5,5,4,3,5,2,5]},\n] \nif __name__ == \"__main__\":\n main(school1_scores)\n","sub_path":"homework1/3_for.py","file_name":"3_for.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"378779057","text":"\"\"\"\nThere are a total of n courses you have to take, labeled from 0 to n - 1.\n\nSome courses may have prerequisites, for example to take course 0 you have to\nfirst take course 1, which is expressed as a pair: [0,1]\n\nGiven the total number of courses and a list of prerequisite pairs, is it\npossible for you to finish all courses?\n\nFor example:\n\n 2, [[1,0]]\n There are a total of 2 courses to take. To take course 1 you should have finished course 0. So it is possible.\n \n 2, [[1,0],[0,1]]\n There are a total of 2 courses to take. To take course 1 you should have finished course 0, and to take course 0 you should also have finished course 1. 
So it is impossible.\n\nNote:\n The input prerequisites is a graph represented by a list of edges, not\n adjacency matrices. Read more about how a graph is represented.\n\nHints:\n This problem is equivalent to finding if a cycle exists in a directed graph.\n If a cycle exists, no topological ordering exists and therefore it will be\n impossible to take all courses.\n \n Topological Sort via DFS - A great video tutorial (21 minutes) on Coursera\n explaining the basic concepts of Topological Sort.\n \n Topological sort could also be done via BFS.\n\"\"\"\nclass Solution(object):\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n inDegs = {i : 0 for i in range(numCourses)}\n outEdges = {i : [] for i in range(numCourses)}\n for edge in prerequisites:\n head, tail = edge[0], edge[1]\n inDegs[head] += 1\n outEdges[tail].append(head)\n\n zeroInDegs = [i for i in range(numCourses) if inDegs[i] == 0]\n sortCount = 0\n while len(zeroInDegs) > 0:\n tail = zeroInDegs.pop()\n for head in outEdges[tail]:\n inDegs[head] -= 1\n if inDegs[head] == 0:\n zeroInDegs.append(head)\n sortCount += 1\n return sortCount == numCourses\n\na = Solution()\nprint(a.canFinish(2, [[1, 0]]) == True)\nprint(a.canFinish(2, [[1, 0], [0, 1]]) == False)\nprint(a.canFinish(3, [[0, 1], [0, 2], [1, 2]]) == True)\n","sub_path":"python/207_course_scchedule.py","file_name":"207_course_scchedule.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570787272","text":"# Finds & prints all triplets of pairs \n# through which triangular arbitrage can be done \n\n# EXAMPLE:\n\n# ids: 158 179 206\n# ETH / ZKS\n# ETH / HT\n# HT / ZKS\n#\n# ids: 158 181 208\n# ETH / ZKS\n# ETH / REN\n# REN / ZKS\n\nfrom ZKSwapClient import ZKSwapClient\n\nclient = ZKSwapClient(network = 1, address = '000x0')\nall_pairs = client.get_complete_info_all_pairs()\ntriangular_pairs = []\n\n# Loop through the dict of pairs trice\nfor pair_id, pair in all_pairs.items():\n for pair_id_2, pair_2 in all_pairs.items():\n for pair_id_3, pair_3 in all_pairs.items():\n if pair_id != pair_id_2 \\\n and pair_id != pair_id_3 \\\n and pair_id_2 != pair_id_3:\n\n # check if the number of unique symbols in these 3 pairs is 3\n all_symbols = set(sorted([\n pair['symbol_a'], pair['symbol_b'], \n pair_2['symbol_a'], pair_2['symbol_b'], \n pair_3['symbol_a'], pair_3['symbol_b']]))\n \n # print if new triplet found\n if len(all_symbols) == 3 and all_symbols not in triangular_pairs:\n triangular_pairs.append(all_symbols)\n print('Found Triangular Arbitrage Pairs:')\n print('ids: ', pair_id, pair_id_2, pair_id_3)\n print(pair['symbol_a'], '/', pair['symbol_b'])\n print(pair_2['symbol_a'], '/', pair_2['symbol_b'])\n print(pair_3['symbol_a'], '/', pair_3['symbol_b'])\n","sub_path":"example_find_triplets.py","file_name":"example_find_triplets.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"394453858","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDesarrolle un procedimiento para obtener el elemento máximo de una matriz de ristras y su posición.\n\"\"\"\n\nfrom pymatriz.matriz import *\n\n\ndef maximo(matriz):\n ''' Devuelve el elemento máximo y su posición de una matriz\n param:\n matriz - Objeto de tipo Matriz\n return:\n elemento_máximo, posición\n '''\n max = 0\n pos = None\n\n for i in 
range(matriz.filas):\n for j in range(matriz.columnas):\n if matriz.get(i, j) > max:\n max = matriz.get(i,j)\n pos = (i, j)\n return max, pos\n\n\n\ndef main():\n # Crea una matriz aleatoria de 10x10 y la imprime por consola\n FILAS = 10\n COLUMNAS = 10\n VALOR_MAXIMO = 99\n m = Matriz.random(FILAS, COLUMNAS, VALOR_MAXIMO)\n m.print()\n\n # Devuelve el elemento máximo y su posición de una matriz\n max, pos = maximo(m)\n print('Valor máximo: ' + str(max))\n print('Posición: ' + str(pos))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"03.elemento_maximo_matriz/python/elemento_maximo_matriz.py","file_name":"elemento_maximo_matriz.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650888336","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# CODE NAME HERE\n\n# CODE DESCRIPTION HERE\n\nCreated on 2019-06-10 at 10:49\n\n@author: cook\n\"\"\"\nfrom __future__ import division\nimport sys\n\nfrom SpirouDRS import spirouReprocess\nfrom SpirouDRS import spirouConfig\nfrom SpirouDRS import spirouCore\nfrom SpirouDRS import spirouStartup\n\n\n# =============================================================================\n# Define variables\n# =============================================================================\n# Name of program\n__NAME__ = 'reprocess.py'\n# Get version and author\n__version__ = spirouConfig.Constants.VERSION()\n__author__ = spirouConfig.Constants.AUTHORS()\n__date__ = spirouConfig.Constants.LATEST_EDIT()\n__release__ = spirouConfig.Constants.RELEASE()\n# Get Logging function\nWLOG = spirouCore.wlog\n# Get plotting functions\nsPlt = spirouCore.sPlt\n# Get param dictionary\nParamDict = spirouConfig.ParamDict\n\n\n# =============================================================================\n# Define functions\n# =============================================================================\ndef main(runfile=None):\n # ----------------------------------------------------------------------\n # Set up\n # ----------------------------------------------------------------------\n # get parameters from config files/run time args/load paths + calibdb\n p = spirouStartup.Begin(recipe=__NAME__)\n # get parameters from configuration files and run time arguments\n p = spirouStartup.LoadArguments(p, require_night_name=False)\n # deal with run file\n p, runtable = spirouReprocess.RunFile(p, runfile)\n # reset sys.argv so it doesn't mess with recipes\n sys.argv = [__NAME__]\n # send email if configured\n spirouReprocess.SendEmail(p, kind='start')\n\n # ----------------------------------------------------------------------\n # Deal with reset options\n # ----------------------------------------------------------------------\n spirouReprocess.ResetFiles(p)\n\n # ----------------------------------------------------------------------\n # find all files\n # ----------------------------------------------------------------------\n WLOG(p, 'info', 'Finding all raw files')\n rawtable, rawpath = spirouReprocess.FindRawFiles(p)\n # WLOG(p, 'info', 'Finding all pp files')\n # tmptable, tmppath = spirouReprocess.FindTmpFiles(p)\n # WLOG(p, 'info', 'Finding all reduced files')\n # redtable, redpath = spirouReprocess.FindRedFiles(p)\n # store in lists\n tables = [rawtable] # , tmptable, redtable]\n paths = [rawpath] # , tmppath, redpath]\n\n # ----------------------------------------------------------------------\n # Generate run list\n # 
----------------------------------------------------------------------\n runlist = spirouReprocess.GenerateRunList(p, tables, paths, runtable)\n\n # ----------------------------------------------------------------------\n # Process run list\n # ----------------------------------------------------------------------\n outlist = spirouReprocess.ProcessRunList(p, runlist)\n\n # ----------------------------------------------------------------------\n # Print timing\n # ----------------------------------------------------------------------\n # get header\n header = spirouConfig.Constants.HEADER()\n WLOG(p, '', '')\n WLOG(p, '', header)\n WLOG(p, '', 'Timings:')\n WLOG(p, '', header)\n WLOG(p, '', '')\n # loop around timings (non-errors only)\n for key in outlist:\n cond1 = len(outlist[key]['ERROR']) == 0\n cond2 = outlist[key]['TIMING'] is not None\n if cond1 and cond2:\n wmsg = 'ID={0:05d} Time = {1}'\n WLOG(p, '', wmsg.format(key, outlist[key]['TIMING']))\n WLOG(p, 'warning', '\\t{0}'.format(outlist[key]['RUNSTRING']),\n wrap=False)\n\n # ----------------------------------------------------------------------\n # Print out any errors\n # ----------------------------------------------------------------------\n # get header\n header = spirouConfig.Constants.HEADER()\n WLOG(p, '', '')\n WLOG(p, '', header)\n WLOG(p, '', 'Errors:')\n WLOG(p, '', header)\n WLOG(p, '', '')\n # loop around each entry of outlist and print any errors\n for key in outlist:\n if len(outlist[key]['ERROR']) > 0:\n WLOG(p, '', '', colour='red')\n WLOG(p, '', header, colour='red')\n WLOG(p, 'warning', 'Error found for ID={0:05d}'.format(key),\n colour='red', wrap=False)\n WLOG(p, 'warning', '\\t{0}'.format(outlist[key]['RUNSTRING']),\n colour='red', wrap=False)\n WLOG(p, '', header, colour='red')\n WLOG(p, '', '', colour='red')\n WLOG(p, 'warning', outlist[key]['ERROR'], colour='red', wrap=False)\n WLOG(p, '', '', colour='red')\n WLOG.printmessage(p, outlist[key]['TRACEBACK'], colour='red')\n WLOG(p, '', '', colour='red')\n WLOG(p, '', header, colour='red')\n\n # send email if configured\n spirouReprocess.SendEmail(p, kind='end')\n\n # ----------------------------------------------------------------------\n # End Message\n # ----------------------------------------------------------------------\n p = spirouStartup.End(p)\n\n # return a copy of locally defined variables in the memory\n return dict(locals())\n\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n # run main with no arguments (get from command line - sys.argv)\n ll = main()\n # exit message if in debug mode\n spirouStartup.Exit(ll, has_plots=False)\n\n# =============================================================================\n# End of code\n# =============================================================================\n","sub_path":"old_code/INTROOT/SpirouDRS/spirouReprocess/reprocess.py","file_name":"reprocess.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"244522949","text":"def solve(sz_arr, arr):\n if sz_arr % 2 == 0:\n return 0\n ret = 0\n for i in range(0, sz_arr, 2):\n if i % 2 == 0:\n ret ^= arr[i]\n return ret\n\nif __name__ == '__main__':\n for _ in range(int(input())):\n print(solve(int(input()),\n [int(x) for x in 
input().split()]))\n","sub_path":"HackerRank/Algorithms/BitManipulation/sansaAndXor/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130911800","text":"import KratosMultiphysics\nimport math\n\ndef Factory(settings, Model):\n if(type(settings) != KratosMultiphysics.Parameters):\n raise Exception(\"expected input shall be a Parameters object, encapsulating a json string\")\n\n return DefineWakeProcess(Model, settings[\"Parameters\"])\n\n\nclass DefineWakeProcess(KratosMultiphysics.Process):\n def __init__(self, Model, settings ):\n KratosMultiphysics.Process.__init__(self)\n\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n \"mesh_id\" : 0,\n \"model_part_name\" : \"please specify the model part that contains the kutta nodes\",\n \"fluid_part_name\" : \"MainModelPart\",\n \"direction\" : [1.0,0.0,0.0],\n \"epsilon\" : 1e-9\n }\n \"\"\")\n\n settings.ValidateAndAssignDefaults(default_settings) \n \n self.direction = KratosMultiphysics.Vector(3)\n self.direction[0] = settings[\"direction\"][0].GetDouble()\n self.direction[1] = settings[\"direction\"][1].GetDouble()\n self.direction[2] = settings[\"direction\"][2].GetDouble()\n dnorm = math.sqrt(self.direction[0]**2 + self.direction[1]**2 + self.direction[2]**2)\n self.direction[0] /= dnorm\n self.direction[1] /= dnorm\n self.direction[2] /= dnorm\n print(self.direction)\n \n self.epsilon = settings[\"epsilon\"].GetDouble()\n\n self.kutta_model_part = Model[settings[\"model_part_name\"].GetString()]\n self.fluid_model_part = Model[settings[\"fluid_part_name\"].GetString()]\n \n def Execute(self):\n #mark as STRUCTURE and deactivate the elements that touch the kutta node\n for node in self.kutta_model_part.Nodes:\n node.Set(KratosMultiphysics.STRUCTURE)\n #node.Fix(KratosMultiphysics.POSITIVE_FACE_PRESSURE)\n\n #compute the distances of the elements of the wake, and decide which ones are wak \n if(self.fluid_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 2): #2D case\n \n xn = KratosMultiphysics.Vector(3)\n \n self.n = KratosMultiphysics.Vector(3)\n self.n[0] = -self.direction[1]\n self.n[1] = self.direction[0]\n self.n[2] = 0.0\n print(\"normal =\",self.n)\n \n for node in self.kutta_model_part.Nodes:\n x0 = node.X\n y0 = node.Y\n for elem in self.fluid_model_part.Elements:\n \n \n \n #check in the potentially active portion\n potentially_active_portion = False\n for elnode in elem.GetNodes():\n xn[0] = elnode.X - x0\n xn[1] = elnode.Y - y0\n xn[2] = 0.0\n dx = xn[0]*self.direction[0] + xn[1]*self.direction[1]\n if(dx > 0): \n potentially_active_portion = True\n break\n if(elnode.Is(KratosMultiphysics.STRUCTURE)): ##all nodes that touch the kutta nodes are potentiallyactive\n potentially_active_portion = True\n break\n \n \n if(potentially_active_portion): \n distances = KratosMultiphysics.Vector(len(elem.GetNodes()))\n \n \n counter = 0\n for elnode in elem.GetNodes():\n xn[0] = elnode.X - x0\n xn[1] = elnode.Y - y0\n xn[2] = 0.0\n d = xn[0]*self.n[0] + xn[1]*self.n[1]\n if(abs(d) < self.epsilon):\n d = self.epsilon\n distances[counter] = d\n counter += 1\n\n npos = 0\n nneg = 0\n for d in distances:\n if(d < 0):\n nneg += 1\n else:\n npos += 1\n \n if(nneg>0 and npos>0):\n elem.Set(KratosMultiphysics.MARKER,True)\n counter = 0\n for elnode in elem.GetNodes():\n elnode.SetSolutionStepValue(KratosMultiphysics.DISTANCE,0,distances[counter])\n counter+=1\n 
#elem.SetValue(ELEMENTAL_DISTANCE,distances)\n \n #for elnode in elem.GetNodes():\n #if elnode.Is(KratosMultiphysics.STRUCTURE):\n #elem.Set(KratosMultiphysics.ACTIVE,False)\n #elem.Set(KratosMultiphysics.MARKER,False)\n \n\n \n else: #3D case\n raise Exception(\"wake detection not yet implemented in 3D\")\n \n def ExecuteInitialize(self):\n self.Execute()\n \n\n","sub_path":"applications/CompressiblePotentialFlowApplication/python_scripts/define_wake_process.py","file_name":"define_wake_process.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"350691338","text":"from selenium import webdriver\r\n#from selenium.webdriver.support.ui import Select\r\nimport time \r\nimport math\r\n\r\ndef calc(x):\r\n\treturn str(math.log(abs(12*math.sin(int(x)))))\r\n\r\nlink = \"http://SunInJuly.github.io/execute_script.html\"\r\n\r\ntry:\r\n\tbrowser = webdriver.Chrome()\r\n\tbrowser.get(link)\r\n\tx = browser.find_element_by_css_selector(\"#input_value\").text\r\n\ty = calc(x)\r\n\tinput_field = browser.find_element_by_css_selector(\"#answer\")\r\n\tbrowser.execute_script(\"return arguments[0].scrollIntoView(true);\", input_field)\r\n\tinput_field.send_keys(y)\r\n\toption1 = browser.find_element_by_css_selector(\"#robotCheckbox\")\r\n\toption1.click()\r\n\toption2 = browser.find_element_by_css_selector(\"#robotsRule\")\r\n\toption2.click()\r\n\tbutton = browser.find_element_by_xpath(\"//button[text()='Submit']\")\r\n\tbutton.click()\r\n\r\nfinally:\r\n\ttime.sleep(10)\r\n\tbrowser.quit()\r\n\r\n# не забываем оставить пустую строку в конце файла","sub_path":"lesson2-2_step6.py","file_name":"lesson2-2_step6.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"198327449","text":"import datetime\r\nimport os\r\nfrom pyPythonRPA.Robot import pythonRPA\r\nimport pyautogui\r\nfrom datetime import datetime\r\ndef change_row(log_table,row1,name,system,status,comment,time):\r\n for row in log_table:\r\n if row==row1:\r\n row[0] = name\r\n row[1] = system\r\n row[2] = status\r\n row[3] = comment\r\n row[4] = time\r\ndef ColvirAdmin_block(names,log_table):\r\n n = 2\r\n for i in range(n):\r\n # open ColvirAdmin\r\n login_cbs_adm = \"arnurt\"\r\n password_cbs_adm = \"arnur_010203\"\r\n pythonRPA.application(\"C:\\CBS_T_новый\\CBSADM.exe\").start()\r\n\r\n #Введение данных для входа\r\n log_in = pythonRPA.bySelector([{\"title\": \"Вход в систему\", \"class_name\": \"TfrmLoginDlg\", \"backend\": \"uia\"}])\r\n log_in.wait_appear(30)\r\n log_in.click()\r\n log_in.set_focus()\r\n pythonRPA.keyboard.write(login_cbs_adm)\r\n pythonRPA.keyboard.press(\"Enter\")\r\n pythonRPA.keyboard.write(password_cbs_adm)\r\n pythonRPA.keyboard.press('Enter')\r\n\r\n cbs_adm_main_window = pythonRPA.bySelector([{\"class_name\": \"TfrmCssApplAdm\", \"backend\": \"uia\"}])\r\n cbs_adm_main_window.wait_appear(10)\r\n cbs_adm_main_window.set_focus()\r\n\r\n #Сохранение селекторов Колвир Админ\r\n filter = pythonRPA.bySelector([{\"title\": \"Фильтр\", \"class_name\": \"TfrmFilterParams\", \"backend\": \"uia\"}])\r\n filter_name_line = pythonRPA.bySelector([{\"title\": \"Фильтр\", \"class_name\": \"TfrmFilterParams\", \"backend\": \"uia\"},\r\n {\"ctrl_index\": 0}, {\"ctrl_index\": 0}, {\"ctrl_index\": 0}])\r\n\r\n filter_cbs_ok = pythonRPA.bySelector([{\"title\": \"Фильтр\", \"class_name\": \"TfrmFilterParams\", \"backend\": \"uia\"},\r\n 
{\"ctrl_index\": 1}, {\"ctrl_index\": 0}, {\"ctrl_index\": 2}])\r\n for name in names:\r\n\r\n # openning the filter by cliking the Пользователи\r\n try:\r\n blocked_codes=[]\r\n unblocked_codes = []\r\n for iterr in range(50):\r\n try:\r\n x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\users.png')\r\n pyautogui.doubleClick(x, y)\r\n print('Пользователи pressed')\r\n break\r\n except Exception as e:\r\n print(e)\r\n pythonRPA.sleep(1)\r\n pythonRPA.sleep(2)\r\n filter.set_focus()\r\n\r\n # Clearing the FILTER\r\n for iterr in range(50):\r\n try:\r\n x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\clear_filter_button.png')\r\n pyautogui.doubleClick(x, y)\r\n print('clear_filter_button pressed')\r\n break\r\n except Exception as e:\r\n print(e)\r\n pythonRPA.sleep(1)\r\n filter_name_line.click()\r\n # fill name_line\r\n pythonRPA.keyboard.write(name)\r\n\r\n # clicking OK to search\r\n filter_cbs_ok.click()\r\n pythonRPA.sleep(3)\r\n code = \"npos\"\r\n #Проверка на результат\r\n warming = pythonRPA.bySelector([{\"title\":\"Подтверждение\",\"class_name\":\"TMessageForm\",\"backend\":\"uia\"}])\r\n warming.wait_appear(1)\r\n unfound = warming.is_exists()\r\n if unfound:\r\n if i:\r\n for row in log_table:\r\n if row[0]==name and row[1] == 'CBSADM':\r\n change_row(log_table,row,name, \"CBSADM\", \"Не заблокирован\", \"Сотрудник не найден в системе\",datetime.now().strftime(\"%H:%M:%S\"))\r\n else:\r\n log_table.append([name, \"CBSADM\", \"Не заблокирован\", \"Сотрудник не найден в системе\",datetime.now().strftime(\"%H:%M:%S\")])\r\n print(\"Not found in system\")\r\n pythonRPA.keyboard.press('enter')\r\n continue\r\n else:\r\n used = False\r\n while 1:\r\n # Getting the CODE of the user\r\n # i = 1\r\n if(used):\r\n pythonRPA.keyboard.press('down')\r\n pythonRPA.keyboard.press('Enter')\r\n used = True\r\n user_detail_window = pythonRPA.bySelector([{\"class_name\": \"TfrmAdmUsrDetail\", \"backend\": \"uia\"}])\r\n user_detail_window.wait_appear(2)\r\n CODE_line = pythonRPA.bySelector([{\"class_name\": \"TfrmAdmUsrDetail\", \"backend\": \"uia\"}, {\"ctrl_index\": 0},\r\n {\"ctrl_index\": 0}, {\"ctrl_index\": 0}, {\"ctrl_index\": 2},\r\n {\"ctrl_index\": 0}])\r\n\r\n # check do we blocked this account in past itteration??\r\n if (CODE_line.get_value() == code):\r\n break\r\n\r\n code = CODE_line.get_value()\r\n # Флаг проеверки должна ли программа нажать кнопку архив, изначально думаем что должна\r\n archive_сlick = True\r\n\r\n # Флаг проеверки должна ли программа нажать кнопку блокировки, изначально думаем что должна\r\n block_click =True\r\n\r\n try:\r\n x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\archive.png')\r\n archive_сlick = False\r\n # Флаг проеверки становится ложной если он и так нажат\r\n except Exception as e:\r\n print(e)\r\n n+=1\r\n print('archive ', archive_сlick)\r\n\r\n try:\r\n x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\blocked.png')\r\n block_click = False\r\n print('block_click ', block_click)\r\n # Флаг проеверки становится ложной если он и так нажат\r\n except Exception as e:\r\n print(e)\r\n print('block_click ', block_click)\r\n # Кнопка \"Пользователь\"\r\n user_button = pythonRPA.bySelector(\r\n [{\"class_name\": \"TfrmAdmUsrDetail\", \"backend\": \"uia\"}, {\"ctrl_index\": 4}, {\"ctrl_index\": 0}])\r\n # В сулачае если галочки нету или же кнопка блокировки не нажата мы должны ввести изминения\r\n if (archive_сlick):\r\n # Нажимаем кнопку корректировки\r\n user_button.wait_appear(2)\r\n user_button.click()\r\n for i in range(3):\r\n 
pythonRPA.keyboard.press('down')\r\n pythonRPA.keyboard.press('Enter')\r\n\r\n # making enable the checkbox of the archive\r\n try:\r\n x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\arxiv.png')\r\n pyautogui.click(x + (x / 2), y)\r\n print('Архив нажат')\r\n except Exception as e:\r\n print(e)\r\n # Saving the changes\r\n user_button.wait_appear(2)\r\n user_button.click()\r\n for i in range(2):\r\n pythonRPA.keyboard.press('down')\r\n pythonRPA.keyboard.press('Enter')\r\n # Blocking the user\r\n if block_click:\r\n user_button.wait_appear(2)\r\n user_button.click()\r\n for i in range(9):\r\n pythonRPA.keyboard.press('down')\r\n for i in range(3):\r\n pythonRPA.sleep(0.5)\r\n pythonRPA.keyboard.press('Enter')\r\n user_detail_window.close()\r\n print(code, \"sucsessfully blocked in CBS/ADM.exe\")\r\n if not i:\r\n log_table.append([name, \"CBSADM\",\"Заблокирован\", \"\", datetime.now().strftime(\"%H:%M:%S\")])\r\n else:\r\n for row in log_table:\r\n if row[0] == name and row[1] == 'CBSADM':\r\n change_row(log_table,row, name,\"CBSADM\",\"Заблокирован\", \"\", datetime.now().strftime(\"%H:%M:%S\"))\r\n except:\r\n if i:\r\n for row in log_table:\r\n if row[0] == name and row[1] == 'CBSADM':\r\n change_row(log_table,row, name,\"CBSADM\", \"Не заблокирован\", \"Техническая ошибка\", datetime.now().strftime(\"%H:%M:%S\"))\r\n else:\r\n log_table.append([name, \"CBSADM\", \"Не заблокирован\", \"Техническая ошибка\", datetime.now().strftime(\"%H:%M:%S\")])","sub_path":"ColvirAdmin.py","file_name":"ColvirAdmin.py","file_ext":"py","file_size_in_byte":10753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"620822064","text":"from tkinter import *\r\nfrom time import gmtime, strftime\r\n\r\n\r\nroot = Tk()\r\nroot.title('Diary')\r\nroot.geometry(\"700x300\")\r\n\r\ne=Entry(root,show=None,width=50,borderwidth=5)\r\ne.pack()\r\n\r\n\r\ndef Insert():\r\n #open('diary.txt','r').close()\r\n text=e.get()+'\\n'\r\n time=strftime('%Y-%m-%d %H:%M:%S',gmtime())\r\n with open('diary.txt','a') as f:\r\n f.write(time+': '+text)\r\n f.close()\r\n with open('diary.txt') as f:\r\n diary_content=f.read()\r\n t.delete(1.0, END)\r\n t.insert('insert',diary_content)\r\n e.delete(0,END)\r\n\r\nb1=Button(root,text='Write',width=15, height=2, command=Insert)\r\nb1.pack()\r\n\r\ns=Scrollbar(root)\r\nt=Text(root,height=2)\r\nwith open('diary.txt') as f:\r\n diary_content=f.read()\r\n t.insert('insert',diary_content)\r\n f.close()\r\ns.pack(side=RIGHT,fill=Y)\r\nt.pack(side=LEFT,fill=Y)\r\ns.config(command=t.yview)\r\n#t.config(ysrollcommand=s.set)\r\n\r\nroot.mainloop()\r\n","sub_path":"diary-project/diary_GUI.py","file_name":"diary_GUI.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427253354","text":"# Open File\ndef openFile():\n # Open .txt file\n studentsDat = open('list.txt', 'r')\n teacherDat = open('teachers.txt', 'r')\n # Make array list\n fullDatList = []\n i = 0\n # enumerate give index as num and value as line\n for num, line in enumerate(studentsDat):\n # Strip a current line\n fields = line.strip().split(\",\")\n newFields = getTeacherVals(fields)\n # Append to a double array\n fullDatList.append(newFields)\n # Return double array\n return(fullDatList)\ndef openClassroomsNum():\n teacherDat = open('teachers.txt', 'r')\n indexList = []\n for index, val in enumerate(teacherDat):\n fields = val.strip().split(\",\")\n indexList.append(int(fields[2]))\n 
return indexList\ndef getTeacherVals(studentList):\n myList = studentList\n teacherDat = open('teachers.txt', 'r')\n\n classNum = int(studentList[3])\n for teacher in teacherDat:\n teacher = teacher.strip().split(\",\")\n if(classNum == int(teacher[2])):\n tmpList = [teacher[0].strip(), teacher[1].strip()]\n break\n myList.extend(tmpList)\n return myList\n# Get grade command\n# Takes: studentList(2DArr)\n# gradeNumber(int)\n# typeGpa(String)\ndef getGradeSearch(studentList, gradeNumber, typeGpa = \"none\"):\n if(typeGpa != \"none\"):\n # Find the highest/lowest gpa\n gradeStudents = getGradeValues(studentList, gradeNumber)\n # Return row of high/low gpa\n gpaStudent = getTypeGpa(gradeStudents, typeGpa)\n else:\n # Find all students with that gradeNumber\n gradeStudents = getGradeValues(studentList, gradeNumber)\n # Return rows of all\n gpaStudent = getFirstLast(gradeStudents)\n return(gpaStudent)\n\n# Returns a 2DArr with students in that gradeNumber\n# Takes: studentList(2DArr)[row][col]\n# gradeNumber(int)\ndef getGradeValues(studentList, gradeNumber):\n tmpList = list(studentList)\n indexList = []\n for num, student in enumerate(studentList):\n if int(student[2]) != gradeNumber: \n indexList.append(num)\n for index in sorted(indexList, reverse=True):\n del tmpList[index]\n return(tmpList)\n\n# Return row of highest/lowest gpa\n# Takes: studentList(2DArr)\n# typeGpa(String)\ndef getTypeGpa(studentList, typeGpa):\n outList = list(studentList)\n for num, student in enumerate(studentList):\n outList[num] = student[5]\n if (typeGpa == \"h\") | (typeGpa == \"high\"):\n maxGpa = outList.index(max(outList))\n gpaStudent = studentList[maxGpa]\n elif (typeGpa == \"l\") | (typeGpa == \"low\"):\n minGpa = outList.index(min(outList))\n gpaStudent = studentList[minGpa]\n return(gpaStudent)\n\n# Returns 2DArr[Row][0=last,1=first]\n# Takes studentList(2DArr[Row][Factor])\ndef getFirstLast(studentList):\n outList = list(studentList)\n for num, student in enumerate(studentList):\n outList[num] = [student[0], student[1]]\n return(outList)\n\n# Returns 2DArr[grade][grade=0,n=1]\n# Takes: studentList(2DArr)\ndef getInfo(studentList):\n outList = []\n for grade in range(7):\n n = 0\n for student in studentList:\n if int(student[2]) == grade:\n n = n + 1\n tmpList = [grade, n]\n outList.append(tmpList)\n return outList\n\n# Returns Arr[0=gradeNumber,1=avgGpa]\n# Takes studentList(2DArr)\n# gradeNumber(int)\ndef getAverage(studentList, gradeNumber):\n gradeList = getGradeValues(studentList, gradeNumber)\n # Make deep copy\n outList = list(gradeList)\n for num, student in enumerate(gradeList):\n outList[num] = student[5]\n outList = list(map(float, outList))\n avgGpa = sum(outList)/float(len(outList))\n return([gradeNumber, avgGpa])\n\n# Return 2DArr[row][0=last,1=first]\n# Take: studentList(2DArr)\n# busNum(int)\ndef getBus(studentList, busNum):\n indexList = []\n for num, student in enumerate(studentList):\n if int(student[4]) == busNum: \n indexList.append(student)\n tmpList = getFirstLast(indexList)\n return(tmpList)\n\n# Return 2DArr[row][0=last,1=first]\n# Takes: studentList(2DArr)\n# teachName(String)\ndef getTeacher(studentList, teachName):\n indexList = []\n for num, student in enumerate(studentList):\n if student[6] == teachName.upper(): \n indexList.append(student)\n tmpList = getFirstLast(indexList)\n return(tmpList)\n\n# Returns a 2DArr[row][factors depends on bus]\n# Takes: studentList(2DArr)\n# studentName(String)\n# bus(String) - Optional B/b or BUS/bus\ndef getStudent(studentList, studentName, 
bus = \"none\"):\n if(bus == \"none\"):\n # Find the highest/lowest gpa\n gradeStudents = getNameValues(studentList, studentName)\n # Return row of high/low gpa\n gpaStudent = getS(gradeStudents)\n else:\n if(bus==\"b\")|(bus==\"bus\"):\n # Find all students with that gradeNumber\n gradeStudents = getNameValues(studentList, studentName)\n # Return rows of all\n gpaStudent = getSb(gradeStudents)\n return(gpaStudent)\n\n# Returns a 2DArr with factor 0 (Last name) matching to name\n# Takes: studentList(2DArr)\n# name(String)\ndef getNameValues(studentList, name):\n indexList = []\n for num, student in enumerate(studentList):\n if student[0] == name.upper(): \n indexList.append(student)\n return(indexList)\n\n# Returns a 2DArr with factors for student\n# Takes: studentList(2DArr)\ndef getS(studentList):\n outList = list(studentList)\n for num, student in enumerate(studentList):\n outList[num] = [student[0], student[1],student[2],student[3], student[6],student[7]]\n return(outList)\n\n# Returns a 2DArr with factors for student + bus\n# Takes: studentList(2DArr)\ndef getSb(studentList):\n outList = list(studentList)\n for num, student in enumerate(studentList):\n outList[num] = [student[0], student[1],student[4]]\n return(outList)\ndef getAllStudentsInClass(studentList, classNum):\n indexList = []\n for num, student in enumerate(studentList):\n if (int(student[3].strip()) == int(classNum)): \n indexList.append(student)\n return(indexList)\ndef getAllTeachersInClass(studentList, classNum):\n indexList = []\n for num, student in enumerate(studentList):\n if (int(student[3].strip()) == int(classNum)): \n indexList.append([student[6].strip(), student[7].strip()])\n for index, val in enumerate(indexList):\n indexList[index] = \"-\".join(val)\n indexList = list(set(indexList))\n for index, val in enumerate(indexList):\n indexList[index] = val.split(\"-\")\n return(indexList)\ndef getTeachersInGrade(studentList, gradeNum):\n newListTmp = getGradeValues(studentList, int(gradeNum))\n newList = []\n for index, val in enumerate(newListTmp):\n newList.append([val[6].strip(), val[7].strip()])\n for index, val in enumerate(newList):\n newList[index] = \"-\".join(val)\n newList = list(set(newList))\n for index, val in enumerate(newList):\n newList[index] = val.split(\"-\")\n return(newList)\ndef getEnrollment(studentList):\n outList = []\n for classNum in openClassroomsNum():\n n = 0\n for student in studentList:\n if int(student[3]) == classNum:\n n = n + 1\n tmpList = [classNum, n]\n outList.append(tmpList)\n return outList\ndef getGpaVal(studentList, valNum):\n outList = []\n for student in studentList:\n outList.append(float(student[valNum]))\n return outList\ndef getMeanGpaGrade(studentList):\n meanList = []\n for grade in range(7):\n gradeList = getGradeValues(studentList, grade)\n gpaList = getGpaVal(gradeList, 5)\n if(len(gpaList) == 0):\n meanGpa = \"NULL\"\n else:\n meanGpa = round(sum(gpaList)/float(len(gpaList)), 3)\n meanList.append([grade, meanGpa])\n return meanList\ndef getUniqueTeachers(studentList):\n newList = []\n newListTmp = studentList\n for index, val in enumerate(newListTmp):\n newList.append([val[6].strip(), val[7].strip()])\n for index, val in enumerate(newList):\n newList[index] = \"-\".join(val)\n newList = list(set(newList))\n for index, val in enumerate(newList):\n newList[index] = val.split(\"-\")\n return(newList)\ndef getTeacherStudents(studentList, lstName, fstName):\n indexList = []\n for num, student in enumerate(studentList):\n if (student[6] == lstName.upper()) & 
(student[7] == fstName.upper()): \n            indexList.append(student)\n    return(indexList)\ndef getMeanGpaTeacher(studentList):\n    newTeachers = getUniqueTeachers(studentList)\n    meanList = []\n    for teacher in newTeachers:\n        lstName = teacher[0]\n        fstName = teacher[1]\n        studentsTeacher = getTeacherStudents(studentList, lstName, fstName)\n        gpaList = getGpaVal(studentsTeacher, 5)\n        meanGpa = round(sum(gpaList)/float(len(gpaList)), 3)\n        myAppend = teacher\n        myAppend.append(meanGpa)\n        meanList.append(myAppend)\n    return meanList\ndef getUniqueBusRoute(studentList):\n    newList = []\n    newListTmp = studentList\n    for index, val in enumerate(newListTmp):\n        newList.append(int(val[4].strip()))\n    newList = sorted(list(set(newList)))\n    newList = [str(x) for x in newList]\n    return(sorted(newList))\ndef getStudentsBus(studentList, busNum):\n    indexList = []\n    for num, student in enumerate(studentList):\n        if(student[4] == busNum): \n            indexList.append(student)\n    return(indexList)\ndef getMeanBus(studentList):\n    busList = getUniqueBusRoute(studentList)\n    meanList = []\n    for bus in busList:\n        studentBus = getStudentsBus(studentList, bus)\n        gpaList = getGpaVal(studentBus, 5)\n        meanGpa = round(sum(gpaList)/float(len(gpaList)), 3)\n        meanList.append([bus, meanGpa])\n    return meanList\n\n\n\n","sub_path":"lab1-2 copy/sFunctions.py","file_name":"sFunctions.py","file_ext":"py","file_size_in_byte":9489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"379686244","text":"import cv2\nfrom keras.models import load_model\nfrom slack_msg import dbgout\nimport numpy as np\n\n## Image preprocessing\ndef preprocessing(frame):\n    # Resize\n    size = (224, 224)\n    frame_resized = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)\n\n    # Normalize the image\n    frame_normalized = (frame_resized.astype(np.float32) / 127.0) - 1\n\n    # Reshape the image dimensions for prediction.\n    frame_reshaped = frame_normalized.reshape((1, 224, 224, 3))\n\n    return frame_reshaped\n\n## Load the trained model\nmodel = load_model('converted_keras/keras_model.h5')\n\n# Camera capture object, 0 = built-in camera\ncapture = cv2.VideoCapture(0)\n\n# Adjust the capture frame size\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)\ndbgout(\"Starting\")\nsleep_cnt = 1 # counter used to detect 60 seconds of continuous \"drowsy\" state\nwhile True:\n    ret, frame = capture.read()\n    if ret == True:\n        print(\"read success!\")\n\n        # Flip the image\n        frame_fliped = cv2.flip(frame, 1)\n\n        # Display the image\n        cv2.imshow(\"VideoFrame\", frame_fliped)\n\n        # Poll for key presses; exit when any key is pressed in the VideoFrame window\n        if cv2.waitKey(200) > 0:\n            break\n\n        # Preprocess the data\n        preprocessed = preprocessing(frame_fliped)\n\n        # Predict\n        prediction = model.predict(preprocessed)\n        #print(prediction) # [[0.00533728 0.99466264]]\n        state = \"awake\"\n        message = \"You have been dozing for 60 seconds!!!\"\n\n        if prediction[0,0] < prediction[0,1]:\n            #print('drowsy')\n            sleep_cnt += 1\n\n            # If the drowsy state persists for 60 seconds, play a sound & send a KakaoTalk message\n            if state == 'awake' and sleep_cnt % 60 == 0:\n                sleep_cnt = 1\n                state = 'drowsy'\n                print(message)\n                dbgout(message)\n                # beepsound()\n                # send_music_link()\n                # break ## stop the program after a single alarm (comment this out if you want it to repeat!)\n        else:\n            state = \"awake\"\n            print('awake')\n            sleep_cnt = 1\n\n# Release the camera object\ncapture.release()\n# Close the window shown on screen\ncv2.destroyAllWindows()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64340362","text":"# coding: utf-8\n\"\"\"\nSpecify the matrix to compress and the number of dimensions after compression,\nand perform dimensionality reduction by multidimensional scaling (MDS).\n\"\"\"\nimport sys\nimport numpy as np\nimport scipy as 
sp\nfrom scipy import io\nfrom ..tools.util import myloadmat, measure_time\n\nclass MDS(object):\n    def __init__(self):\n        pass\n\n    def dimreduce(self, load_mat, save_mat, dimension, overturn=False):\n        \"\"\"\n        Perform dimensionality reduction by multidimensional scaling (MDS)\n        \"\"\"\n        matrix, terms = myloadmat(load_mat)\n        if type(matrix) != type(np.array([])):\n            # Convert the sparse matrix to a dense matrix\n            print('translating sparse matrix to dense matrix.')\n            matrix = matrix.toarray()\n\n        if overturn == True:\n            print('matrix overturning...')\n            # Invert the similarities (map the largest value to the smallest, and vice versa)\n            lambda_value = 0.1 # replace zeros with a suitably small value\n            matrix[matrix==0] = lambda_value\n            matrix = np.log(matrix)\n            matrix = -matrix + matrix.max()\n\n        # Run MDS\n        sys.stderr.write(\"Start MDS\\n\")\n        Y, eig_value, eig_vector = self._MDS(matrix, dimension)\n        sys.stderr.write(\"End MDS\\n\")\n\n        # Save the matrix file\n        io.savemat(save_mat, {'fmatrix':Y, 'terms':terms})\n\n    @measure_time\n    def _MDS(self, D, d):\n        \"\"\"\n        Embed a distance matrix into a low-dimensional space via MDS\n        @p D: dense distance matrix\n        @p d: dimensionality of the embedding\n        \"\"\"\n        N = len(D)\n        S = D*D # squared distances\n        H = np.eye(N) - np.ones((N,N))/N # centering matrix\n        P = -0.5 * H.dot(S).dot(H) # Young-Householder transform (-1/2*H*S*H)\n        #np.savetxt(\"P_isomap\", P) # save the matrix\n        S = None # free memory\n        H = None # free memory\n\n        # Eigenvalue computation\n        sys.stderr.write(\"Start Eigen Computation\\n\")\n        eig_value, eig_vector = np.linalg.eigh(P)\n        sys.stderr.write(\"End Eigen Computation\\n\")\n        P = None # free memory\n\n        ind = np.argsort(eig_value)[::-1] # sort by eigenvalue in descending order\n        p = ind[:d]\n\n        W = eig_value[p] # top d eigenvalues\n        X = eig_vector[:,p] # top d eigenvectors\n        Y = np.sqrt(W)*X\n\n        # also return the eigenvalues and eigenvectors for saving\n        dmax = 1000\n        if dmax > N: dmax = N\n        return Y, eig_value[ind[:dmax]], eig_vector[:,ind[:dmax]]\n","sub_path":"mypkg/dimreduce/mds.py","file_name":"mds.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374814878","text":"test=int(input().strip())\r\nwhile test>0:\r\n    test-=1\r\n    m=int(input().strip())\r\n    n=int(input().strip())\r\n    arr=[int(x) for x in input().strip().split(' ')]\r\n    for k in range(0,n):\r\n        newarr=arr[:k]+arr[k+1:n]\r\n        if((m-arr[k]) in newarr):\r\n            print(k+1,end=' ')\r\n            if(arr.index(m-arr[k])>arr.index(arr[k])):\r\n                print(arr.index(m-arr[k])+1)\r\n            else:\r\n                print(newarr.index(m-arr[k])+2)\r\n            break","sub_path":"New folder/Ice Cream Parlor.py","file_name":"Ice Cream Parlor.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342588151","text":"from __future__ import unicode_literals\nfrom django.db import models\n\n# Create your models here.\nclass Player(models.Model):\n    created = models.DateTimeField(auto_now_add=True)\n    name = models.TextField()\n    cardid = models.CharField(max_length=32, unique=True)\n    profile_picture = models.URLField(blank=True, default='')\n    games_played = models.IntegerField(default=0)\n    games_won = models.IntegerField(default=0)\n    rating = models.IntegerField(default=1000)\n\n    def save(self, *args, **kwargs):\n        self.cardid = self.cardid.upper()\n        super(Player, self).save(*args, **kwargs)\n    \n    def __str__(self):\n        return \"%s (%s)\" % (self.name, self.cardid)\n","sub_path":"api/players/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"53317586","text":"# coding: utf-8\n'''\nCreated on Aug 13, 2013\n\n@author: dinhpham\n'''\n\nimport logging\nimport ujson as json\nimport requests\n\n# See 
http://blog.codepainters.com/2012/11/20/gevent-monkey-patching-versus-sniffer-nose/\nfrom fscore.helper import string_utils\nfrom fscore.app import app\n\n\nlogger = logging.getLogger(\"fscore.app.notification\")\n\n\nclass CloudTransport(object):\n\n def __init__(self):\n pass\n\n def google_push(self, message, custom, registration_ids, collapse_key='offline'):\n \"\"\"\n Push a JSON message like\n\n {\n \"registration_ids\": [\"\", \"\", ... ],\n \"data\": {\n \"room_id': \"\",\n \"alert\": \"\",\n \"title\": \"\", -- new\n \"from\": \"\", -- new\n \"url\": \"\", -- new\n \"collapse_key\": \"\"\n }\n }\n http://developer.android.com/google/gcm/index.html\n message containing up to 4kb of payload data\n\n http://developer.android.com/google/gcm/gs.html\n http://developer.chrome.com/trunk/apps/gcm_server.html\n @note https://bitbucket.org/skunkworksvn/klamr_backend/wiki/API-Push-Notification\n \"\"\"\n message = {'alert': message}\n message.update(custom)\n return gcm_push(message, registration_ids, collapse_key)\n\n def apple_push(self, message, custom, tokens):\n \"\"\"\n @see https://github.com/simonwhitaker/PyAPNs\n http://www.cktsoi.com/2012/02/sending-apple-push-notification-in-python/\n http://www.raywenderlich.com/3443/apple-push-notification-services-tutorial-part-12\n \n http://stackoverflow.com/questions/9182572/iphone-push-notification-character-limit\n http://stackoverflow.com/questions/4681833/php-apple-enhanced-notification \n http://cocoaheads.tumblr.com/post/1610035724/enhanced-notification-format-enhanced-version\n \n UIAlertView display limit is 107 characters\n \n You may establish multiple, parallel connections to the same gateway or to multiple gateway instances. \n http://developer.apple.com/library/mac/#documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/CommunicatingWIthAPS/CommunicatingWIthAPS.html\n http://stackoverflow.com/questions/9086956/what-is-the-number-of-persistent-connections-to-apns-allowed\n http://iphonedevsdk.com/forum/iphone-sdk-development/57520-performance-push-notification-service.html\n \n @param tokens: A list of tuple (device token hex, environ) where environ = [sandbox, production]\n @return Number of message failed to send\n \"\"\"\n from fscore.helper.apns_utils import apns_push\n sandbox_tokens = []\n production_tokens = []\n for token in tokens:\n if token[1] == 'sandbox':\n sandbox_tokens.append(token[0].replace(' ', ''))\n else:\n production_tokens.append(token[0].replace(' ', ''))\n if sandbox_tokens:\n apns_push(message, custom, 'sandbox', sandbox_tokens)\n if production_tokens:\n apns_push(message, custom, 'production', production_tokens)\n return 1\n\nclass GCMConnection(object):\n '''Google Cloud Messaging client'''\n\n def __init__(self, key, url):\n self.key = key\n self.url = url\n\n def send(self, payload):\n \"\"\"\n Connects to the Google Cloud Messaging server and send a push notification message\n \n @param payload: A dict\n @see http://developer.android.com/google/gcm/gcm.html\n @return a tuple(number_of_good_messages, error_message) where\n number_of_good_messages:\n + > 0: Number of message succeeded in sending\n + 0: No success\n + -2: Bad request because of malformed JSON\n + -3: Bad authentication\n \"\"\"\n headers = {'Content-Type': 'application/json', 'Authorization': 'key=%s' % self.key}\n r = requests.post(self.url, json.dumps(payload), headers=headers)\n # print r.content\n # 
{\"multicast_id\":8790869038670923110,\"success\":1,\"failure\":0,\"canonical_ids\":0,\"results\":[{\"message_id\":\"0:1360143266412828%6f86e7e2f9fd7ecd\"}]}\n # {\"multicast_id\":7843831450212605390,\"success\":0,\"failure\":1,\"canonical_ids\":0,\"results\":[{\"error\":\"MismatchSenderId\"}]}\n if r.status_code == 200:\n json_resp = r.json()\n if json_resp['failure'] > 0:\n if 'collapse_key' in payload:\n logger.warn(\"Failed to send GCM message (%s) using key %s: %s - %s\", payload['collapse_key'], self.key, r.text, payload)\n else:\n logger.warn(\"Failed to send GCM message using key %s: %s - %s\", self.key, r.text, payload)\n return len(payload['registration_ids']) - json_resp['failure'], None\n return len(payload['registration_ids']), None\n if r.status_code == 400:\n return -2, None # The request could not be parsed as JSON\n if r.status_code == 401:\n return -3, None # There was an error authenticating the sender account\n return -4, r.text # Exception: r.text\n\n def create_payload(self, registration_ids, data=None, collapse_key=None, delay_while_idle=False, time_to_live=None):\n \"\"\"\n Construct the dictionary mapping of parameters.\n Encodes the dictionary into JSON if for json requests.\n Helps appending 'data.' prefix to the plaintext data: 'hello' => 'data.hello'\n \n :param data Must be a list or dict\n :return a JSON string\n :raises Exception: if time_to_live is invalid\n :raises CollapseKeyException: if collapse_key is missing when time_to_live is used\n \"\"\"\n if time_to_live:\n if time_to_live > 2419200 or time_to_live < 0:\n raise BaseException(\"Invalid time-to-live value\")\n if collapse_key is None:\n raise BaseException(\"collapse_key is required when time_to_live is provided\")\n payload = {'registration_ids': registration_ids, 'time_to_live': time_to_live}\n else:\n payload = {'registration_ids': registration_ids}\n if data:\n payload['data'] = data\n if delay_while_idle:\n payload['delay_while_idle'] = delay_while_idle\n if collapse_key:\n payload['collapse_key'] = collapse_key\n # http://developer.android.com/google/gcm/gcm.html\n return payload\n\ndef gcm_push(data, registration_ids, collapse_key='offline'):\n '''Push messages to GCM\n @param data: A dict like {\"room_id\": room_id, \"alert\": message}\n '''\n connection = GCMConnection(app.config['GCM_KEY'], app.config['GCM_URL'])\n payload = connection.create_payload(registration_ids,\n data=data,\n collapse_key=collapse_key,\n delay_while_idle=None,\n time_to_live=None)\n return connection.send(payload)\n\n\nif __name__ == '__main__':\n # registration_ids = [\"APA91bH6gu6Nyk6Y0zFYqoS35nnY1MgdZhOE_ojjGfA2XSS2eNeDLjWHFU8G3vfAzrdtTEMXJEVdOCmyrtp6i9JApA2T3AIJIy22Dge345jxt_HsHuDDoa2odVIIilNswD1VW0p39dbZrtQy5XZCQdlWIdamWfzWzw\", ]\n # PushNotification().google_push({'message': \"Low battery\"}, registration_ids)\n device_tokens = ['19c2219763acf11ad3daaa8befb9113d14b1e85a1ee665003d5761819fc97b80', 'sandbox', 'wert_84823456789@xmpp.klamr.it', 'ios']\n CloudTransport().apple_push(\"Low battery\", {\"room_id\": \"aere_84923456789894969506@xmpp.klamr.it\"}, device_tokens)\n msg_template = ''\n CloudTransport().push(msg_template % {'plan_name': token['plan_name']},\n {'topic_id': token['topic_id'], 'room_id': token['room_id']},\n [token, ], collapse_key='plan_reminder')\n","sub_path":"fscore/helper/notification_utils.py","file_name":"notification_utils.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"114541430","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/eliotberriot/Seafile/kii/kii_main/kii/permission/tests/test_user_permission.py\n# Compiled at: 2014-12-14 11:41:07\n# Size of source mod 2**32: 3044 bytes\nimport django\nfrom guardian.shortcuts import assign\nfrom guardian.models import UserObjectPermission\nfrom kii.stream.tests import base\nfrom kii.tests import test_base_models\nfrom .. import forms\n\nclass TestUserPermission(base.StreamTestCase):\n\n def test_view_permission(self):\n u2 = self.users[2]\n stream = self.streams[0]\n stream.assign_perm('read', u2)\n self.assertEqual(stream.readable_by(u2), True)\n self.assertEqual(stream.writable_by(u2), False)\n self.assertEqual(stream.deletable_by(u2), False)\n\n def test_edit_permission(self):\n u2 = self.users[2]\n stream = self.streams[0]\n stream.assign_perm('write', u2)\n self.assertEqual(stream.readable_by(u2), True)\n self.assertEqual(stream.writable_by(u2), True)\n self.assertEqual(stream.deletable_by(u2), False)\n\n def test_remove_permission(self):\n u2 = self.users[2]\n stream = self.streams[0]\n stream.assign_perm('delete', u2)\n self.assertEqual(stream.readable_by(u2), True)\n self.assertEqual(stream.writable_by(u2), True)\n self.assertEqual(stream.deletable_by(u2), True)\n\n def test_can_remove_permission(self):\n i = self.streams[0]\n i.assign_perm('read', self.anonymous_user)\n self.assertEqual(i.readable_by(self.anonymous_user), True)\n i.remove_perm('read', self.anonymous_user)\n self.assertEqual(i.readable_by(self.anonymous_user), False)\n\n def test_owner_gets_all_permissions(self):\n stream = self.streams[0]\n self.assertEqual(stream.deletable_by(stream.owner), True)\n\n def test_permission_is_deleted_when_stream_is_deleted(self):\n u2 = self.users[2]\n stream = self.streams[0]\n perm = stream.assign_perm('delete', u2)\n u2.delete()\n with self.assertRaises(UserObjectPermission.DoesNotExist):\n UserObjectPermission.objects.get(pk=perm.pk)\n\n def test_permission_mixin_form_populate_fields_correctly(self):\n i = self.streams[0]\n i.assign_perm('read', self.anonymous_user)\n form = forms.PermissionMixinForm(instance=i)\n self.assertEqual(form.fields['readable_by'].initial, 'everybody')\n\n def test_permission_mixin_form__save_create_permission(self):\n i = self.streams[0]\n form = forms.PermissionMixinForm(data={'readable_by': 'everybody'}, instance=i)\n self.assertEqual(form.is_valid(), True)\n form.save()\n self.assertEqual(i.readable_by(self.anonymous_user), True)\n\n def test_permission_mixin_form_delete_obsolete_permission(self):\n i = self.streams[0]\n i.assign_perm('read', self.anonymous_user)\n form = forms.PermissionMixinForm(data={'readable_by': 'owner'}, instance=i)\n self.assertEqual(form.is_valid(), True)\n form.save()\n self.assertEqual(i.readable_by(self.anonymous_user), False)","sub_path":"pycfiles/kii-0.8.tar/test_user_permission.cpython-34.py","file_name":"test_user_permission.cpython-34.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"444165607","text":"from threading import Thread\nfrom random import randint\nimport time\ndef countdown(m):\n n = randint(1,10) #随机生成睡眠时间\n print(\"进程{}开始执行\".format(m))\n while n > 0:\n print('进程{0}正在执行,剩余时间:{1}'.format(m,n))\n n -= 1\n time.sleep(1) #每执行一次循环,休眠一秒\n print(\"进程{}结束.\".format(m))\n\nt1 = 
Thread(target=countdown,args=(1,)) # create a Thread instance targeting the countdown function; the args keyword passes a tuple of arguments to the function\nt1.start() # the thread starts executing as soon as start() is called\nt2 = Thread(target=countdown,args=(2,))\n\nif t2.is_alive(): # use is_alive() to check whether the thread is running\n    print('Alive.')\nelse:\n    print(\"Has not started.\")\nt2.start() # a Thread only actually starts executing once start() is called\nt2.join() # wait for t2 to finish\n","sub_path":"任务1/6.1.py","file_name":"6.1.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234346605","text":"from socket import *\nimport sys\nimport os\nserverName = '127.0.0.1'\nserverPort = 3999\n\nimport time\nclient = []\ndatacache = []\ncnt = 0\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclient.append(clientSocket)\ncnt+=1\ntry:\n    clientSocket.connect((serverName, serverPort))\n    print(cnt)\n    # client.append(clientSocket)\nexcept Exception as e:\n    print(e)\n    sys.exit()\ntry:\n    while True:\n        sentence = input('Input lowercase sentence:').encode()\n        print(type(sentence))\n        msg = bytearray(sentence)\n        clientSocket.send(msg)\n        modifiedSentence = clientSocket.recv(1024)\n        print('From Server:{}'.format(modifiedSentence))\nexcept Exception as e:\n    print(e)\n    clientSocket.close()\n","sub_path":"client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233086367","text":"#Ex6_6.py\nimport jieba\nimport wordcloud\nfrom scipy.misc import imread\nimport matplotlib.pyplot as plt\n\nmask = imread(\"chinamap2.png\")\nexcludes = { }\nf = open(\"武汉加油.txt\", \"r\", encoding=\"utf-8\")\nt = f.read()\nf.close()\nls = jieba.lcut(t)\ntxt = \" \".join(ls)\nw = wordcloud.WordCloud(\\\n    width = 400, height = 200,\\\n    background_color = \"white\",\n    font_path = \"msyh.ttc\", mask = mask\n    )\nbmp = w.generate(txt)\n\nplt.imshow(bmp)\nplt.axis('off')\nplt.show()\n\n\n","sub_path":"Python_WorkSpace/pythonshiyan6/实验6代码/Ex6_6.py","file_name":"Ex6_6.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"545939034","text":"from time import sleep\nfrom stats_utils import *\nfrom scrape_utils import *\nfrom pytz import timezone\nfrom datetime import datetime, date, timedelta\nimport pandas as pd\n\nstart_date = date(2019, 10, 22)\nend_date = date(2019, 11, 1)\n\nurl = 'https://www.espn.com/nba/scoreboard/_/date/'\n\ndef daterange(start_date, end_date):\n    for n in range(int ((end_date - start_date).days)):\n        yield start_date + timedelta(n)\n\ndef scrape(url):\n    games = get_games(url)\n    \n    print(\"games: \", games)\n\n    print('calculating live games...')\n    for g in games['live']:\n        sleep(randint(5, 10))\n        print(\"waiting...\")\n        process_gamelogs(g, url)\n        print(\"done\")\n\n    print('calculating static games...')\n    for g in games['static']:\n        sleep(randint(5, 10))\n        print(\"waiting...\")\n        process_gamelogs(g, url)\n        print(\"done\")\n\n\nfor single_date in daterange(start_date, end_date):\n    d = single_date.strftime(\"%Y%m%d\")\n    url_d = url + d\n    today = get_today(url_d)\n    scrape(url_d)\n    print(\"logs: \", logs)\n    if len(logs) > 0:\n        print(requests.post(\"https://bilalsattar24.pythonanywhere.com/nbastatline/\", json={'gameLogs':logs}))\n    logs.clear()\n\n\n#(optional) write to csv\n#logs = pd.DataFrame(logs)\n#logs.to_csv(\"gamelogs.csv\")\n\n\n#print(\"sorted logs: \", 
sorted_logs)\n","sub_path":"espn/espn_gamelogs_historical.py","file_name":"espn_gamelogs_historical.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382387568","text":"#!/usr/local/bin/python3\n\nimport socket\nimport time\nimport threading\n\ndef tcplink(sock, addr):\n print('accept new connection from [%s:%s]...' % addr)\n while True:\n data = sock.recv(1024).decode()\n if not data:\n break\n sock.send(data.upper().encode())\n sock.close()\n print('connection from [%s:%s] closed.' % addr)\n\ndef main():\n host = socket.gethostname()\n port = 54321\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((host, port))\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n t.start()\n\nif __name__ == '__main__':\n main()\n","sub_path":"study/py/echo/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"107125533","text":"from selenium import webdriver\nimport time\nimport re\nimport datetime\n\n# Windows users need to specify the path to chrome driver you just downloaded.\n# You need to unzip the zipfile first and move the .exe file to any folder you want.\n# driver = webdriver.Chrome(r'path\\to\\the\\chromedriver.exe')\ndriver = webdriver.Chrome(r'..\\chromedriver_win32\\chromedriver.exe')\n\n# date arithmetic for datetime indexing\nmax=datetime.date(2018,2,1)\nmin=datetime.date(2017,9,15)\n#print(min,max)\n\nn= int((max-min+datetime.timedelta(days=1))/datetime.timedelta(days=7))\n#print(n)\n\nstart_date = min\nend_date = start_date + datetime.timedelta(days=6)\n\ndatelist_sta = [str(min)]\ndatelist_end = [str(end_date)]\n\ntmp = min\ntmp2 = end_date\n\nfor i in range(n-1):\n\ttmp += datetime.timedelta(days=7)\n\tdatelist_sta.append(str(tmp)) \n\ttmp2 += datetime.timedelta(days=7)\n\tdatelist_end.append(str(tmp2))\n#print(datelist_sta, datelist_end)\n\na = list(map(lambda s: \"\".join(s.split('-')), datelist_sta))\nb = list(map(lambda s: \"\".join(s.split('-')), datelist_end))\n\n# %3A20180119%3A20180125%3Aus\npages = list(map(lambda x, y, z, r, q: x+y+z+r+q, ['%3A']*20, a, ['%3A']*20, b, ['%3Aus']*20))\n\n\nindex = 0\nfor page in pages:\n\t# Go to the page that we want to scrape\n\ttry:\n\t\tdriver.get(\"https://artists.youtube.com/charts/tracks?chart_params_id=weekly\" + page)\n\t\ttime.sleep(7)\n\t\tprint(\"Scraping Page number \" + str(index))\n\t\tindex = index + 1\n\t\t# Find all the reviews. 
The find_elements function will return a list of selenium select elements.\n\t\t# Check the documentation here: http://selenium-python.readthedocs.io/locating-elements.html\n\t\t#reviews = driver.find_elements_by_xpath('//li[@class=\"bv-content-item bv-content-top-review bv-content-review\"]')\n\t\trows = driver.find_elements_by_xpath('//*[@id=\"chart-container\"]/paper-card/div[2]/iron-pages/ytma-chart[1]/div[2]//div[@class=\"row style-scope ytma-chart\"]')\n\t\t# for all row search, make sure the last directory should be searched all not noly one by using '//' instead of '/'\n\t\t# here, if you use /div[@class=\"row style-scope ytma-chart\"], this is only one row not all rows.\n\t\t# so, if all rows are share common directory and attribute name, you can use '//div[@class=\"row style-scope ytma-chart\"]' to find all as above'\n\n\t\t# print('='*50)\n\t\t# print(len(rows))\n\n\t\t# Iterate through the list and find the details of each review.\n\t\tfor row in rows:\n\t\t\t# Initialize an empty dictionary for each review\n\t\t\tlist_dict = {}\n\t\t\t# Use relative xpath to locate the title, content, username, date, rating.\n\t\t\t# Once you locate the element, you can use 'element.text' to return its string.\n\t\t\t# To get the attribute instead of the text of each element, use `element.get_attribute()`\n\t\t\trank = row.find_element_by_xpath('.//div[@class=\"rank-num style-scope ytma-chart\"]').text #div[1]\n\t\t\t# make sure to use .// not ./ (because, sub directroy tree cannot be seen directly)\n\t\t\t# And Just specify the last leaf with .//, because we don't know how may tags vary inbetween\n\t\t\t#title = row.find_element_by_xpath('.//div[3]/a[1]').text\n\t\t\ttitle = row.find_element_by_xpath('.//div[@class=\"title metadata style-scope ytma-chart\"]/a[1]').text #a[1]\n\t\t\t# make sure to use .// not ./ (because, sub directroy tree cannot be seen directly)\n\t\t\t#artist = row.find_element_by_xpath('.//div[3]/a[2]').text #a[2]\n\t\t\tartist = row.find_element_by_xpath('.//div[@class=\"title metadata style-scope ytma-chart\"]/a[2]').text #a[2]\n\t\t\t# make sure to use .// not ./ (because, sub directroy tree cannot be seen directly)\t\t\n\t\t\tview = row.find_element_by_xpath('.//div[@class=\"views style-scope ytma-chart\"]').text\n\t\t\t# make sure to use .// not ./ (because, sub directroy tree cannot be seen directly)\t\t\n\t\t\t#content = review.find_element_by_xpath('//div[@class=\"bv-content-summary-body-text\"]/p').text\n\t\t\t#content = review.find_elements_by_xpath('.//div[@class=\"bv-content-summary-body-text\"]/p')\n\t\t\t#content = ''.join([x.text for x in content])\n\n\t\t\tprint('='*50)\n\t\t\tprint(rank)\n\t\t\tprint(title)\n\t\t\tprint(artist)\n\t\t\tprint(view)\n\t\t\t#print(content)\t\t\t\n\n\t\t# Locate the next button element on the page and then call `button.click()` to click it.\n\t\t#button = driver.find_element_by_xpath('//li[@class=\"bv-content-pagination-buttons-item bv-content-pagination-buttons-item-next\"]')\n\t\t#button.click()\n\t\t#time.sleep(2)\n\n\texcept Exception as e:\n\t\tprint(e)\n\t\tdriver.close()\n\t\tbreak","sub_path":"youtube_music/youtube_music_starter.py","file_name":"youtube_music_starter.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317122915","text":"import numpy as np\r\nimport pandas as pd\r\n'UKB/Variables/Conf_VariableList.txt'\r\n\r\ndef csv_to_column (directory, name):\r\n f = open(directory,'r')\r\n #print(f.read())\r\n ans = 
[]\r\n data = f.readlines()\r\n for line in data:\r\n new_string = ''.join(ch for ch in line if ch.isdigit())\r\n #if new_string != '':\r\n ans.append(new_string) \r\n #print(ans)\r\n \r\n a= open(name,\"w+\")\r\n for i in ans:\r\n a.write(i + '\\n')\r\n return(None)\r\n\r\ncsv_to_column ('UKB/Variables/Conf_VariableList.txt', 'UKB/Variables/Conf_VariableList_ID_only.txt')\r\ncsv_to_column ('UKB/Variables/Geo_VariableList.txt', 'UKB/Variables/Geo_VariableList_ID_only.txt')\r\ncsv_to_column ('UKB/Variables/T1MRI_VariableList.txt', 'UKB/Variables/T1MRI_VariableList_ID_only.txt')\r\n#b = open('Conf_VariableList_ID.txt','r')\r\n#print(b.read())\r\n","sub_path":"Exercises/csv_to_column.py","file_name":"csv_to_column.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653713959","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/eliotberriot/Seafile/kii/kii_main/kii/glue/common_settings.py\n# Compiled at: 2015-01-20 14:25:09\n\"\"\"Base settings shared by all environments\"\"\"\nfrom django.conf.global_settings import *\nfrom django.core.urlresolvers import reverse_lazy\nimport os, kii\nKII_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nTEMPLATE_CONTEXT_PROCESSORS += ('django.core.context_processors.request', 'kii.app.context_processors.user_apps',\n 'kii.stream.context_processors.streams', 'kii.stream.context_processors.stream_models',\n 'kii.glue.context_processors.kii_metadata', 'kii.glue.context_processors.tracking_code',\n 'kii.activity.context_processors.unread_notifications')\nMIDDLEWARE_CLASSES = ('django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware')\nINSTALLED_APPS = ('django.contrib.auth', 'django.contrib.staticfiles', 'django.contrib.sites',\n 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.admin',\n 'guardian', 'polymorphic', 'django_filters', 'mptt', 'rest_framework',\n 'autocomplete_light') + kii.APPS_CONFIGS + ('actstream', )\nKII_APPS = kii.APPS\nALL_USERS_GROUP = 'all_users'\nLOCALE_PATHS += (\n os.path.join(KII_DIR, 'locale'),)\nSITE_ID = 1\nSTATIC_URL = '/static/'\nTEMPLATE_LOADERS = ('django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader')\nSTATICFILES_FINDERS = ('django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder')\nANONYMOUS_USER_ID = -1\nAUTHENTICATION_BACKENDS += ('guardian.backends.ObjectPermissionBackend', )\nLOGIN_URL = 'kii:user:login'\nREVERSED_LOGIN_URL = reverse_lazy(LOGIN_URL)\nLOGIN_REDIRECT_URL = '/'\nTIME_ZONE = 'UTC'\nUSE_TZ = True\nUSE_I18N = True\nUSE_L10N = True\nLANGUAGE_CODE = 'en'\nLANGUAGES = (('en', 'English'), )\nfrom django.utils.functional import curry\nimport markdown\nfrom markdown.extensions.codehilite import makeExtension as CodeHilite\nfrom kii.markdown.inlinepatterns import makeExtension as KiiFlavoredMarkdown\nMARKDOWN_EXTENSIONS = (\n CodeHilite(css_class='code', linenums=False, noclasses=True),\n KiiFlavoredMarkdown())\nmd_filter = curry(markdown.markdown, 
extensions=MARKDOWN_EXTENSIONS)\nMARKDOWN_FUNCTION = md_filter\nMARKUP_FIELD_TYPES = (\n (\n 'markdown', md_filter),\n (\n 'none', lambda s: s))\nTRACKING_CODE = ''\nimport logging\nKII_LOGGER = logging.getLogger('kii')\nLOGGING = {'version': 1, \n 'disable_existing_loggers': False, \n 'handlers': {'console': {'level': 'DEBUG', \n 'class': 'logging.StreamHandler'}}, \n 'loggers': {'kii': {'handlers': [\n 'console'], \n 'level': 'DEBUG'}}}","sub_path":"pycfiles/kii-0.8.tar/common_settings.py","file_name":"common_settings.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"213214271","text":"import pandas as pd\n\n# reading csv file data\nmovie_data_orig = pd.read_csv(r\"C:\\Users\\Madhuri Yadav\\Downloads\\Final-Project-Group8-master\\Final-Project-Group8-master\\Code\\movies_metadata.csv\")\n# print(movie_data_orig) # [45466 rows x 24 columns]\n\n# removing 12 irrelevant columns\ndf_cleaned = movie_data_orig.drop([\"adult\", \"belongs_to_collection\", \"homepage\", \"original_language\",\n \"original_title\", \"overview\", \"poster_path\", \"production_countries\",\n \"spoken_languages\", \"status\", \"tagline\", \"video\" ], axis=1)\n\n# print(df_cleaned.columns) # 'budget', 'genres', 'id', 'imdb_id' 'popularity', 'production_companies', 'release_date', 'revenue', 'runtime', 'title', 'vote_average', 'vote_count']\n#df_cleaned.dtypes # release_date is of object (i.e. string data type) instead of datetime\n\n#Extracting Month from release date\ndf_cleaned['release_date_temp'] = pd.to_datetime(df_cleaned['release_date'],format='%Y-%m-%d', errors='coerce') #Converting string to datetime\ndf_cleaned['release_month'] = pd.to_datetime(df_cleaned['release_date_temp']).dt.month #extracting month from datetime(Releasedate) column\ndf_cleaned['release_month'] = pd.to_numeric(df_cleaned['release_month'],errors='coerce') #converting float to int\ndf_cleaned = df_cleaned.drop(['release_date_temp'], axis=1)","sub_path":"Code/For Models.py","file_name":"For Models.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"172718490","text":"\"\"\"Treat Code5 format antenna wave pattern data files\n\nThis is right now just a very simple version which needs more development in the future.\nBy Lei Shi, May 6, 2014\n\"\"\"\nfrom .funcs import parse_num\n\nfrom scipy.interpolate import RectBivariateSpline\nimport numpy as np\n\nclass C5_Error(Exception):\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass C5_reader:\n \"\"\"Simple reader.\n \"\"\"\n\n def __init__(this,filename,full_load = True):\n \"\"\"initiates with a file name\n\n filename: string, the full name of the Code5 file\n \"\"\"\n this.filename = filename\n if(full_load):\n this.read_header()\n this.read_Efield()\n this.setup_spline()\n\n def read_header(this):\n f = open(this.filename,'r')\n this.params = {}\n this.comment_line = 0 # count the starting comment lines\n for line in f:\n if '!' 
in line:\n this.comment_line += 1\n continue\n elif ':' in line:\n words = line.split(':')\n key = words[0]\n value = words[1].strip(' \\t\\n')\n if key == 'Datatype':\n this.params['Datatype'] = value.strip(' \\t\\n')\n elif key == 'Wavelength':\n values = value.split()\n num = parse_num(values[0].strip(' '))\n\n unit = values[1].strip(' \\t\\n')\n if unit == 'nm':\n num *= 1e-7\n this.params['Wavelength']=num\n else:\n raise C5_Error('Default unit for wavelength is nanometer. Please change your code5 file.')\n\n elif key == 'Grid spacing':\n values = value.split()\n dx = parse_num(values[0])\n dy = parse_num(values[1])\n unit = values[2].strip(' \\t\\n')\n if unit == 'mm':\n dx *= 0.1\n dy *= 0.1\n spacing = (dx,dy)\n this.params['Grid_spacing']=spacing\n else:\n raise C5_Error('Default unit for grid spacing is milimeter. Please change your code5 file.')\n\n elif key == 'Coordinates':\n values = value.split()\n x0 = parse_num(values[0])\n y0 = parse_num(values[1])\n z0 = parse_num(values[2])\n unit = values[3].strip(' \\t\\n')\n if unit == 'mm':\n x0 *= 0.1\n y0 *= 0.1\n z0 *= 0.1\n coords = (x0,y0,z0)\n this.params['Coordinates'] = coords\n else:\n raise C5_Error('Default unit for coordinates is milimeter. Please change your code5 file.')\n\n elif key == 'Direction':\n values = value.split()\n x_dir = parse_num(values[0])\n y_dir = parse_num(values[1])\n z_dir = parse_num(values[2])\n dirs = (x_dir,y_dir,z_dir)\n this.params['Direction'] = dirs\n elif key == 'Array size':\n values = value.split()\n nx = parse_num(values[0])\n ny = parse_num(values[1])\n this.params['Array_size']=(nx,ny)\n else:\n raise C5_Error('Unexpected keyword occured:{0}! Please double check the compatibility of the Code5 file version and this program.'.format(key) )\n\n else:\n #print 'header loading finished.'\n break\n f.close()\n\n def read_Efield(this):\n \"\"\"read the complex Efield from Code 5 file.\n Note that the result array is in the shape (ny,nx), where ny and nx are contained in this.params['Array_size'], which is read in read_header() method. 
And each element in the result array is a complex number.\n \"\"\"\n (nx,ny)= this.params['Array_size']\n data = np.loadtxt(this.filename,comments = '!', skiprows = this.comment_line + 6)\n if(this.params['Datatype'] == 'Complex'):\n data = data.reshape((ny,nx,2))\n this.E_field = np.copy(data[:,:,0]+ 1j* data[:,:,1])\n else:\n raise C5_Error( 'Right now, only complex data is accepted.')\n\n def setup_spline(this,method = 'RectBivariateSpline'):\n \"\"\"setup the spline interpolator for outside use\n\n The default interpolation method is the RectBivariateSpline method.\n Available methods are going to be added in the future if needed.\n\n result interpolator is named: this.E_re_interp,E_im_interp\n Note that the coordinate\n \"\"\"\n\n E_re = np.real(this.E_field)\n E_im = np.imag(this.E_field)\n\n\n\n nx,ny = this.params['Array_size']\n x0,y0,z0 = this.params['Coordinates']\n dx,dy = this.params['Grid_spacing']\n\n xmin = x0 - dx*(nx-1.)/2\n xmax = x0 + dx*(nx-1.)/2\n this.X1D = np.linspace(xmin,xmax,nx)\n ymin = y0 - dy*(ny-1.)/2\n ymax = y0 + dy*(ny-1.)/2\n this.Y1D = np.linspace(ymin,ymax,ny)\n\n if(method == 'RectBivariateSpline'):\n this.E_re_interp = RectBivariateSpline(this.Y1D,this.X1D,E_re)\n this.E_im_interp = RectBivariateSpline(this.Y1D,this.X1D,E_im)\n\n\n","sub_path":"src/python2/sdp/io/code5.py","file_name":"code5.py","file_ext":"py","file_size_in_byte":5557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"89179125","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n$Id: $ cmCC24887_3pcc_BS_Functional_129_LocalThree-Way-Add-onPartyHangsUp\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nName: cmCC24887_3pcc_BS_Functional_129_LocalThree-Way-Add-onPartyHangsUp.py\n\n\nAuthor:\n Vishnu Prasad B(vishnpra@cisco.com)\n\nReferences: US24887\n\nPurpose:\n This test case verifies a local three-way conference in which the add-on\n party hangs up first.\nDescription:\n 1)As a BroadWorks group administrator, browse to Services → Music/Video\n On Hold for DUT 1’s group. Select Enable music/video during Call Hold.\n Make sure that System Defined Music/Video is selected.\n 2)Originate a call from DUT 2 to DUT 1. Answer the call. From DUT 1,\n make a call to DUT 3 by selecting the conference button on the phone\n or invoking conference on DUT 1. After DUT 3 answers, complete the\n conference to join the parties. DUT 3 hangs up. DUT 1 hangs up.\n\nTest bed requirement:\n Three 3pcc phone\n\nTest Steps:\n 1. DUT 2 dials DUT 1.\n 2. DUT 1 answers the call.\n 3. DUT 1 initiates a conference with DUT 3 by selecting the conference\n button and dialing DUT 3.\n 4. DUT 3 answers the call.\n 5. DUT 1 completes the conference to join the parties.\n 6. DUT 3 hangs up.\n 7. DUT 1 hangs up.\n\nVerify:\n 1. DUT 2 dials DUT 1.\n − DUT 1 phone rings.\n − DUT 2 hears ringback.\n 2. DUT 1 answers the call.\n − Two-way voice path is established.\n 3. DUT 1 initiates a conference with DUT 3 by selecting the conference\n button and dialing DUT 3.\n − DUT 2 hears Music On Hold.\n − DUT 3 phone rings.\n − DUT 3’s phone display shows DUT1’s caller ID.\n − DUT 1 hears ringback.\n 4. DUT 3 answers the call.\n − Two-way voice path is established between DUT 1 and DUT 3\n 5. DUT 1 completes the conference to join the parties.\n − All three parties are joined in a conference call and can\n hear each other.\n 6. DUT 3 hangs up.\n − DUT 3 is released.\n − DUT 1 and DUT 2 can still hear each other.\n 7. 
DUT 1 hangs up.\n − The call ends.\n\n Notes:\n\n Known Bugs:\n\"\"\"\n\nimport tng\nimport logging\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended\\\n import wait_for_ccapi_call_states, SynergyliteException\nfrom tng.frontend.timing import wait, until\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.broadsoft.broadsoft_config import BroadsoftConfig\n\nlog = logging.getLogger(\"Local_3Way_TestCase\")\n\n\nclass Local3WayTestCase(SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (PhoneConfigHelper, PhoneLineRegHelper, TsharkHelper)\n helper_num_devices = 3\n\n def setUp(self):\n log.info(\"StartOfSetup\")\n self.xsi_user_id1 = self.toolkit.get_test_env_info(\n section='bsoft', parameter_name=\"xsi_user_id1\")\n self.broadsoft = BroadsoftConfig()\n\n self.p1_fname, self.p1_lname = self.broadsoft.get_first_and_last_name(\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)\n\n # Enabling Music on Hold\n self.broadsoft.set_music_on_hold(\n active='true', user_id_proxy=self.xsi_user_id1,\n user_id=self.user_id1)\n # Not disabling music on hold because by default MOH will be On.\n\n def subscribe_cleanup():\n self.oPhone3.ccapi.feedback_unsubscribe(\n self.oPhone3.subscribed_callback)\n self.oPhone3.unregister_call_event('VOIP_MSG_CALL_EVENT_INCOMING')\n self.addCleanup(subscribe_cleanup)\n\n log.info(\"End of setUp\")\n\n def test_conf(self):\n\n log.info(\"Start of Local 3-Way Call Testcase\")\n\n log.info('Start tshark on linux')\n filter_cmd = (\n 'port sip and (host {} or host {} or host {})'.format(\n self.oPhone1.ip, self.oPhone2.ip, self.oPhone3.ip))\n self.tshark.tshark_start(filter_cmd)\n\n log.info(\"Phone2 dial Phone1's number: {}\".format(self.user_id1))\n self.oPhone2.ccapi.dial('null', self.user_id1, '', 1, 0, 1)\n # check ophone2 ringout status and oPhone1 ringing status\n wait_for_ccapi_call_states(\n (self.oPhone2, self.oPhone1), (\"PROCEEDING\", \"RINGING\"),\n timeout=20)\n\n log.info(\"Phone1 receive the call\")\n self.oPhone1.ccapi.accept(\"0000\")\n # check two phones are in connected status\n wait_for_ccapi_call_states(\n (self.oPhone2, self.oPhone1), (\"CONNECTED\", \"CONNECTED\"))\n\n self.oPhone3.register_call_event('VOIP_MSG_CALL_EVENT_INCOMING')\n self.oPhone3.ccapi.feedback_subscribe(self.oPhone3.subscribed_callback)\n\n # Phone1 Initiates Conference\n log.info(\"Phone1 initiates Conference\")\n self.oPhone1.ccapi.conference(\"0000\")\n wait(3, 'wait for 3 seconds')\n\n log.info(\"Phone1 dials Phone3's number: {}\".format(self.user_id3))\n self.oPhone1.dial_digits('0001', self.user_id3)\n\n wait_for_ccapi_call_states(\n (self.oPhone1, self.oPhone1, self.oPhone3),\n (\"PROCEEDING\", \"HOLD\", \"RINGING\"),\n (1, 1, 1), (1, 0, 0), timeout=30)\n ph1_identity_string = '{} {}'.format(self.p1_fname, self.p1_lname)\n # checking media flow for music on hold\n self.oPhone3.check_caller_id(ph1_identity_string, self.user_id1[-4:])\n\n log.info(\"Phone3 accepts the call\")\n self.oPhone3.ccapi.accept(\"0000\")\n\n log.info(\" Phone1 press conference and all three are connected\")\n self.oPhone1.ccapi.conference(\"0001\")\n wait_for_ccapi_call_states(\n (self.oPhone1, self.oPhone1, self.oPhone2, self.oPhone3),\n (\"CONNECTED\", \"CONNECTED\", \"CONNECTED\", \"CONNECTED\"),\n (1, 1, 1, 1), (0, 1, 0, 0), 
timeout=10)\n\n log.info(\"Phone3 Hangs up\")\n self.oPhone3.ccapi.hangUp(\"0000\")\n wait_for_ccapi_call_states(\n (self.oPhone1, self.oPhone1, self.oPhone2, self.oPhone3),\n (\"CONNECTED\", \"IDLE\", \"CONNECTED\", \"IDLE\"),\n (1, 1, 1, 1), (0, 1, 0, 0), timeout=10)\n\n log.info(\"Phone1 and Phone2 are still CONNECTED\")\n log.info(\"Phone1 HangsUp\")\n self.oPhone1.ccapi.hangUp(\"0000\")\n wait_for_ccapi_call_states(\n (self.oPhone1, self.oPhone2, self.oPhone3),\n (\"IDLE\", \"IDLE\", \"IDLE\"), (1, 1, 1), (0, 0, 0), timeout=10)\n log.info(\"End of Local 3-Way Call Testcase\")\n\n\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Functional/cmCC24887_3pcc_BS_Functional_129_LocalThree-WayAdd-onPartyHangsUp.py","file_name":"cmCC24887_3pcc_BS_Functional_129_LocalThree-WayAdd-onPartyHangsUp.py","file_ext":"py","file_size_in_byte":6774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"349454892","text":"import base64\nimport collections\nimport datetime\nimport json\nimport os\nimport threading\nimport time\n\nfrom django.conf import settings\n\nimport opentracing\nimport opentracing.ext.tags as ext_tags\nimport wrapt\nfrom basictracer import BasicTracer\nfrom basictracer.recorder import SpanRecorder\nfrom google.auth.transport.requests import AuthorizedSession\nfrom google.oauth2 import service_account\nfrom opentracing import (\n Format,\n InvalidCarrierException,\n SpanContextCorruptedException\n)\n\n\ndef to_timestamp(t):\n return datetime.datetime.utcfromtimestamp(t).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n\nclass TracingState:\n\n _config = threading.local()\n\n @classmethod\n def activated(cls):\n return getattr(cls._config, \"activated\", False)\n\n @classmethod\n def set_config(cls, key, value):\n setattr(cls._config, key, value)\n\n\nclass Recorder(SpanRecorder):\n\n def __init__(self):\n self.mutex = threading.Lock()\n self.pending = collections.deque()\n self.disabled = False\n self.flush_thread = None\n self.setup_http()\n\n def setup_http(self):\n encoded_key = os.environ.get(\"GCP_TRACING_SERVICE_ACCOUNT\")\n if not encoded_key:\n raise RuntimeError(\"missing GCP_TRACING_SERVICE_ACCOUNT\")\n service_account_key = json.loads(base64.b64decode(encoded_key))\n self.project_id = service_account_key[\"project_id\"]\n credentials = service_account.Credentials.from_service_account_info(service_account_key)\n scoped_credentials = credentials.with_scopes([\"https://www.googleapis.com/auth/trace.append\"])\n self.http = AuthorizedSession(scoped_credentials)\n\n def record_span(self, span):\n self.maybe_flush()\n s = {\n \"spanId\": str(span.context.span_id),\n \"kind\": {\n ext_tags.SPAN_KIND_RPC_CLIENT: 1,\n ext_tags.SPAN_KIND_RPC_SERVER: 2,\n }.get(span.tags.get(ext_tags.SPAN_KIND), 0),\n \"name\": span.operation_name,\n \"startTime\": to_timestamp(span.start_time),\n \"endTime\": to_timestamp(span.start_time + span.duration),\n \"labels\": {\n {\n ext_tags.HTTP_METHOD: \"trace.cloud.google.com/http/method\",\n ext_tags.HTTP_STATUS_CODE: \"trace.cloud.google.com/http/status_code\",\n ext_tags.HTTP_URL: \"trace.cloud.google.com/http/url\",\n }.get(str(k), str(k)): str(v)\n for k, v in span.tags.items()\n },\n }\n if span.parent_id is not None:\n s[\"parentSpanId\"] = str(span.parent_id)\n t = {\n \"projectId\": self.project_id,\n \"traceId\": \"{:x}\".format(span.context.trace_id) * 2,\n \"spans\": [s],\n }\n with self.mutex:\n self.pending.append(t)\n\n def 
maybe_flush(self):\n thread = self.flush_thread\n if (thread is not None and not thread.is_alive()) or thread is None:\n self.flush_thread = thread = threading.Thread(target=self.flusher, name=\"flusher\")\n thread.daemon = True\n thread.start()\n\n def flusher(self):\n while not self.disabled:\n if self.pending:\n traces = list(self.pending)\n self.pending.clear()\n with self.mutex:\n url = f\"https://cloudtrace.googleapis.com/v1/projects/{self.project_id}/traces\"\n payload = {\n \"traces\": traces,\n }\n headers = {\n \"Content-Type\": \"application/json\",\n }\n r = self.http.patch(url, data=json.dumps(payload), headers=headers)\n if not r.ok:\n print(f\"trace PUT failed status={r.status_code}\")\n time.sleep(2.5)\n\n\nclass GCPTracer(BasicTracer):\n\n def __init__(self):\n recorder = None\n if settings.TRACING_ENABLED:\n recorder = Recorder()\n super(GCPTracer, self).__init__(recorder)\n self.register_required_propagators()\n\n\ndef parse_http_headers(request):\n prefix = \"HTTP_\"\n p_len = len(prefix)\n headers = {\n key[p_len:].replace(\"_\", \"-\").lower(): value\n for key, value in request.META.items()\n if key.startswith(prefix)\n }\n return headers\n\n\nclass OpenTracingMiddleware:\n\n def __init__(self, get_response=None):\n self.tracer = GCPTracer()\n self.get_response = get_response\n\n install_requests_patch(self.tracer)\n\n def __call__(self, request):\n TracingState.set_config(\"activated\", True)\n headers = parse_http_headers(request)\n tags = {\n ext_tags.SPAN_KIND: ext_tags.SPAN_KIND_RPC_SERVER,\n ext_tags.HTTP_URL: request.path,\n ext_tags.HTTP_METHOD: request.method,\n }\n span = None\n operation_name = request.path\n try:\n # check http headers to see if we should extract a parent span\n span_ctx = self.tracer.extract(Format.HTTP_HEADERS, carrier=headers)\n span = self.tracer.start_span(\n operation_name=operation_name,\n child_of=span_ctx,\n tags=tags,\n )\n except (InvalidCarrierException, SpanContextCorruptedException) as e:\n span = self.tracer.start_span(operation_name=operation_name, tags=tags)\n with span_in_context(span):\n response = self.get_response(request)\n span.set_tag(ext_tags.HTTP_STATUS_CODE, response.status_code)\n span.finish()\n return response\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n span = get_current_span()\n span.set_tag(\"django.view\", view_func.__name__)\n\n def process_exception(self, request, exc):\n span = get_current_span()\n if span is not None:\n # span.set_tag(ext_tags.ERROR, str(exc))\n span.finish()\n\n\nclass RequestContext(object):\n\n __slots__ = (\"span\",)\n\n def __init__(self, span):\n self.span = span\n\n\nclass RequestContextManager:\n\n _state = threading.local()\n _state.context = None\n\n @classmethod\n def current_context(cls):\n return getattr(cls._state, \"context\", None)\n\n def __init__(self, context):\n if isinstance(context, opentracing.Span):\n self._context = RequestContext(span=context)\n else:\n self._context = context\n\n def __enter__(self):\n self._prev_context = self.__class__.current_context()\n self.__class__._state.context = self._context\n return self._context\n\n def __exit__(self, *_):\n self.__class__._state.context = self._prev_context\n self._prev_context = None\n return False\n\n\ndef get_current_span():\n context = RequestContextManager.current_context()\n return context.span if context else None\n\n\ndef span_in_context(span):\n context = RequestContext(span)\n return RequestContextManager(context)\n\n\ndef install_requests_patch(tracer):\n try:\n import 
requests.sessions\n import requests.adapters\n except ImportError: # pragma: no cover\n return\n\n @wrapt.decorator\n def trace(wrapped, instance, args, kwargs):\n if TracingState.activated():\n span = tracer.start_span(\n operation_name=\"requests\",\n child_of=get_current_span(),\n )\n with span:\n resp = wrapped(*args, **kwargs)\n else:\n resp = wrapped(*args, **kwargs)\n return resp\n\n requests.adapters.HTTPAdapter.send = trace(requests.adapters.HTTPAdapter.send)\n","sub_path":"scaife_viewer/tracing.py","file_name":"tracing.py","file_ext":"py","file_size_in_byte":7684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175330452","text":"########################################################################\r\n# program: psd.py\r\n# author: Tom Irvine\r\n# version: 1.7\r\n# date: September 12, 2013\r\n# description:\r\n#\r\n# Determine the power spectral density of a signal.\r\n# The file must have two columns: time(sec) & amplitude.\r\n#\r\n########################################################################\r\n\r\nfrom __future__ import print_function\r\nimport datetime\r\nimport os\r\nfrom scipy.signal import find_peaks\r\nimport numpy as np\r\n\r\nimport sys\r\n\r\n# if sys.version_info[0] == 2:\r\n# print(\"Python 2.x\")\r\n# import Tkinter as tk\r\n# from tkFileDialog import asksaveasfilename\r\n\r\n\r\nif sys.version_info[0] == 3:\r\n print(\"Python 3.x\")\r\n import tkinter as tk\r\n from tkinter.filedialog import asksaveasfilename\r\n\r\n\r\nfrom p_tompy import read_two_columns_from_dialog, signal_stats, sample_rate_check\r\nfrom p_tompy import GetInteger2, WriteData2\r\nfrom p_tompy import time_history_plot\r\n\r\nfrom sys import stdin\r\nfrom math import sqrt, pi, log\r\nfrom numpy import zeros, argmax, linspace, cos, mean\r\nfrom scipy.fftpack import fft\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n########################################################################\r\n\r\n\r\nclass READ_DATA_PSD:\r\n\r\n def __init__(self):\r\n pass\r\n\r\n @classmethod\r\n def check_data(cls, a, b, num, sr, dt):\r\n\r\n sample_rate_check(a, b, num, sr, dt)\r\n\r\n return sr, dt\r\n\r\n def read_and_stats(self):\r\n\r\n # label = \"Enter the input time history\"\r\n label = \"acceleration_data.csv\"\r\n\r\n a, b, num = read_two_columns_from_dialog(label)\r\n\r\n sr, dt, ave, sd, rms, skew, kurtosis, dur = signal_stats(a, b, num)\r\n\r\n sr, dt = READ_DATA_PSD.check_data(a, b, num, sr, dt)\r\n\r\n return a, b, num, sr, dt, dur\r\n\r\n########################################################################\r\n\r\n\r\ndef GetString():\r\n iflag = 0\r\n while iflag == 0:\r\n try:\r\n s = stdin.readline()\r\n iflag = 1\r\n except ValueError:\r\n print('Invalid String')\r\n return s\r\n\r\n########################################################################\r\n\r\n\r\ndef select_options(num, dt, window):\r\n\r\n # print(\" \")\r\n # print(\" Remove mean: 1=yes 2=no \")\r\n\r\n # mr_choice = GetInteger2()\r\n mr_choice = 1\r\n\r\n # print(\" \")\r\n # print(\" Select Window: 1=Rectangular 2=Hanning \")\r\n\r\n if window == \"Rectangular\":\r\n h_choice = 1\r\n elif window == \"Hann\":\r\n h_choice = 2\r\n elif window == \"Blackman\":\r\n h_choice = 3\r\n elif window == \"Hamming\":\r\n h_choice = 4\r\n\r\n # h_choice = GetInteger2()\r\n\r\n n = num\r\n\r\n ss = zeros(n)\r\n seg = zeros(n, 'f')\r\n i_seg = zeros(n)\r\n\r\n NC = 0\r\n for i in range(0, 1000):\r\n nmp = 2**(i-1)\r\n if(nmp <= n):\r\n ss[i] = 2**(i-1)\r\n seg[i] = 
float(n)/float(ss[i])\r\n i_seg[i] = int(seg[i])\r\n NC = NC+1\r\n else:\r\n break\r\n\r\n print(' ')\r\n print(' Number of Samples per Time per df ')\r\n print(' Segments Segment Segment(sec) (Hz) dof')\r\n\r\n for i in range(1, NC+1):\r\n j = NC+1-i\r\n if j > 0:\r\n if(i_seg[j] > 0):\r\n tseg = dt*ss[j]\r\n ddf = 1./tseg\r\n print('%8d \\t %8d \\t %10.3g %10.3g %d'\r\n % (i_seg[j], ss[j], tseg, ddf, 2*i_seg[j]))\r\n if(i == 12):\r\n break\r\n\r\n ijk = 0\r\n while ijk == 0:\r\n # print(' ')\r\n # print(' Choose the Number of Segments: ')\r\n # s = stdin.readline()\r\n s = 1\r\n NW = int(s)\r\n for j in range(0, len(i_seg)):\r\n if NW == i_seg[j]:\r\n ijk = 1\r\n break\r\n\r\n# check\r\n\r\n mmm = 2**int(log(float(n)/float(NW))/log(2))\r\n\r\n df = 1./(mmm*dt)\r\n\r\n# begin overlap\r\n\r\n mH = ((mmm/2)-1)\r\n\r\n return mmm, NW, df, mH, mr_choice, h_choice\r\n\r\n\r\n########################################################################\r\n\r\nclass PSD:\r\n\r\n def __init__(self, mmm, NW, mH, df, b, mr_choice, h_choice):\r\n self.mmm = mmm\r\n self.NW = NW\r\n self.mH = mH\r\n self.df = df\r\n self.b = b\r\n self.mr_choice = mr_choice\r\n self.h_choice = h_choice\r\n\r\n @classmethod\r\n def Hanning_initial(cls, mmm):\r\n H = zeros(mmm, 'f')\r\n tpi = 2*pi\r\n alpha = linspace(0, tpi, mmm)\r\n ae = sqrt(8./3.)\r\n H = ae*0.5*(1.-cos(alpha))\r\n return H\r\n\r\n @classmethod\r\n def magnitude_resolve(cls, mmm, mH, Y):\r\n #\r\n mHm1 = mH-1\r\n z = zeros(int(mH), 'f')\r\n mag_seg = zeros(int(mH), 'f')\r\n#\r\n z = abs(Y)/float(mmm)\r\n#\r\n mag_seg[0] = z[0]**2\r\n#\r\n mag_seg[1:int(mHm1)] = ((2*z[1:int(mHm1)])**2)/2\r\n#\r\n return mag_seg\r\n\r\n def psd_core(self):\r\n\r\n if self.h_choice == 2:\r\n H = PSD.Hanning_initial(self.mmm)\r\n\r\n print(\" \")\r\n print(\" number of segments NW= %d \" % self.NW)\r\n print(\" samples/segments mmm= %d \" % self.mmm)\r\n print(\" half samples/segment-1 mH=%d \" % self.mH)\r\n print(\" \")\r\n print(\" df=%6.3f Hz\" % self.df)\r\n\r\n full = zeros(int(self.mH), 'f')\r\n mag_seg = zeros(int(self.mH), 'f')\r\n\r\n amp_seg = zeros(self.mmm, 'f')\r\n\r\n nov = 0\r\n\r\n for ijk in range(1, 2*self.NW):\r\n\r\n amp_seg[0:self.mmm] = self.b[(0+nov):(self.mmm+nov)]\r\n\r\n nov = nov+int(self.mmm/2)\r\n\r\n if self.mr_choice == 1 or self.h_choice == 2:\r\n amp_seg -= mean(amp_seg)\r\n\r\n if self.h_choice == 2:\r\n amp_seg *= H\r\n\r\n Y = fft(amp_seg)\r\n\r\n mag_seg = PSD.magnitude_resolve(self.mmm, self.mH, Y)\r\n\r\n full += mag_seg\r\n\r\n den = self.df*(2*self.NW-1)\r\n\r\n full /= den\r\n\r\n ms = sum(full)\r\n\r\n freq = zeros(int(self.mH), 'f')\r\n\r\n maxf = (self.mH-1)*self.df\r\n\r\n freq = linspace(0, maxf, int(self.mH))\r\n\r\n tempf = freq[0:int(self.mH)-1]\r\n tempa = full[0:int(self.mH)-1]\r\n freq = tempf\r\n full = tempa\r\n\r\n rms = sqrt(ms*self.df)\r\n\r\n return rms, freq, full\r\n\r\n########################################################################\r\n\r\n\r\ndef psd_plots(a, b, freq, full, rms, idx):\r\n\r\n pmin = 10**40\r\n pmax = 10**-40\r\n\r\n fmin = 10**40\r\n fmax = 10**-40\r\n\r\n for i in range(0, len(freq)):\r\n if full[i] > 0 and freq[i] > 0 and full[i] > pmax:\r\n pmax = full[i]\r\n if full[i] > 0 and freq[i] > 0 and full[i] < pmin:\r\n pmin = full[i]\r\n if freq[i] > 0 and freq[i] > fmax:\r\n fmax = freq[i]\r\n if freq[i] > 0 and freq[i] < fmin:\r\n fmin = freq[i]\r\n\r\n xmax = 10**-30\r\n xmin = xmax\r\n\r\n for i in range(-30, 30):\r\n if(fmax < 10**i):\r\n xmax = 10**i\r\n break\r\n\r\n for i in 
range(30, -30, -1):\r\n if(fmin > 10**i):\r\n xmin = 10**i\r\n break\r\n\r\n ymax = 10**-30\r\n ymin = ymax\r\n\r\n for i in range(-30, 30):\r\n if(pmax < 10**i):\r\n ymax = 10**i\r\n break\r\n\r\n for i in range(30, -30, -1):\r\n if(pmin > 10**i):\r\n ymin = 10**i\r\n break\r\n\r\n # print(\" \")\r\n # print(\" Is the input data dimension Accel(G) ?\")\r\n # print(\" 1=yes 2=no\")\r\n\r\n # ind = GetInteger2()\r\n ind = 1\r\n\r\n if(ind == 1):\r\n th_label = 'Accel (G)'\r\n psd_label = 'Accel (G^2/Hz)'\r\n else:\r\n print('Enter input amplitude unit ')\r\n th_label = GetString()\r\n print('Enter PSD unit label, i.e. unit^2/Hz')\r\n psd_label = GetString()\r\n\r\n print(\" \")\r\n print(\" view plots \")\r\n\r\n time_history_plot(a, b, 1, 'Time(sec)', th_label,\r\n 'Time History', 'time_history', 'PSD')\r\n\r\n plt.gca().set_autoscale_on(False)\r\n\r\n directory = \"PSD/magnitude\"\r\n\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\n ave_line = [np.mean(full)]*len(freq)\r\n thresh = (np.mean(full))*1.5\r\n\r\n peaks, _ = find_peaks(full, height=thresh)\r\n plt.figure(2, figsize=[12, 8])\r\n plt.plot(freq, full)\r\n plt.plot(freq[peaks], full[peaks], \"x\")\r\n plt.plot(freq, ave_line)\r\n\r\n plt.plot(freq[idx], full[idx], '*')\r\n title_string = 'PSD ' + \\\r\n str(\"%6.3g\" % rms)+' GRMS Overall ' + str(datetime.datetime.now())\r\n plt.title(title_string)\r\n plt.xlim([xmin, xmax])\r\n plt.ylim([ymin, ymax])\r\n plt.ylabel(psd_label)\r\n plt.xlabel(' Frequency (Hz) ')\r\n plt.grid(True)\r\n timestamp = str(datetime.datetime.now())\r\n timestamp = timestamp.replace(' ', '-')\r\n timestamp = timestamp.replace('.', '_')\r\n timestamp = timestamp.replace(':', '-')\r\n stitle = 'power_spectral_density' + timestamp + \".png\"\r\n # savepath = directory + \"/\" + stitle\r\n # stitle = 'power_spectral_density' + str(datetime.datetime.now()) + \".png\"\r\n savepath = os.path.join(directory, stitle)\r\n plt.xscale('log')\r\n plt.yscale('log')\r\n plt.savefig(savepath)\r\n plt.show(block=False)\r\n plt.pause(1)\r\n plt.close('all')\r\n\r\n########################################################################\r\n\r\n\r\ndef psd_post(freq, full, rms, run_psd, output_psd):\r\n print(\" \")\r\n print(\" Overall RMS = %10.3g \" % rms)\r\n print(\" Three Sigma = %10.3g \" % (3*rms))\r\n\r\n idx = argmax(full)\r\n\r\n print(\" \")\r\n print(\" Maximum: Freq=%8.4g Hz Amp=%8.4g \" % (freq[idx], full[idx]))\r\n\r\n # print(\" \")\r\n # print(\" Write PSD data to file? 
1=yes 2=no\")\r\n # iacc = GetInteger2()\r\n iacc = 1\r\n\r\n if(iacc == 1):\r\n\r\n if(run_psd == 1):\r\n\r\n # print(\" \")\r\n # print(\" Find output dialog box\")\r\n\r\n # root = tk.Tk()\r\n # root.withdraw()\r\n # output_file_path = asksaveasfilename(\r\n # parent=root, title=\"Enter the PSD output filename: \")\r\n # output_file = output_file_path.rstrip('\\n')\r\n # output_file = \"psd_data\"\r\n output_file = input(\r\n \"Enter file name(without extension) to store psd data : \")\r\n output_file = 'PSD/' + output_file + '.csv'\r\n mH = len(freq)\r\n WriteData2(mH, freq, full, output_file)\r\n run_psd = 0\r\n else:\r\n output_file = output_psd\r\n mH = len(freq)\r\n WriteData2(mH, freq, full, output_file)\r\n\r\n return (run_psd, output_file, idx)\r\n########################################################################\r\n","sub_path":"p_psd.py","file_name":"p_psd.py","file_ext":"py","file_size_in_byte":10567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"521899097","text":"\nimport unittest\nimport pandas as pd\n\nfrom ..follow_ons import follow_ons\n\nclass FollowOnTestCase(unittest.TestCase):\n\n def test_followOns1(self):\n\n df = pd.DataFrame({'title': ['ep1', 'ep1', 'ep1', 'ep2', 'ep2', 'ep2', 'ep2'], \n 'writer': ['Ed', 'Ed', 'Lory', 'Jack', 'Lory', 'jack', 'Ed'], \n 'pony': ['Applejack','Twilight Sparkle', 'Non-Pony','Twilight Sparkle', 'Applejack', 'Rainbow Dash', 'Non-Pony'],\n 'dialog': ['', \"\", \"\", \"\", \"\", \"\", \"\"]})\n \n follow = follow_ons(df, main_ponies=[\"Twilight Sparkle\", \"Applejack\", \"Rainbow Dash\", \"Non-Pony\"])\n\n real_follow = {'twilight': {'applejack': 0.5, 'rainbow': 0.0, 'non-pony': 0.5},\n 'applejack': {'twilight': 1.0, 'rainbow': 0.0, 'non-pony':0.0},\n 'rainbow': {'twilight': 0.0, 'applejack': 1.0, 'non-pony':0.0},\n 'non-pony': {'twilight': 0.5, 'applejack': 0.0, 'rainbow': 0.5}}\n\n self.assertEqual(follow, real_follow)\n\n\n def test_followOnDivideByZero(self):\n\n df = pd.DataFrame({'title': ['ep1', 'ep1', 'ep1'], \n 'writer': ['Ed', 'Jack', 'Lory'],\n 'pony': ['Twilight Sparkle', 'applejack', 'non-pony'],\n 'dialog': [' ', ' ', ' ']})\n\n follow = follow_ons(df, main_ponies=['Twilight Sparkle', 'Rainbow Dash'])\n\n real_follow = {'twilight': {'rainbow': 0.0},\n 'rainbow': {'twilight': 0.0}}\n\n self.assertEqual(follow, real_follow)\n\n\n","sub_path":"src/hw3/tests/follow_ons.py","file_name":"follow_ons.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"219404435","text":"# -*- coding: utf-8 -*- \nfrom django.db import models\n\n# Create your models here.\n \nclass Day(models.Model):\n name = models.CharField(max_length=255, verbose_name='Nazwa dnia liturgicznego', help_text='np. II \"Niedziela Wielkanocna\" lub \"Wspomnienie Św. 
Jacka Wyznawcy\"')\n date = models.DateField(verbose_name='data')\n intentions = models.TextField(verbose_name=\"intencje Mszy\", blank=True)\n active = models.BooleanField(verbose_name=\"opublikowane na stronie\")\n\n def get_str_date(self):\n acc_list = {\n 1:u\"stycznia\",\n 2:u\"lutego\",\n 3:u\"marca\",\n 4:u\"kwietnia\",\n 5:u\"maja\",\n 6:u\"czerwca\",\n 7:u\"lipca\",\n 8:u\"sierpnia\",\n 9:u\"września\",\n 10:u\"października\",\n 11:u\"listopada\",\n 12:u\"grudnia\"\n }\n return str(self.date.day)+\" \"+acc_list[self.date.month]+\" \"+str(self.date.year)\n\n\n def __unicode__(self):\n return self.get_str_date() + \" - \" + self.name\n\n class Meta:\n verbose_name = u\"Dzień liturgiczny\"\n verbose_name_plural = u\"Dni liturgiczne\"\n ordering = [\"-date\"]\n","sub_path":"intencje/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28495607","text":"# coding:utf-8\n\nfrom PyQt4.QtCore import QString, Qt, SIGNAL, QRect\nfrom PyQt4.QtGui import QColor, QFont, QWidget, QColorDialog\nfrom generalconfig_ui import *\n\nclass generalConfig(QWidget, Ui_Dialog):\n def __init__(self, parent, defaultConfig = None):\n QWidget.__init__(self)\n self.parent = parent\n self.setupUi(self)\n self.loadConfigFile()\n\n def loadConfigFile(self):\n \"\"\"Load settings\"\"\"\n self.settings = self.parent.settings\n \n self.colorMain.setColor( self.parse_colorstr(self.settings['UI']['main']['font-color']) )\n self.colorPing.setColor( self.parse_colorstr(self.settings['UI']['ping']['font-color']) )\n self.colorPlayers.setColor( self.parse_colorstr(self.settings['UI']['players']['font-color']) )\n self.fontMain.setCurrentFont(QFont(self.settings['UI']['main']['font-family']))\n self.fontPing.setCurrentFont(QFont(self.settings['UI']['ping']['font-family']))\n self.fontPlayers.setCurrentFont(QFont(self.settings['UI']['players']['font-family']))\n self.spinRefreshInterval.setValue(self.settings['UI']['refresh_interval'])\n if self.settings['UI']['show']['ping'] == True:\n self.checkPing.setCheckState(Qt.Checked)\n else:\n self.checkPing.setCheckState(Qt.Unchecked)\n if self.settings['UI']['show']['maxplayers'] == True:\n self.checkMaxplayers.setCheckState(Qt.Checked)\n else:\n self.checkMaxplayers.setCheckState(Qt.Unchecked)\n \n def parse_colorstr(self, string):\n self.color = string.split(\",\")\n if len(self.color) == 4:\n for component in self.color:\n if int(component) < 0 or int(component) > 255:\n raise Exception()\n return QColor(int(self.color[0]), int(self.color[1]), int(self.color[2]), int(self.color[3]))\n else:\n raise Exception()","sub_path":"contents/code/generalConfig.py","file_name":"generalConfig.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"498007172","text":"from datetime import timedelta\n\n# The DAG object; we'll need this to instantiate a DAG\nfrom airflow import DAG\n\n# Operators; we need this to operate!\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.utils.dates import days_ago\n# These args will get passed on to each operator\n# You can override them on a per-task basis during operator initialization\n\ndefault_args = {\n 'owner': 'Leo',\n 'depends_on_past': False,\n 'email': ['leotaysh@gmail.com'],\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n # 'queue': 
'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n # 'wait_for_downstream': False,\n # 'dag': dag,\n # 'sla': timedelta(hours=2),\n # 'execution_timeout': timedelta(seconds=300),\n # 'on_failure_callback': some_function,\n # 'on_success_callback': some_other_function,\n # 'on_retry_callback': another_function,\n # 'sla_miss_callback': yet_another_function,\n # 'trigger_rule': 'all_success'\n}\n\ndag = DAG(\n 'Test',\n default_args=default_args,\n description='A simple test DAG',\n schedule_interval=timedelta(days=1),\n start_date=days_ago(2),\n tags=['example'],\n)\n\nt1 = BashOperator(\n task_id='print_date',\n bash_command='date',\n dag=dag,\n)\n\nt1","sub_path":"test_dag.py","file_name":"test_dag.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"494632905","text":"import re\nimport sys\nimport numpy as np\nfrom numpy import linalg as linalg\n\n#printing options\nnp.set_printoptions(threshold='nan')\n\natomicNumber = {\n 'H' : 1,\n 'He' : 2,\n 'Li' : 3,\n 'Be' : 4,\n 'B' : 5,\n 'C' : 6,\n 'N' : 7,\n 'O' : 8,\n 'F' : 9,\n 'Ne' : 10,\n 'Na' : 11,\n 'Mg' : 12,\n 'Al' : 13,\n 'Si' : 14,\n 'P' : 15,\n 'S' : 16,\n 'Cl' : 17,\n 'Ar' : 18,\n 'K' : 19,\n 'Ca' : 20,\n 'Sc' : 21,\n 'Ti' : 22,\n 'V' : 23,\n 'Cr' : 24,\n 'Mn' : 25,\n 'Fe' : 26,\n 'Co' : 27,\n 'Ni' : 28,\n 'Cu' : 29,\n 'Zn' : 30,\n 'Ga' : 31,\n 'Ge' : 32,\n 'As' : 33,\n 'Se' : 34,\n 'Br' : 35,\n 'Kr' : 36,\n} \natomicMass = {\n 'H' : 1.0079,\n 'He' : 4.003,\n 'Li' : 6.941,\n 'Be' : 9.012,\n 'B' : 10.811,\n 'C' : 12.011,\n 'N' : 14.0071,\n 'O' : 15.999,\n 'F' : 19.00,\n 'Ne' : 20.18,\n 'Na' : 22.99,\n 'Mg' : 24.305,\n 'Al' : 26.98,\n 'Si' : 28.085,\n 'P' : 30.97,\n 'S' : 32.055,\n 'Cl' : 35.455,\n 'Ar' : 39.95,\n 'K' : 39.10,\n 'Ca' : 40.08,\n 'Sc' : 44.96,\n 'Ti' : 47.87,\n 'V' : 50.94,\n 'Cr' : 52.00,\n 'Mn' : 54.94,\n 'Fe' : 55.85,\n 'Co' : 58.93,\n 'Ni' : 58.69,\n 'Cu' : 63.55,\n 'Zn' : 65.38,\n 'Ga' : 69.72,\n 'Ge' : 72.63,\n 'As' : 74.92,\n 'Se' : 78.96,\n 'Br' : 79.905,\n 'Kr' : 83.80,\n}\n\ndef getNuclCoord(inFile):\n lineNum = 0\n atom = []\n coord = []\n nuclCoordRe = re.compile(r\"Standard Nuclear Orientation\")\n endCoordRe = re.compile(r\"----+\")\n\n # Find the line number of the last line contains \n # the string 'Standard Nuclear Orientation'\n for i, line in enumerate(inFile.readlines()):\n if (nuclCoordRe.search(line)):\n lineNum = i \n\n # Actual coordinate line starts three lines later\n lineNum = lineNum + 3\n \n # File should be rewinded\n inFile.seek(0)\n \n for i, line in enumerate(inFile.readlines()):\n if (i >= lineNum ):\n # if the coordinate part ends, readline also ends\n if (endCoordRe.search(line)):\n break \n \n splitLine = line.split()\n atom.append(splitLine[1])\n coord = coord + [float(splitLine[2])]\\\n +[float(splitLine[3])]+[float(splitLine[4])]\n \n \n npAtom = np.array(atom);\n npCoord = np.array(coord);\n \n return (npAtom, npCoord) \n\ndef mVector(atom):\n nAtom = len(atom)\n mVec = np.zeros((3*nAtom, 1))\n \n for i in range(0,nAtom):\n mVec[3*i]=mVec[3*i+1]=mVec[3*i+2] = atomicMass[atom[i]]\n \n #mArr = np.diagflat(mVec)\n \n #print(mArr)\n return mVec\n \ndef getBonds(atom, coord):\n nAtom = len(atom)\n bond = np.zeros((nAtom, nAtom))\n \n for i in range(0,nAtom-1):\n for j in range(i+1,nAtom):\n\n dist = coord[3*i:3*i+3]-coord[3*j:3*j+3]\n \n #for k in range(0,3):\n # dist[k] = coord[3*i+k]-coord[3*j+k]\n\n r = linalg.norm(dist)\n\n if 
(atomicNumber[atom[i]]>1 and r<1.8):\n bond[i,j] = 1\n bond[j,i] = 1\n \n if (atomicNumber[atom[i]]==1 and r<1.3):\n bond[i,j] = 1\n bond[j,i] = 1\n \n if (atomicNumber[atom[i]]>9 and atomicNumber[atom[j]]>1 and r<2.7):\n bond[i,j] = 1\n bond[j,i] = 1\n \n #Looking for hydrogen bonds\n for i in range(0,nAtom):\n if (atom[i]=='H'):\n for j in range(0, nAtom):\n if (bond[i,j]==1 and (atom[j]=='N' or atom[j]=='O')):\n for k in range(j, nAtom):\n if (k != j and (atom[k]=='N' or atom[k]=='O')):\n dist = coord[3*i:3*i+3]-coord[3*k:3*k+3]\n# dist = np.zeros(3);\n#\n# for l in range(0,3):\n# dist[l] = coord[3*i+l]-coord[3*k+l]\n\n r = linalg.norm(dist)\n\n if (r < 2.8):\n bond[i,k] = 1\n bond[k,i] = 1\n return bond\n \ndef matB(atom, coord):\n nInternal = 0 #number of redundant internal coordinates\n nAtom = len(atom)\n B = np.zeros([0, 3*nAtom]);\n z = np.zeros([0])\n bond = getBonds(atom, coord)\n wk = np.sum(bond,axis=1);\n print(wk);\n nStretch = 0\n nBend = 0\n\n #Stretch internal coordinates \n for i in range(0, nAtom-1):\n for j in range(i, nAtom):\n if (bond[i,j] == 1):\n dist = coord[3*i:3*i+3]-coord[3*j:3*j+3]\n# dist = np.zeros(3);\n#\n# for k in range(0,3):\n# dist[k] = coord[3*i+k]-coord[3*j+k]\n\n r = linalg.norm(dist)\n dist = dist/r\n \n nInternal = nInternal + 1\n z = np.append(z,[r])\n \n newRow = np.zeros([1, 3*nAtom]);\n newRow[0,3*i] = dist[0]\n newRow[0,3*i+1] = dist[1]\n newRow[0,3*i+2] = dist[2]\n newRow[0,3*j] = dist[0]\n newRow[0,3*j+1] = dist[1]\n newRow[0,3*j+2] = dist[2]\n \n B = np.append(B,newRow,axis=0);\n \n nStretch = nInternal\n \n print(\"Number of stretch internal coordinates :\", nStretch)\n #end of stretch parts\n \n for i in range(0, nAtom-1):\n for j in range(i, nAtom):\n if (bond[i,j] == 1):\n distij = coord[3*i:3*i+3]-coord[3*j:3*j+3]\n #xij = coord[3*i]-coord[3*j]\n #yij = coord[3*i+1]-coord[3*j+1]\n #zij = coord[3*i+2]-coord[3*j+2]\n rij = linalg.norm(distij)\n distij = distij/r\n \n ##I do not know why we should use this\n #If there is >2 bonds on atom j we use big value\n sinMin = 0.05\n if (wk[j] > 2):\n sinMin = 0.2\n \n print(\"Number of internal coordinates :\", nInternal)\n\n \n return(B,z)\n \ndef main():\n if (len(sys.argv)<3):\n print (\"You must input at least two argments\")\n \n filename1 = sys.argv[1]\n filename2 = sys.argv[2]\n \n inFile1 = open(filename1,'r')\n inFile2 = open(filename2,'r')\n \n (atom, coord1) = getNuclCoord(inFile1)\n (atom, coord2) = getNuclCoord(inFile2)\n mVec = mVector(atom)\n bond = getBonds(atom, coord1)\n (B,z) = matB(atom,coord1)\n\n print(bond)\n #print(B)\n #print(z)\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"dushin.py","file_name":"dushin.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326075419","text":"\n\nclass Configuration:\n def __init__(self):\n self.window_high = 640\n self.window_width = 640\n self.sleep_time = 0.03\n self.ship_x = 300\n self.ship_y = 200\n self.bomb_coord_increase = 1\n self.ship_coord_increase = 3\n self.submarine_increase = 2\n","sub_path":"Configuration.py","file_name":"Configuration.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532368750","text":"n = int(input())\nA = list(map(int, input().split()))\n\nabs_sum = 0\nminus = 0\nabs_list = []\nfor i in range(n):\n if A[i] < 0:\n minus += 1\n abs_list.append(abs(A[i]))\n abs_sum += abs_list[i]\n\nif minus % 2 != 0:\n 
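# An odd count of negative values means one element must stay negative:\n    # min(abs_list) was added into abs_sum once, so subtracting it twice turns\n    # the smallest-magnitude element back negative and keeps the sum maximal.\n    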
abs_sum -= min(abs_list)*2\nprint(abs_sum)\n","sub_path":"ABC_D/ABC125_D.py","file_name":"ABC125_D.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137536615","text":"\"\"\"\n该脚本用于调用训练好的模型权重去计算验证集/测试集的COCO指标\n以及每个类别的mAP(IoU=0.5)\n\"\"\"\n\nfrom json.decoder import JSONDecodeError\nimport os\nimport json\n\nimport torch\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom data import *\nfrom data.transforms import *\nfrom data.gdgrid import *\nfrom utils.augmentations import SSDAugmentation\nfrom utils.util_init import *\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\n\nfrom train_utils import get_coco_api_from_dataset, CocoEvaluator\n\n\ndef summarize(self, catId=None):\n \"\"\"\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n \"\"\"\n\n def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap == 1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n\n if isinstance(catId, int):\n s = s[:, :, catId, aind, mind]\n else:\n s = s[:, :, :, aind, mind]\n\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n\n if isinstance(catId, int):\n s = s[:, catId, aind, mind]\n else:\n s = s[:, :, aind, mind]\n\n if len(s[s > -1]) == 0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s > -1])\n\n print_string = iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)\n return mean_s, print_string\n\n stats, print_list = [0] * 12, [\"\"] * 12\n stats[0], print_list[0] = _summarize(1)\n stats[1], print_list[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2], print_list[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3], print_list[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4], print_list[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5], print_list[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6], print_list[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7], print_list[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8], print_list[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9], print_list[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10], print_list[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11], print_list[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n\n print_info = \"\\n\".join(print_list)\n\n if not self.eval:\n raise Exception('Please run accumulate() first')\n\n return stats, print_info\n\n\ndef main(parser_data):\n device = torch.device(parser_data.use_cuda if torch.cuda.is_available() else \"cpu\")\n print(\"Using {} device training.\".format(device.type))\n\n 
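# eval-time transform: only ToTensor is applied here; SSDAugmentation\n    # (imported above) is presumably the training-time pipeline\n    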
data_transform = {\n \"val\": transforms.Compose([transforms.ToTensor()])\n }\n\n # read class_indict\n label_json_path = './dianwang_classes.json'\n assert os.path.exists(label_json_path), \"json file {} dose not exist.\".format(label_json_path)\n json_file = open(label_json_path, 'r')\n class_dict = json.load(json_file)\n category_index = {v: k for k, v in class_dict.items()}\n\n dataset_root = parser_data.data_path\n \"\"\"\n # check voc root\n if os.path.exists(os.path.join(VOC_root, \"VOCdevkit\")) is False:\n raise FileNotFoundError(\"VOCdevkit dose not in path:'{}'.\".format(VOC_root))\n \"\"\"\n\n # 注意这里的collate_fn是自定义的,因为读取的数据包括image和targets,不能直接使用默认的方法合成batch\n batch_size = parser_data.batch_size\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers\n print('Using %g dataloader workers' % nw)\n\n # load validation data set\n val_dataset = DwDataset(dataset_root, data_transform[\"val\"], \"val.txt\")\n val_dataset_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=nw,\n pin_memory=True,\n collate_fn=val_dataset.collate_fn)\n\n # val_dataset = DwDataset(args.dataset_root,\n # compose_transforms,\n # \"val.txt\")\n # val_dataset_loader = data.DataLoader(valset, args.batch_size,\n # # num_workers=args.num_workers,\n # shuffle=False, \n # collate_fn=valset.collate_fn,\n # pin_memory=False)\n # create model num_classes equal background + 20 classes\n # 注意,这里的norm_layer要和训练脚本中保持一致\n num_classes = len(category_index) + 1 # +1 for background\n model = build_ssd('test', 300, num_classes) # initialize SSD\n\n # 载入你自己训练好的模型权重\n\n model.load_state_dict(torch.load(args.trained_model))\n print('Finished loading model!')\n \n # print(model)\n\n model.to(device)\n\n # evaluate on the test dataset\n coco = get_coco_api_from_dataset(val_dataset)\n iou_types = [\"bbox\"]\n coco_evaluator = CocoEvaluator(coco, iou_types)\n cpu_device = torch.device(\"cpu\")\n\n model.eval()\n with torch.no_grad():\n for image, targets in tqdm(val_dataset_loader, desc=\"validation...\"):\n if args.use_cuda and torch.cuda.is_available():\n image = Variable(image.cuda())\n # targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n else:\n image = Variable(image)\n # targets = [Variable(ann, volatile=True) for ann in targets]\n # 将图片传入指定设备device\n # image = list(img.to(device) for img in image)\n\n # inference\n print('+++++++++++', image.size())\n outputs = model(image)\n\n outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]\n res = {target[\"image_id\"].item(): output for target, output in zip(targets, outputs)}\n \n \n for i in range(len(outputs)):\n jsontext = {'image_id': targets[i]['image_id'].item(), 'objs':[]}\n for j in range(len(outputs[i][\"labels\"])):\n bbox, label, score = outputs[i][\"boxes\"][j], outputs[i][\"labels\"][j], outputs[i][\"scores\"][j]\n obj_dict = {\"label\":label.numpy().tolist(), \"bbox\":bbox.numpy().tolist(), \"score\":score.numpy().tolist()}\n jsontext[\"objs\"].append(obj_dict)\n jsondata = json.dumps(jsontext, indent=4, separators=(',', ': '))\n with open(\"./pred_results/img\" + str(targets[i]['image_id'].item()) + \".json\",'w') as f:\n f.write(jsondata)\n \n coco_evaluator.update(res)\n \n\n coco_evaluator.synchronize_between_processes()\n\n # accumulate predictions from all images\n coco_evaluator.accumulate()\n coco_evaluator.summarize()\n\n coco_eval = coco_evaluator.coco_eval[\"bbox\"]\n # calculate COCO info for all classes\n coco_stats, print_coco = 
summarize(coco_eval)\n\n    # calculate voc info for every class (IoU=0.5)\n    voc_map_info_list = []\n    for i in range(len(category_index)):\n        stats, _ = summarize(coco_eval, catId=i)\n        voc_map_info_list.append(\" {:15}: {}\".format(category_index[i + 1], stats[1]))\n\n    print_voc = \"\\n\".join(voc_map_info_list)\n    print(print_voc)\n\n    # save the validation results to a txt file\n    with open(\"record_mAP.txt\", \"w\") as f:\n        record_lines = [\"COCO results:\",\n                        print_coco,\n                        \"\",\n                        \"mAP(IoU=0.5) for each category:\",\n                        print_voc]\n        f.write(\"\\n\".join(record_lines))\n\ndef reinfer(img):\n    boxes = img[\"boxes\"]\n    labels = img[\"labels\"]\n    scores = img[\"scores\"]\n\n    keep = {}\n\n    bage_idxs = torch.where(labels==1)[0]\n    offground_idxs = torch.where(labels==2)[0]\n    ground_idxs = torch.where(labels==3)[0]\n    safebelt_idxs = torch.where(labels==4)[0]\n\n    # offground and ground are mutually exclusive\n    for offground_idx in offground_idxs:\n        for ground_idx in ground_idxs:\n            offground_box = boxes[offground_idx.item()]\n            ground_box = boxes[ground_idx.item()]\n\n            # count iou between the conflicting boxes\n            iou = get_iou(offground_box, scores[offground_idx.item()],\n                          ground_box, scores[ground_idx.item()])\n            # TODO: if iou is large, suppress the lower-scored detection\n\ndef get_iou(box1, score1, box2, score2):\n    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])\n    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])\n\n    # intersection clamped to zero for disjoint boxes; the scores are kept in\n    # the signature for callers but plain IoU does not use them\n    inter = (max(min(box1[2], box2[2]) - max(box1[0], box2[0]), 0) *\n             max(min(box1[3], box2[3]) - max(box1[1], box2[1]), 0))\n    union = area1 + area2 - inter\n    return inter / union if union > 0 else 0.0\n\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser(\n        description=__doc__)\n\n    # device type to use\n    parser.add_argument('--use_cuda', default='cuda', help='device')\n\n    # # number of target classes\n    # parser.add_argument('--num-classes', type=int, default='20', help='number of classes')\n\n    # dataset root directory (VOCdevkit)\n    parser.add_argument('--data-path', default='/home/qingren/Project/Tianchi_dw/Dataset', help='dataset root')\n\n    # trained weights file\n    parser.add_argument('--trained_model', default='checkpoints/model-70.pkl', type=str, help='training weights')\n\n    # batch size\n    parser.add_argument('--batch_size', default=1, type=int, metavar='N',\n                        help='batch size when validation.')\n\n    args = parser.parse_args()\n\n    main(args)\n","sub_path":"gd_eval.py","file_name":"gd_eval.py","file_ext":"py","file_size_in_byte":10473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185084263","text":"# Copyright (c) 2017 Iotic Labs Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://github.com/Iotic-Labs/py-application-examples/blob/master/LICENSE\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iotic Labs ExtMon2: Simple feed status\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nlogging.getLogger('rdflib').setLevel(logging.WARNING)\nlogging.getLogger('IoticAgent').setLevel(logging.WARNING)\nlogging.basicConfig(format='%(asctime)s,%(msecs)03d %(levelname)s [%(name)s] {%(threadName)s} %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nfrom sys import argv, exit # pylint: disable=redefined-builtin\nfrom os import environ, mkdir\nfrom os.path import exists, isdir, abspath, join, split\nfrom threading import Thread\nfrom functools import partial\nfrom datetime import datetime\n\nfrom humanize import naturaltime\n\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom IoticAgent import IOT\nfrom IoticAgent.Core.compat import Event, Lock, monotonic\n\nfrom .Config import Config\n\n\nEXTMON2 = 'extmon2'\n\nFEEDS = 'FD'\nCHANGED = 'CH'\nLASTCHANGE = 'LC'\n\nLAST_SEEN = 'L_S'\nSEEN = 'SE'\n\n# Note: used in html template or config\nNAME = 'name'\nLASTSEEN = 'lastseen'\nCLASS = 'class'\nMAX_AGE = 'max_age'\nWARN_AGE = 'warn_age'\nERROR_AGE = 'error_age'\n\n# If no feeddata has arrived update the HTML after N seconds\nMINCHANGE = 30\n\n\ndef __feeddata(client, stash, stashlock, data):\n with stashlock:\n if data['pid'] in stash[FEEDS]:\n logger.debug(\"Received FEEDATA from %s\", data['pid'])\n else:\n logger.warning(\"Got feeddata for unknown GUID: %s\", data['pid'])\n name = \"Feed: %s\" % data['pid']\n point_desc = client.describe(data['pid'])\n if point_desc is not None:\n name += \"
\" + point_desc['meta']['label']\n thing_desc = client.describe(point_desc['meta']['parent'])\n if thing_desc is not None:\n name += \"
\" + thing_desc['meta']['label']\n name += \"
\"\n stash[FEEDS][data['pid']] = {\n MAX_AGE: 600,\n WARN_AGE: 1200,\n ERROR_AGE: 2400,\n NAME: name\n }\n stash[FEEDS][data['pid']][LAST_SEEN] = datetime.utcnow()\n stash[FEEDS][data['pid']][SEEN] = True\n stash[CHANGED] = True\n\n\ndef extmon(config, stop):\n stash = {FEEDS: {},\n CHANGED: True,\n LASTCHANGE: 0}\n stashlock = Lock()\n\n templatedir = split(config.get(EXTMON2, 'template'))[0]\n templatefile = split(config.get(EXTMON2, 'template'))[1]\n wwwfile = join(config.get(EXTMON2, 'wwwpath'), 'index.html')\n\n feeds_list = config.get(EXTMON2, 'feeds')\n for feed in feeds_list:\n guid = config.get(feed, 'guid')\n stash[FEEDS][guid] = config.get(feed)\n stash[FEEDS][guid][SEEN] = False\n stash[FEEDS][guid][LAST_SEEN] = datetime.utcnow()\n max_age = stash[FEEDS][guid][MAX_AGE] = int(stash[FEEDS][guid][MAX_AGE])\n if WARN_AGE not in stash[FEEDS][guid]:\n stash[FEEDS][guid][WARN_AGE] = max_age * 2\n else:\n stash[FEEDS][guid][WARN_AGE] = int(stash[FEEDS][guid][WARN_AGE])\n if ERROR_AGE not in stash[FEEDS][guid]:\n stash[FEEDS][guid][ERROR_AGE] = max_age * 3\n else:\n stash[FEEDS][guid][ERROR_AGE] = int(stash[FEEDS][guid][ERROR_AGE])\n\n client = IOT.Client(config=config.get(EXTMON2, 'agent'))\n client.register_catchall_feeddata(partial(__feeddata, client, stash, stashlock))\n\n while not stop.is_set():\n with client:\n try:\n thing = client.create_thing(\"extmon2\")\n except:\n logger.error(\"Failed to create_thing(extmon2). Giving up.\")\n stop.set()\n return\n\n with stashlock:\n for guid in stash[FEEDS]:\n try:\n thing.follow(guid)\n except:\n logger.error(\"Failed to follow('%s'). Giving up.\", guid)\n stop.set()\n return\n if NAME not in stash[FEEDS][guid]:\n desc = client.describe(guid)\n if desc is None:\n stash[FEEDS][guid][NAME] = 'No Public Meta GUID: ' + guid\n else:\n stash[FEEDS][guid][NAME] = desc['meta']['label']\n\n while not stop.is_set():\n with stashlock:\n if stash[CHANGED] or monotonic() - stash[LASTCHANGE] >= MINCHANGE:\n logger.debug(\"Stash changed, updating HTML\")\n nowtime = datetime.utcnow()\n stash[LASTCHANGE] = monotonic()\n\n for guid in stash[FEEDS]:\n delta = nowtime - stash[FEEDS][guid][LAST_SEEN]\n delta_secs = delta.total_seconds()\n stash[FEEDS][guid][LASTSEEN] = naturaltime(delta_secs)\n\n if delta_secs < stash[FEEDS][guid][MAX_AGE] and stash[FEEDS][guid][SEEN]:\n stash[FEEDS][guid][CLASS] = 'green'\n elif delta_secs < stash[FEEDS][guid][WARN_AGE]:\n stash[FEEDS][guid][CLASS] = 'yellow'\n else:\n stash[FEEDS][guid][CLASS] = 'red'\n\n if stash[FEEDS][guid][SEEN] is False:\n stash[FEEDS][guid][LASTSEEN] = \"Not seen since restart: \" + stash[FEEDS][guid][LASTSEEN]\n\n j2env = Environment(loader=FileSystemLoader(templatedir), trim_blocks=True)\n with open(wwwfile, 'w') as f:\n f.write(j2env.get_template(templatefile).render(feeds=stash[FEEDS]))\n\n stash[CHANGED] = False\n\n stop.wait(timeout=1)\n\n # If this function ends prematurely ensure stop is set!\n stop.set()\n\n\ndef usage():\n logger.error('Usage: python3 -m Ioticiser ../cfg/example.ini')\n return 1\n\n\ndef main(): # pylint: disable=too-many-return-statements,too-many-branches\n if len(argv) < 2:\n if not exists(argv[1]):\n return usage()\n try:\n cfg = Config(argv[1])\n except:\n logger.exception(\"Failed to load/parse Config file '%s'. 
Giving up.\", argv[1])\n return 1\n\n wwwpath = cfg.get(EXTMON2, 'wwwpath')\n if wwwpath is None:\n logger.error(\"Config file must have [extmon2] section with wwwpath = /path/to/storage\")\n return 1\n wwwpath = abspath(wwwpath)\n if not exists(wwwpath):\n mkdir(wwwpath)\n elif exists(wwwpath) and not isdir(wwwpath):\n logger.error(\"Config file must have [extmon2] wwpath not a directory\")\n return 1\n\n template = cfg.get(EXTMON2, 'template')\n if template is None or not exists(template):\n logger.error(\"Config file must have [extmon2] section with template = /path/to/file.html\")\n return 1\n\n agent = cfg.get(EXTMON2, 'agent')\n if agent is None or not exists(agent):\n logger.error(\"Config file must have [extmon2] section with agent = /path/to/agent.ini\")\n return 1\n\n feeds_list = cfg.get(EXTMON2, 'feeds')\n if feeds_list is None:\n logger.error(\"Config file must have [extmon2] feeds = \\n\\nFeedeName\\n\\tFeed_Two\")\n return 1\n\n for feed in feeds_list:\n if cfg.get(feed, 'guid') is None or cfg.get(feed, MAX_AGE) is None:\n logger.error(\"Config section for feed [%s] must have guid and max_age\", feed)\n return 1\n\n stop_evt = Event()\n thread = Thread(target=extmon, name='extmon', args=(cfg, stop_evt,))\n thread.start()\n\n if 'IOTIC_BACKGROUND' in environ:\n from signal import signal, SIGINT, SIGTERM\n\n logger.info(\"Started in non-interactive mode.\")\n\n def exit_handler(signum, frame): # pylint: disable=unused-argument\n logger.info('Shutdown requested')\n stop_evt.set()\n\n signal(SIGINT, exit_handler)\n signal(SIGTERM, exit_handler)\n\n while not stop_evt.is_set():\n stop_evt.wait(timeout=5)\n stop_evt.set()\n else:\n try:\n while not stop_evt.is_set():\n logger.info('Enter ctrl+c to exit')\n stop_evt.wait(timeout=600)\n except SystemExit:\n pass\n except KeyboardInterrupt:\n pass\n stop_evt.set()\n\n logger.info(\"Waiting for thread to finish...\")\n thread.join()\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"feed_monitor/src/ExtMon2/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"464386268","text":"\r\nimport sys\r\nimport os\r\n\r\nfrom PyQt5 import QtGui, QtWidgets\r\nfrom CmdGui import Ui_Dialog\r\n\r\n\r\nclass MyFirstGuiProgram(Ui_Dialog):\r\n\r\n def __init__(self, dialog):\r\n Ui_Dialog.__init__(self)\r\n self.setupUi(dialog)\r\n\r\n self.execButton.clicked.connect(self.addInputTextToListbox)\r\n self.ListDrives.clicked.connect(self.listLocalDrives)\r\n self.ListDir.clicked.connect(self.listDirectories)\r\n self.ListFiles.clicked.connect(self.listFilesOnly)\r\n\r\n def addInputTextToListbox(self):\r\n cmd = self.lineEdit.text()\r\n if cmd == \"\":\r\n os.system('echo Please enter a command to execute.')\r\n else:\r\n os.system('cls')\r\n os.system(cmd)\r\n\r\n def listLocalDrives(self):\r\n os.system('cls')\r\n os.system('fsutil fsinfo drives')\r\n\r\n def listDirectories(self):\r\n os.system('cls')\r\n os.system('dir /ad')\r\n\r\n def listFilesOnly(self):\r\n os.system('cls')\r\n os.system('dir /b /oe')\r\n\r\nif __name__ == '__main__':\r\n\r\n app = QtWidgets.QApplication([])\r\n app.setWindowIcon(QtGui.QIcon('hp.ico'))\r\n dialog = QtWidgets.QDialog()\r\n prog = MyFirstGuiProgram(dialog)\r\n dialog.setWindowTitle('CmdButtons')\r\n dialog.show()\r\n\r\n 
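# enter the Qt event loop: exec_() blocks until the dialog closes, and its\r\n    # return code becomes the process exit status via sys.exit\r\n    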
sys.exit(app.exec_())\r\n","sub_path":"CmdButtons1.py","file_name":"CmdButtons1.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"61742860","text":"\n#This file contains 2 models singletask, multitask\n\n\nimport functools\nfrom typing import Optional\n\nimport numpy as np\nimport tree\nfrom gym.spaces import Box, Dict, Discrete, MultiDiscrete, Tuple\nfrom ray.rllib.models.preprocessors import get_preprocessor, Preprocessor\nfrom ray.rllib.models import MODEL_DEFAULTS\nfrom ray.rllib.models.torch.model import TorchModel\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.models.base_model import RecurrentModel, Model, ModelIO\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.torch.misc import SlimFC\n\nfrom ray.rllib.models.utils import get_activation_fn\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.framework import try_import_torch\nfrom vaemodel import Encoder\nfrom ray.rllib.models.torch.complex_input_net import ComplexInputNetwork\nfrom ray.rllib.policy.view_requirement import ViewRequirement\n\nfrom typing import Dict, List, Tuple\nfrom ray.rllib.utils.typing import ModelConfigDict, TensorType\nfrom ray.rllib.policy.rnn_sequencing import add_time_dimension\nimport time\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.torch.misc import (\n normc_initializer,\n same_padding,\n SlimConv2d,\n SlimFC,\n)\nfrom ray.rllib.models.utils import get_activation_fn, get_filter_config\nfrom ray.rllib.models.torch.recurrent_net import RecurrentNetwork as TorchRNN\n\ntorch, nn = try_import_torch()\n\n\n# The global, shared layer to be used by both models.\n# this model outputs a 512 latent dimension\n\nBEOGYM_GLOBAL_SHARED_BACKBONE= Encoder(channels=5, ch=32, z=512)\n#if using lstm this could be used:\n#TORCH_GLOBAL_SHARED_BACKBONE= VAE(channel_in=1, ch=32, z=512)\n\nBEOGYM_GLOBAL_SHARED_POLICY = SlimFC(\n 64,\n 5,\n activation_fn=nn.ReLU,\n initializer=torch.nn.init.xavier_uniform_,\n)\n\n#this is class is used when we are working with a single game\nclass SingleBeogymModel(TorchModelV2, nn.Module):\n\n\n def __init__(\n self, observation_space, action_space, num_outputs, model_config, name\n ):\n TorchModelV2.__init__(\n self, observation_space, action_space, num_outputs, model_config, name\n )\n nn.Module.__init__(self)\n\n\n\n\nclass SharedBackboneAtariModel(ComplexInputNetwork):\n def __init__(self, observation_space, action_space, num_outputs, model_config, name):\n super().__init__(observation_space, action_space, num_outputs, model_config, name)\n\n\n\n\n\n\nclass LSTM2Network(TorchRNN, nn.Module):\n \"\"\"A conv. 
+ recurrent torch net example using a pre-trained MobileNet.\"\"\"\n\n def __init__(\n self, obs_space, action_space, num_outputs, model_config, name\n ):\n\n TorchRNN.__init__(\n self, obs_space, action_space, num_outputs, model_config, name\n )\n nn.Module.__init__(self)\n self.lstm_state_size = 256\n self.cnn_shape = [84,84,3]\n # self.cnn_shape = list(cnn_shape)\n self.visual_size_in = self.cnn_shape[0] * self.cnn_shape[1] * self.cnn_shape[2]\n # MobileNetV2 has a flat output of (1000,).\n self.visual_size_out = 512\n\n layers = []\n if not model_config.get(\"conv_filters\"):\n model_config[\"conv_filters\"] = get_filter_config(obs_space.shape)\n filters = self.model_config[\"conv_filters\"]\n assert len(filters) > 0, \"Must provide at least 1 entry in `conv_filters`!\"\n filters = [[16, 3, 2], [32, 3, 2], [64, 3, 2], [128, 3, 2], [256, 3, 2], [512,3,1]]\n\n (w, h, in_channels) = (84,84,3)\n in_size = [w, h]\n for out_channels, kernel, stride in filters[:-1]:\n padding, out_size = same_padding(in_size, kernel, stride)\n layers.append(\n SlimConv2d(\n in_channels,\n out_channels,\n kernel,\n stride,\n padding,\n activation_fn='relu',\n )\n )\n in_channels = out_channels\n in_size = out_size\n\n out_channels, kernel, stride = filters[-1]\n\n layers.append(\n SlimConv2d(\n in_channels,\n out_channels,\n kernel,\n stride,\n None, # padding=valid\n activation_fn='relu',\n )\n )\n\n self.cnn_model = nn.Sequential(*layers)\n\n\n\n self.lstm = nn.LSTM(\n self.visual_size_out+2, self.lstm_state_size, batch_first=True\n )\n\n\n self.final_lstm = nn.LSTM(\n 258, self.lstm_state_size, batch_first=True\n )\n\n\n self.logits = SlimFC(\n in_size=self.lstm_state_size,\n out_size=self.num_outputs,\n activation_fn=None,\n initializer=torch.nn.init.xavier_uniform_,\n )\n self.value_branch = SlimFC(\n in_size=self.lstm_state_size,\n out_size=1,\n activation_fn=None,\n initializer=torch.nn.init.xavier_uniform_,\n )\n\n\n # Holds the current \"base\" output (before logits layer).\n self._features = None\n self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(\n SampleBatch.ACTIONS, space=self.action_space, shift=-1\n )\n self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(\n SampleBatch.REWARDS, shift=-1\n )\n\n @override(ModelV2)\n def forward(\n self,\n input_dict: Dict[str, TensorType],\n state: List[TensorType],\n seq_lens: TensorType,\n ) -> Tuple[TensorType, List[TensorType]]:\n \n \"\"\"Adds time dimension to batch before sending inputs to forward_rnn().\n\n You should implement forward_rnn() in your subclass.\"\"\"\n flat_inputs = torch.cat((input_dict[\"obs\"]['obs'].view(input_dict[\"obs\"]['obs'].shape[0], -1), input_dict[\"obs\"]['aux'].view(input_dict[\"obs\"]['aux'].shape[0], -1)), dim=1).float()\n # flat_inputs = input_dict[\"obs\"]['obs'].view(input_dict[\"obs\"]['obs'].shape[0], -1).float()\n # Note that max_seq_len != input_dict.max_seq_len != seq_lens.max()\n # as input_dict may have extra zero-padding beyond seq_lens.max().\n # Use add_time_dimension to handle this\n assert seq_lens is not None\n rew=torch.reshape(input_dict[SampleBatch.PREV_REWARDS].float(), [-1, 1])\n act=torch.reshape(input_dict[SampleBatch.PREV_ACTIONS].float(), [-1, 1])\n aux=torch.cat((rew,act), dim=1)\n flat_inputs = torch.cat((flat_inputs,aux), dim=1)\n self.time_major = self.model_config.get(\"_time_major\", False)\n inputs = add_time_dimension(\n flat_inputs,\n seq_lens=seq_lens,\n framework=\"torch\",\n time_major=self.time_major,\n )\n output, new_state = self.forward_rnn(inputs, 
state, seq_lens,aux)\n output = torch.reshape(output, [-1, self.num_outputs])\n # print(input_dict['prev_rewards'])\n # print(input_dict[SampleBatch.PREV_REWARDS].unsqueeze(dim=1).shape)\n return output, new_state\n\n\n\n @override(TorchRNN)\n def forward_rnn(self, inputs, state, seq_lens,aux):\n # Create image dims.\n info_in = inputs[:, :,84*84*3:84*84*3+2]\n ar_in = inputs[:, :,84*84*3+2:]\n vision_in = inputs[:, :, :84*84*3]\n vision_in = torch.reshape(vision_in, [-1] + self.cnn_shape)\n vision_in = vision_in.permute(0, 3, 1, 2)\n vision_out = self.cnn_model(vision_in)\n # Flatten.\n vision_out = vision_out.squeeze(2)\n vision_out = vision_out.squeeze(2)\n vision_out_time_ranked = torch.reshape(\n vision_out, [inputs.shape[0], inputs.shape[1], vision_out.shape[-1]]\n )\n if len(state[0].shape) == 2:\n state[0] = state[0].unsqueeze(0)\n state[1] = state[1].unsqueeze(0)\n state[2] = state[2].unsqueeze(0)\n state[3] = state[3].unsqueeze(0)\n # Forward through LSTM.\n vision_out_time_ranked = torch.cat((vision_out_time_ranked,info_in), dim=2)\n self._features, [h, c] = self.lstm(vision_out_time_ranked, [state[0], state[1]])\n # Forward LSTM out through logits layer and value layer.\n self._features = torch.cat((self._features,ar_in), dim=2)\n self._features, [fh, fc] = self.final_lstm(self._features, [state[2], state[3]])\n logits = self.logits(self._features)\n return logits, [h.squeeze(0), c.squeeze(0),fh.squeeze(0), fc.squeeze(0)]\n\n @override(ModelV2)\n def get_initial_state(self):\n # Place hidden states on same device as model.\n linear = next(self.logits._model.children())\n h = [\n linear.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\n linear.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\n linear.weight.new(1, self.lstm_state_size).zero_().squeeze(0), \n linear.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\n ]\n return h\n\n\n @override(ModelV2)\n def value_function(self):\n assert self._features is not None, \"must call forward() first\"\n return torch.reshape(self.value_branch(self._features), [-1])","sub_path":"models/beogymmodels.py","file_name":"beogymmodels.py","file_ext":"py","file_size_in_byte":9256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"158247907","text":"from flask import Blueprint, flash, render_template, request, session, abort, \\\n redirect, url_for\nimport requests\nimport os\nfrom app import app\n\nmod_pages = Blueprint('pages', __name__, url_prefix='/pages')\n\n@mod_pages.route(\"/games\")\ndef games():\n return render_template('pages/games.html')\n\n@app.route(\"/\")\n@mod_pages.route(\"/asian-beauty\")\ndef asian_beauty():\n print(os.environ)\n api_key = os.environ['TUMBLR_API_KEY']\n r = requests.get(\"http://api.tumblr.com/v2/blog/xkcn.tumblr.com/posts/photo?api_key=\" + api_key + \"&limit=10\")\n return render_template('pages/asian-beauty.html', images=r.json()['response']['posts'])","sub_path":"app/mod_pages/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"431233977","text":"from glyphtools import get_glyph_metrics, categorize_glyph\nfrom fontFeatures import ValueRecord, Attachment, Positioning, Chaining, Routine\nimport warnings\n\n\ndef add_value_records(vr1, vr2):\n if vr1.xPlacement or vr2.xPlacement:\n vr1.xPlacement = (vr1.xPlacement or 0) + (vr2.xPlacement or 0)\n if vr1.yPlacement or vr2.yPlacement:\n vr1.yPlacement = 
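# ---------------------------------------------------------------------------
# Minimal stand-alone sketch of the pattern LSTM2Network uses above in
# models/beogymmodels.py: the previous reward and previous action are
# concatenated onto each timestep's visual features before the recurrent
# pass. Batch size, sequence length, and the 512-wide feature vector are
# assumed illustration values; only the concat-then-LSTM structure is taken
# from the model above.
import torch
import torch.nn as nn

B, T, F = 4, 10, 512                                   # assumed shapes
lstm = nn.LSTM(F + 2, 256, batch_first=True)           # 256 = lstm_state_size

vision_out = torch.randn(B, T, F)                      # stand-in CNN output
prev_rew = torch.randn(B, T, 1)                        # reward at t-1
prev_act = torch.randint(0, 5, (B, T, 1)).float()      # discrete action at t-1

inputs = torch.cat((vision_out, prev_rew, prev_act), dim=2)   # [B, T, F + 2]
features, (h, c) = lstm(inputs)
assert features.shape == (B, T, 256)
# ---------------------------------------------------------------------------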
(vr1.yPlacement or 0) + (vr2.yPlacement or 0)\n if vr1.xAdvance or vr2.xAdvance:\n vr1.xAdvance = (vr1.xAdvance or 0) + (vr2.xAdvance or 0)\n if vr1.yAdvance or vr2.yAdvance:\n vr1.yAdvance = (vr1.yAdvance or 0) + (vr2.yAdvance or 0)\n\n\nclass JankyPos:\n def __init__(self, font, direction=\"LTR\"):\n self.font = font\n self.direction = direction\n\n def serialize_buffer(self, buf):\n \"\"\"Returns the contents of the given buffer in a string format similar to\n that used by hb-shape.\"\"\"\n outs = []\n for info in buf:\n position = info[\"position\"]\n outs.append(\"%s\" % info[\"glyph\"])\n outs[-1] = outs[-1] + \"+%i\" % position.xAdvance\n if position.xPlacement != 0 or position.yPlacement != 0:\n outs[-1] = outs[-1] + \"@<%i,%i>\" % (\n position.xPlacement or 0,\n position.yPlacement or 0,\n )\n return \"|\".join(outs)\n\n def positioning_buffer(self, glyphstring):\n return [\n {\n \"glyph\": g,\n \"position\": ValueRecord(\n xAdvance=get_glyph_metrics(self.font, g)[\"width\"],\n ),\n \"category\": categorize_glyph(self.font, g),\n }\n for g in glyphstring\n ]\n\n def process_fontfeatures(self, buf, ff):\n features = [\"rvrn\"]\n if self.direction == \"LTR\":\n features.extend([\"ltra\", \"ltrm\"])\n elif self.direction == \"RTL\":\n features.extend([\"rtla\", \"rtlm\"])\n features.extend([\"frac\", \"numr\", \"dnom\", \"rand\"])\n features.extend([\"abvm\", \"blwm\", \"ccmp\", \"locl\", \"mark\", \"mkmk\", \"rlig\"])\n if self.direction == \"LTR\" or self.direction == \"RTL\":\n features.extend([\"calt\", \"clig\", \"curs\", \"dist\", \"kern\", \"liga\", \"rclt\"])\n else:\n features.extend([\"vert\"])\n\n for f in features:\n if f not in ff.features:\n continue\n for r in ff.features[f]:\n if isinstance(r, Routine):\n buf = self.process_rules(buf, r.rules)\n else:\n buf = self.process_rules(buf, [r])\n if self.direction == \"RTL\":\n buf = list(reversed(buf))\n return buf\n\n def process_rules(self, buf, rules):\n for r in rules:\n if isinstance(r, Positioning):\n if len(r.glyphs) == 1:\n buf = self.position_one(buf, r)\n else:\n continue # XXX\n elif isinstance(r, Attachment):\n if r.is_cursive:\n buf = self.attach_cursive(buf, r)\n else:\n buf = self.attach(buf, r)\n elif isinstance(r, Chaining):\n buf = self.chain(buf, r)\n else:\n continue\n return buf\n\n def chain(self, buf, rule):\n # XXXX\n return buf\n\n def position_one(self, buf, rule):\n applicable_range = range(\n 0 + len(rule.precontext), len(buf) - len(rule.postcontext)\n )\n assert len(rule.glyphs) == 1\n for i in applicable_range:\n g, vr = buf[i][\"glyph\"], buf[i][\"position\"]\n if rule.precontext or rule.postcontext:\n pre = [x[\"glyph\"] for x in buf[i - len(rule.precontext) + 1 : i]]\n post = [x[\"glyph\"] for x in buf[i + 1 : i + len(rule.postcontext) + 1]]\n if tuple(pre) != tuple(rule.precontext) or tuple(post) != tuple(\n rule.postcontext\n ):\n continue\n if g not in rule.glyphs[0]:\n continue\n add_value_records(vr, rule.valuerecords[0])\n return buf\n\n def attach(self, buf, rule):\n for ix, info in enumerate(buf):\n g = info[\"glyph\"]\n vr = info[\"position\"]\n if ix == 0:\n continue\n # XXX search backwards until you find a base\n # XXX Unless we are doing mkmk\n previous = ix - 1\n while previous > 0 and buf[previous][\"category\"][0] != \"base\":\n previous = previous - 1\n prev = buf[previous][\"glyph\"]\n prevVr = buf[previous][\"position\"]\n if g in rule.marks and ix > 0 and prev in rule.bases:\n xpos = rule.bases[prev][0] - rule.marks[g][0]\n ypos = rule.bases[prev][1] - rule.marks[g][1]\n 
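# NOTE (annotation, not in the original record): xpos/ypos computed above are
# the deltas that move the mark glyph's anchor onto the base glyph's anchor.
# The LTR branch just below additionally subtracts the previous glyph's
# xAdvance because by the time the mark is positioned the pen has already
# advanced past the base.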
vr.xPlacement = (vr.xPlacement or 0) + xpos\n vr.yPlacement = (vr.yPlacement or 0) + ypos\n if self.direction == \"LTR\":\n vr.xPlacement = (vr.xPlacement or 0) - prevVr.xAdvance\n return buf\n\n def attach_cursive(self, buf, rule):\n for j, info in enumerate(buf):\n g = info[\"glyph\"]\n vr = info[\"position\"]\n if j == 0 or buf[j][\"category\"][0] != \"base\":\n continue\n i = j - 1\n while i > 0 and buf[i][\"category\"][0] != \"base\":\n i = i - 1\n\n # Get entry anchor for i and exit anchor for i\n prev = buf[i][\"glyph\"]\n if g not in rule.bases or not prev in rule.marks:\n continue\n exit_x, exit_y = rule.marks[prev]\n entry_x, entry_y = rule.bases[g]\n if self.direction == \"RTL\":\n d = exit_x + (buf[i][\"position\"].xPlacement or 0)\n buf[i][\"position\"].xAdvance = (buf[i][\"position\"].xAdvance or 0) - d\n buf[i][\"position\"].xPlacement = (buf[i][\"position\"].xPlacement or 0) - d\n buf[j][\"position\"].xAdvance = entry_x + (\n buf[j][\"position\"].xPlacement or 0\n )\n else:\n raise ValueError\n child = i\n parent = j\n x_offset = entry_x - exit_x\n y_offset = entry_y - exit_y\n if True or not (rule.flags & 1): # LeftToRight XXX\n parent, child = child, parent\n x_offset = -x_offset\n y_offset = -y_offset\n buf[child][\"position\"].yPlacement = (\n buf[parent][\"position\"].yPlacement or 0\n ) + y_offset\n return buf\n\n\nif __name__ == \"__main__\":\n import sys\n from fontFeatures.ttLib import unparse\n from fontTools.ttLib import TTFont\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Test janky positioning\")\n parser.add_argument(\"font\", metavar=\"FONT\", help=\"font file\")\n parser.add_argument(\"glyphs\", metavar=\"GLYPHS\", help=\"glyph string\")\n parser.add_argument(\"--direction\", action=\"store\", help=\"direction\")\n\n args = parser.parse_args()\n font = TTFont(args.font)\n glyphs = args.glyphs.split()\n ff = unparse(font)\n janky = JankyPos(font)\n if args.direction:\n janky.direction = args.direction\n buf = janky.positioning_buffer(glyphs)\n buf = janky.process_fontfeatures(buf, ff)\n print(janky.serialize_buffer(buf))\n","sub_path":"fontFeatures/jankyPOS.py","file_name":"jankyPOS.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357904894","text":"'''\nTest code for Modeling Epidemics\n\nEmma Nechamkin and Anne Rogers\nJuly 2018\n\nBorja Sotomayor\nSeptember 2018, 2020\n\nAnne Rogers\nJuly 2019, July 2021\n'''\n\nimport json\nimport os\nimport sys\nimport random\n\nimport pytest\n\nimport sir\n\n### TODO: question: can the call to sys.path go away?\n\n# Handle the fact that the grading code may not\n# be in the same directory as sir.py\nsys.path.append(os.getcwd())\n\n# Get the name of the directory that holds the grading code.\nBASE_DIR = os.path.dirname(__file__)\nTEST_DIR = os.path.join(BASE_DIR, \"tests\")\n\n\n###### Test utility funtions ######\ndef gen_check_rand_calls(seed, max_num_calls):\n '''\n Generate a function that can be used to check whether the correct\n number of calls were made to the random number generator.\n\n Inputs:\n seed (int): the seed for the random number generator\n max_num_calls (int): the maximum number of calls to\n random.random() that will need to be verified.\n\n Returns: function of one variable\n '''\n\n random.seed(seed)\n rand_vals = [random.random() for i in range(max_num_calls+1)]\n\n def check(expected_num_calls):\n '''\n Check whether the expected number of calls to random.random()\n were 
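# ---------------------------------------------------------------------------
# Stand-alone sketch of the accumulation rule add_value_records() applies in
# fontFeatures/jankyPOS.py above: unset (None) fields are treated as zero and
# the deltas are summed in place. SimpleNamespace is a hypothetical stand-in
# for fontFeatures.ValueRecord so the sketch runs without that dependency.
from types import SimpleNamespace

def add_vr(vr1, vr2):
    for field in ("xPlacement", "yPlacement", "xAdvance", "yAdvance"):
        a, b = getattr(vr1, field), getattr(vr2, field)
        if a or b:
            setattr(vr1, field, (a or 0) + (b or 0))

base = SimpleNamespace(xPlacement=None, yPlacement=None, xAdvance=600, yAdvance=None)
kern = SimpleNamespace(xPlacement=-30, yPlacement=None, xAdvance=-30, yAdvance=None)
add_vr(base, kern)
assert (base.xPlacement, base.xAdvance) == (-30, 570)
# ---------------------------------------------------------------------------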
made.\n\n Inputs:\n expected_num_calls: the number of calls that should have\n been made.\n\n Returns: boolean that will be True if the check succeeded and\n False otherwise and, if necessary, an error message as a\n string.\n '''\n assert expected_num_calls <= max_num_calls\n\n # Make a call to random.\n actual_r = random.random()\n expected_r = rand_vals[expected_num_calls]\n\n # Did the call to random yield the expected value?\n if actual_r == pytest.approx(expected_r):\n return True, None\n\n for i, rand_val in enumerate(rand_vals):\n if actual_r == pytest.approx(rand_val):\n if i < expected_num_calls:\n return False, \"Not enough calls to random.random()\"\n return False, \"Too many calls to random.random()\"\n\n return False, \"Incorrect number of calls to random.random()\"\n\n return check\n\n\nCHECK_RAND_20170217 = gen_check_rand_calls(20170217, 100)\n\n\ndef convert_city(params, city_key=\"city\"):\n \"\"\"\n Convert the representation of the individual people in the city\n from lists to tuples.\n\n Args:\n params (dictionary): the parameters for a given test\n city_key (str, optional): Defaults to \"city\".\n\n Returns:\n [list of tuples]: returns a list of person tuples\n \"\"\"\n return [tuple(p) for p in params[city_key]]\n\n\ndef read_config_file(filename):\n '''\n Load the test cases from a JSON file.\n\n Inputs:\n filename (string): the name of the test configuration file.\n\n Returns: (list) test cases\n '''\n\n with open(os.path.join(TEST_DIR, filename)) as f:\n tests = json.load(f)\n\n for t in tests:\n if \"city\" not in t:\n break\n t[\"city\"] = convert_city(t)\n\n # The test numbers start at 1\n return zip(range(len(tests)), tests)\n\n\ndef check_sequence(recreate_msg, actual, expected):\n \"\"\"\n Do a few standard checks for sequences: (1) Does the actual value have the\n expected length, (2) do the individual elements of the actual value have the\n right types, and (3) do the actual and expected values match.\n\n Args:\n recreate_msg (string): explains how to rerun the test in ipython3\n actual (tuple or list): the actual value\n expected (typle or list): the expected value\n \"\"\"\n\n expected_type_str = \"tuple\" if isinstance(expected, tuple) else \"list\"\n\n msg = \"Result should be a {} of length {}\\n\"\n msg += recreate_msg\n msg = msg.format(expected_type_str, len(expected))\n\n # check length\n assert len(actual) == len(expected), msg\n\n # check element types\n type_msg = \"Element {} in the {} should have type {}\\n\"\n type_msg += recreate_msg\n\n for i, (a, e) in enumerate(zip(actual, expected)):\n assert isinstance(a, type(e)), \\\n type_msg.format(i, expected_type_str, type(e))\n\n # check whole value\n val_msg = \"Actual ({}) and expected ({}) values do not match an index {}.\\n\"\n val_msg += \" Actual: {}\\n\"\n val_msg += \" Expected: {}\\n\"\n val_msg += recreate_msg\n assert actual == expected, val_msg\n\n\ndef check_result(recreate_msg, actual, expected):\n \"\"\"\n Verify that the actual value return from a function matches the\n expected value.\n\n Args:\n recreate_msg (string): describes how to recreate the test in ipython3\n actual (some type): actual value.\n expected (some type): expected value\n \"\"\"\n msg = \"The function returned None.\"\n msg += \" Did you forget to include a return statement?\\n\"\n assert actual is not None, \\\n msg + recreate_msg + \"\\n\"\n\n msg = \"The function returned a value of the wrong type.\\n\"\n msg += \" Expected return type: {}.\\n\"\n msg += \" Actual return type: {}.\\n\"\n msg += recreate_msg 
+ \"\\n\"\n assert isinstance(actual, type(expected)), \\\n msg.format(type(expected), type(actual))\n\n if isinstance(expected, (tuple, list)):\n # do some quick length and element type checks on the sequence\n check_sequence(recreate_msg, actual, expected)\n else:\n # check the values.\n msg = \"Actual ({}) and expected ({}) values do not match.\\n\"\n msg += recreate_msg + \"\\n\"\n assert actual == expected, \\\n msg.format(actual, expected)\n\n\n\n\n###### Task: has_an_infected_neighbor ######\ndef __test_has_an_infected_neighbor(test_params, is_test6):\n '''\n Test harness for has_an_infected_neighbor\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city, index of a location in the city,\n the expected result\n '''\n\n test_num, params = test_params\n\n city = params[\"city\"]\n city_copy = city[:]\n actual = sir.has_an_infected_neighbor(city, params[\"location\"])\n\n recreate_msg = (\"See the information for Task {}1, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(\"6-\" if is_test6 else \"\", test_num)\n\n expected = params[\"expected\"]\n check_result(recreate_msg, actual, expected)\n\n # Extra check\n assert city_copy == city, \\\n \"\\nDo not modify the input city!\\n\" + recreate_msg\n\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"has_infected_neighbor_tests.json\"))\ndef test_has_an_infected_neighbor(test_params):\n \"\"\"\n Test for has_an_infected_neighbor (no vaxxed)\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city, index of a location in the city,\n the expected result\n \"\"\"\n __test_has_an_infected_neighbor(test_params, False)\n\n\n@pytest.mark.vax\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vax_has_infected_neighbor_tests.json\"))\ndef test_vax_has_an_infected_neighbor(test_params):\n \"\"\"\n Test for has_an_infected_neighbor with vaxxed\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city, index of a location in the city,\n the expected result\n \"\"\"\n __test_has_an_infected_neighbor(test_params, True)\n\n\n\n###### Task : Advance person at location ######\ndef __test_advance_person_at_location(test_params, is_test6):\n \"\"\"\n Test harness for advance_person_at_location\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n seed, city, index of location in the city,\n infection rate, number of days contagious\n expected result\n \"\"\"\n\n test_num, params = test_params\n\n city = params[\"city\"]\n city_copy = city[:]\n\n actual = sir.advance_person_at_location(city,\n params[\"location\"],\n params[\"days_contagious\"])\n\n recreate_msg = (\"See the information for Task 2, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(\"6-\" if is_test6 else \"\", test_num)\n\n # convert expected to the correct type\n expected = tuple(params[\"expected\"])\n check_result(recreate_msg, actual, expected)\n\n # Extra checks\n assert city_copy == city, \\\n \"\\nDo not modify the city!\\n\" + recreate_msg\n\n\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"advance_person_tests.json\"))\ndef test_advance_person_at_location(test_params):\n \"\"\"\n Test for advance_person_at_location, no vaxxed people.\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n seed, city, index of location 
in the city,\n infection rate, number of days contagious\n expected result\n \"\"\"\n __test_advance_person_at_location(test_params, False)\n\n\n@pytest.mark.vax\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vax_advance_person_tests.json\"))\ndef test_vax_advance_person_at_location(test_params):\n \"\"\"\n Test for advance_person_at_location, some vaxxed people.\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n seed, city, index of location in the city,\n infection rate, number of days contagious\n expected result\n \"\"\"\n __test_advance_person_at_location(test_params, True)\n\n\n###### Task: Move simulation forward one day ######\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"simulate_one_day_tests.json\"))\ndef __test_simulate_one_day(test_params, is_test6):\n \"\"\"\n Test harness for simulate_one_day\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n seed, city, infection rate, number of days contagious,\n expected result\n \"\"\"\n\n test_num, params = test_params\n\n city = params[\"city\"]\n city_copy = city[:]\n\n actual = sir.simulate_one_day(city,\n params[\"days_contagious\"])\n\n recreate_msg = (\"See the information for Task 3, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(\"6-\" if is_test6 else \"\", test_num)\n\n # Fix the type of the people in the city\n expected = convert_city(params, \"expected\")\n check_result(recreate_msg, actual, expected)\n\n # Extra check\n assert city_copy == city, \\\n \"\\nDo not modify the input city!\\n\" + recreate_msg\n\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"simulate_one_day_tests.json\"))\ndef test_simulate_one_day(test_params):\n \"\"\"\n Test for simulate_one_day, no vaxxed people\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n seed, city, infection rate, number of days contagious,\n expected result\n \"\"\"\n __test_simulate_one_day(test_params, False)\n\n\n@pytest.mark.vax\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vax_simulate_one_day_tests.json\"))\ndef test_vax_simulate_one_day(test_params):\n \"\"\"\n Test for simulate_one_day, some vaxxed people\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n seed, city, infection rate, number of days contagious,\n expected result\n \"\"\"\n __test_simulate_one_day(test_params, True)\n\n\n\n###### Task: check stopping condition ######\ndef __test_is_transmission_possible(test_params, is_test6):\n \"\"\"\n Test harness for is_transmission_possible function\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city and the expected result\n \"\"\"\n\n test_num, params = test_params\n\n city = params[\"city\"]\n city_copy = city[:]\n\n actual = sir.is_transmission_possible(city)\n\n recreate_msg = (\"See the information for Task 4, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(\"6-\" if is_test6 else \"\", test_num)\n\n expected = params[\"expected\"]\n check_result(recreate_msg, actual, expected)\n\n # Extra check\n assert city_copy == city, \\\n \"\\nDo not modify the input city!\\n\" + recreate_msg\n\n\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"is_transmission_possible.json\"))\ndef test_is_transmission_possible(test_params):\n \"\"\"\n Test 
harness for is_transmission_possible function no vaxxed people.\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n \"\"\"\n\n __test_is_transmission_possible(test_params, False)\n\n\n@pytest.mark.vax\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vax_is_transmission_possible.json\"))\ndef test_vax_is_transmission_possible(test_params):\n \"\"\"\n Test harness for is_transmission_possible function with some vaxxed\n folks added in.\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n \"\"\"\n\n __test_is_transmission_possible(test_params, True)\n\n\n###### Task: run simulation over multiple days ######\ndef __test_run_simulation(test_params, is_test6):\n \"\"\"\n Test harness for run_simulation\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city, the number of days contagious, and the\n expected result\n \"\"\"\n\n test_num, params = test_params\n\n starting_city = params[\"city\"]\n city_copy = starting_city[:]\n\n actual = sir.run_simulation(starting_city,\n params[\"days_contagious\"])\n\n recreate_msg = (\"See the information for Task 5, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(\"6-\" if is_test6 else \"\", test_num)\n\n # convert the type pf the people in the city\n expected = params[\"expected\"]\n expected = ([tuple(p) for p in expected[0]], expected[1])\n check_result(recreate_msg, actual, expected)\n\n # Extra check\n assert city_copy == starting_city, \\\n \"\\nDo not modify the input city!\\n\" + recreate_msg\n\n\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"run_simulation_tests.json\"))\ndef test_run_simulation(test_params):\n \"\"\"\n Test run_simulation, no vaxxed people\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city, the number of days contagious, and the\n expected result\n \"\"\"\n __test_run_simulation(test_params, False)\n\n@pytest.mark.vax\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vax_run_simulation_tests.json\"))\ndef test_vax_run_simulation(test_params):\n \"\"\"\n Test run_simulation, some vaxxed people\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n city, the number of days contagious, and the\n expected result\n \"\"\"\n __test_run_simulation(test_params, True)\n\n\n###### Task: vaccinate a person ######\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vaccinate_person.json\"))\ndef test_vaccinate_person(test_params):\n \"\"\"\n Test vaccinate_person\n\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n the seed, an augmented person, the expected result, and the expected\n number of calls to random.\n \"\"\"\n test_num, params = test_params\n\n # set the seed\n random.seed(params[\"seed\"])\n\n # use the same seed for all the tests\n if params[\"seed\"] == sir.TEST_SEED:\n # Use pre-generated random checking function\n check_rand = CHECK_RAND_20170217\n else:\n check_rand = gen_check_rand_calls(params[\"seed\"], 100)\n\n aug_person = tuple(params[\"aug_person\"])\n\n recreate_msg = (\"See the information for Task 7, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(test_num)\n\n actual = sir.vaccinate_person(aug_person)\n\n expected = tuple(params[\"expected\"])\n check_result(recreate_msg, 
actual, expected)\n\n # Extra checks\n (check, msg) = check_rand(params[\"num_rand_calls\"])\n assert check, msg + \"\\n\" + recreate_msg\n\n###### Task: vaccinate a city ######\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vaccinate_city.json\"))\ndef test_vaccinate_city(test_params):\n \"\"\"\n Test vaccinate_city\n Inputs:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n a seed, a list of augmented persons, and the expected result city.\n \"\"\"\n\n test_num, params = test_params\n\n city = params[\"city\"]\n city_copy = city[:]\n expected_num_rand_calls = len([p[0] for p in city if p[0] == \"S\"])\n\n # use the same seed for all the tests\n if params[\"seed\"] == sir.TEST_SEED and expected_num_rand_calls < 100:\n # Use pre-generated random checking function\n check_rand = CHECK_RAND_20170217\n else:\n check_rand = gen_check_rand_calls(params[\"seed\"],\n expected_num_rand_calls)\n\n actual = sir.vaccinate_city(city, params[\"seed\"])\n\n recreate_msg = (\"See the information for Task 8, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(test_num)\n\n expected = convert_city(params, \"expected\")\n check_result(recreate_msg, actual, expected)\n\n # Extra checks\n assert city == city_copy, \\\n \"\\nDo not modify the input city!\\n\" + recreate_msg\n\n # one call to random expected for each susceptible person.\n (check, msg) = check_rand(expected_num_rand_calls)\n assert check, msg + \"\\n\" + recreate_msg\n\n\n###### Task: Combine vaccination and simulation ######\n@pytest.mark.parametrize(\n \"test_params\",\n read_config_file(\"vaccinate_and_simulate.json\"))\ndef test_vaccinate_and_simulate(test_params):\n \"\"\"\n Test vax_and_simulate\n\n Args:\n test_params (int, dictionary): the test number and the test\n parameters dictionary:\n augmented persons, and the expected result.\n \"\"\"\n\n test_num, params = test_params\n city = params[\"city\"]\n days_contagious = params[\"days_contagious\"]\n seed = params[\"seed\"]\n\n actual = sir.vaccinate_and_simulate(city, days_contagious, seed)\n\n recreate_msg = (\"See the information for Task 9, Test {} to \"\n \"see how to recreate this test.\")\n recreate_msg = recreate_msg.format(test_num)\n\n # convert the expected city to have the right type\n expected = params[\"expected\"]\n expected = ([tuple(p) for p in expected[0]], expected[1])\n check_result(recreate_msg, actual, expected)\n","sub_path":"test_sir.py","file_name":"test_sir.py","file_ext":"py","file_size_in_byte":18940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"72699916","text":"\"\"\"\nDescription:\n============\nThis module contains the mpiReader object classes for the TESLaCU package.\nIt should not be imported unless \"__main__\" has been executed with MPI.\n\nNotes:\n======\n\nIndexing convention:\n--------------------\nSince TESLa has mostly worked in MATLAB and Fortran, it is common for us to\nthink in terms of column-major index order, i.e., [x1, x2, x3], where x1 is\ncontiguous in memory and x3 is always the inhomogenous dimension in the\nAthena-RFX flame geometry.\nHowever, Python and C/C++ are natively row-major index order, i.e.\n[x3, x2, x1], where x1 remains contiguous in memory and x3 remains the\ninhomogenous dimension.\n\nCoding Style Guide:\n-------------------\nThis module generally adheres to the Python style guide published in PEP 8,\nwith the following notable exceptions:\n- Warning W503 (line break 
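# ---------------------------------------------------------------------------
# Stand-alone sketch of the idea behind gen_check_rand_calls() in test_sir.py
# above: because random.seed() makes the stream reproducible, pre-generating
# the stream lets a test infer exactly how many random.random() draws the
# code under test consumed. Seed and counts here are illustrative.
import random

SEED = 20170217
random.seed(SEED)
stream = [random.random() for _ in range(10)]   # reference stream

random.seed(SEED)
for _ in range(3):                              # "code under test" draws 3
    random.random()

# The next draw matches stream[3] iff exactly three calls were made.
assert random.random() == stream[3]
# ---------------------------------------------------------------------------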
occurred before a binary operator) is ignored\n- Error E129 (visually indented line with same indent as next logical line)\n is ignored\n- Error E225 (missing whitespace around operator) is ignored\n\nFor more information see \n\nDefinitions:\n============\n\nAuthors:\n========\nColin Towery\n\nTurbulence and Energy Systems Laboratory\nDepartment of Mechanical Engineering\nUniversity of Colorado Boulder\nhttp://tesla.colorado.edu\n\"\"\"\n\nfrom mpi4py import MPI\nimport numpy as np\nimport sys\n# from vtk import vtkStructuredPointsReader\n# from vtk.util import numpy_support as vn\n\n__all__ = []\n\n\ndef factory(mpi_comm=MPI.COMM_WORLD, idir='./', ndims=3, decomp=None,\n fnx=None, anx=None, nh=None, periodic=None, ftype='binary'):\n \"\"\"\n The factory() function is a \"class factory\" which returns the\n appropriate mpi-parallel reader class instance based upon the\n inputs. Each subclass contains a different ...\n\n Arguments:\n\n Output:\n \"\"\"\n if MPI.COMM_WORLD.rank == 0:\n print('mpiReader.factory() not yet written!')\n MPI.Finalize()\n sys.exit(1)\n# -----------------------------------------------------------------------------\n\n\nclass mpiBinaryReader(object):\n \"\"\"\n class mpiBinaryReader\n\n Development notes:\n - this thing is pretty much not developed at all.\n \"\"\"\n\n def __init__(self, mpi_comm=MPI.COMM_WORLD, idir='./', ndims=3,\n decomp=None, nx=None, nh=None, periodic=None, byteswap=True):\n\n # DEFINE THE INSTANCE VARIABLES\n\n # \"Protected\" variables masked by property method\n # Global variables\n self.__idir = idir\n self.__comm = mpi_comm\n self.__ndims = ndims\n self.__byteswap = byteswap\n\n if decomp is None:\n decomp = list([True, ])\n decomp.extend([False]*(ndims-1))\n self.__decomp = decomp\n elif len(decomp) == ndims:\n self.__decomp = decomp\n else:\n raise IndexError(\"Either len(decomp) must be ndims or \"\n \"decomp must be None\")\n\n if nx is None:\n self.__nx = np.array([512]*ndims, dtype=int)\n elif len(nx) == ndims:\n self.__nx = np.array(nx, dtype=int) # \"analysis nx\"\n else:\n raise IndexError(\"Either len(nx) must be ndims or nx \"\n \"must be None\")\n\n if nh is None:\n self.__nh = np.zeros(ndims, dtype=int)\n elif len(nh) == ndims:\n self.__nh = np.array(nh, dtype=int)\n else:\n raise IndexError(\"Either len(nh) must be ndims or nh \"\n \"must be None\")\n\n if periodic is None:\n self.__periodic = tuple([False]*ndims)\n elif len(periodic) == ndims:\n self.__periodic = tuple(periodic)\n else:\n raise IndexError(\"Either len(periodic) must be ndims or \"\n \"periodic must be None\")\n\n # Local subdomain variables\n self.__nnx = self.__nx.copy()\n self.__ixs = np.zeros(ndims, dtype=int)\n self.__ixe = self.__nx.copy()\n\n if sum(self.__decomp) == 1:\n # 1D domain decomposition (plates in 3D, pencils in 2D)\n self.__nnx[0] = self.__nx[0]/self.__ntasks\n self.__ixs[0] = self.__nnx[0]*self.__taskid\n self.__ixe[0] = self.__ixs[0]+self.__nnx[0]\n else:\n raise AssertionError(\"mpiReader can't yet handle anything \"\n \"but 1D Decomposition.\")\n\n @property\n def comm(self):\n return self.__comm\n\n @property\n def taskid(self):\n return self.__taskid\n\n @property\n def ntasks(self):\n return self.__ntasks\n\n @property\n def ndims(self):\n return self.__ndims\n\n @property\n def decomp(self):\n return self.__decomp\n\n @property\n def nx(self):\n return self.__nx\n\n @property\n def nh(self):\n return self.__nh\n\n @property\n def nnx(self):\n return self.__nnx\n\n @property\n def ixs(self):\n return self.__ixs\n\n @property\n def 
ixe(self):\n return self.__ixe\n\n @property\n def byteswap(self):\n return self.__byteswap\n\n def simulation_time(self, filename):\n if self.taskid==0:\n with open(self.__idir+filename) as fh:\n t = float(fh.readline())\n else:\n t = None\n\n t = self.comm.bcast(t, root=0)\n\n return t\n\n def read_variable(self, filename, dtype=np.float64):\n \"\"\"Currently hard coded to 1D domain decomposition.\"\"\"\n status = MPI.Status()\n stmp = np.zeros(self.nnx, dtype=np.float32)\n fpath = self.__idir+filename\n fhandle = MPI.File.Open(self.comm, fpath)\n offset = self.taskid*stmp.nbytes\n fhandle.Read_at_all(offset, stmp, status)\n fhandle.Close()\n\n if self.byteswap:\n var = stmp.byteswap(True).astype(dtype)\n else:\n var = stmp.astype(dtype)\n return var\n\n def read_variable_ghost_cells(self, filename, dtype=np.float64):\n \"\"\"Currently hard coded to 1D domain decomposition.\"\"\"\n status = MPI.Status()\n shape = np.array([self.nh[0]*2, self.nnx[1], self.nnx[2]])\n stmp = np.zeros(shape, dtype=np.float32)\n hsize = shape.prod()*2 # 1/2 of stmp size * 4 bytes\n dsize = self.nnx.prod()*4 # subdomain size * 4 bytes\n\n fpath = self.__idir+filename\n fhandle = MPI.File.Open(self.comm, fpath)\n\n # read in the -z ghost zones\n if self.taskid==0:\n idx = self.ntasks\n else:\n idx = self.taskid\n offset = dsize*idx - hsize\n fhandle.Read_at_all(offset, stmp[:self.nh[0], ...], status)\n\n # read in the +z ghost zones\n if self.taskid==self.ntasks-1:\n idx = 0\n else:\n idx = self.taskid+1\n offset = dsize*idx\n fhandle.Read_at_all(offset, stmp[self.nh[0]:, ...], status)\n\n fhandle.Close()\n\n if self.byteswap:\n var = stmp.byteswap(True).astype(dtype)\n else:\n var = stmp.astype(dtype)\n return var\n\n\n###############################################################################\n# class mpiVtkReader(object):\n \"\"\"\n reader = vtkStructuredPointsReader()\n reader.SetFileName(filename)\n reader.ReadAllVectorsOn()\n reader.ReadAllScalarsOn()\n reader.Update()\n\n data = reader.GetOutput()\n\n dim = data.GetDimensions()\n vec = list(dim)\n vec = [i-1 for i in dim]\n vec.append(3)\n\n u = vn.vtk_to_numpy(data.GetCellData().GetArray('velocity'))\n rho = vn.vtk_to_numpy(data.GetCellData().GetArray('density'))\n Etot = vn.vtk_to_numpy(data.GetCellData().GetArray('total_energy'))\n Y = vn.vtk_to_numpy(data.GetCellData().GetArray('scalar'))\n byte_mask = vn.vtk_to_numpy(data.GetCellData().GetArray('avtGhostZones'))\n\n x = zeros(data.GetNumberOfPoints())\n y = zeros(data.GetNumberOfPoints())\n z = zeros(data.GetNumberOfPoints())\n\n for i in range(data.GetNumberOfPoints()):\n x[i],y[i],z[i] = data.GetPoint(i)\n\n u = u.reshape(vec,order='F')\n rho = rho.reshape(dim,order='F')\n Etot = Etot.reshape(dim,order='F')\n Y = Y.reshape(dim,order='F')\n x = x.reshape(dim,order='F')\n y = y.reshape(dim,order='F')\n z = z.reshape(dim,order='F')\n byte_mask = byte_mask.reshape(dim,order='F')\n \"\"\"\n","sub_path":"teslacu/mpiReader.py","file_name":"mpiReader.py","file_ext":"py","file_size_in_byte":8520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413877170","text":"# Faça um programa que pergunte o preço de três produtos e informe qual produto você deve comprar, sabendo que a decisão é sempre pelo mais barato.\n\nproduto1 = float(input('Digite aqui o valor do 1º produto: '))\nproduto2 = float(input('Digite aqui o valor do 2º produto: '))\nproduto3 = float(input('Digite aqui o valor do 3º produto: '))\n\nif produto1 < produto2 and produto3:\n 
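# NOTE (annotation, not in the original record): the condition above chains
# incorrectly -- Python reads `produto1 < produto2 and produto3` as
# `(produto1 < produto2) and bool(produto3)`, so any nonzero third price makes
# the right-hand side truthy. The intended comparison is
#     if produto1 < produto2 and produto1 < produto3:
# and the elif branches below need the analogous fix, e.g.
#     elif produto2 < produto1 and produto2 < produto3: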
print(\"O primeiro produto é o mais barato!\")\nelif produto2 < produto3 and produto1:\n print(\"O segundo produto é o mais barato!\")\nelif produto3 < produto2 and produto1:\n print(\"O terceiro produto é o mais barato!\")\nelse:\n print(\"Todos os produtos tem o mesmo valor!\")\n","sub_path":"estruturaDeDecisão/exercicio8.py","file_name":"exercicio8.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"192474384","text":"#!/usr/bin/env python\nimport copy\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport numpy as np\n\n\nclass RewardFunction(object):\n \"\"\"\n TODO: implement state-action and state-action-nextstate feature rfs\n \n state_features : dict with mapping from states to lists of features\n feature_rewards : dict with mapping from features to reward values\n \n Reward is simply the sum of all features (for now). This implementation\n represents reward functions based on either states; states and actions, or\n states, actions, and nextstates. Orthogonally, it can represent them\n in tabular form, or as sums of features (over either states, states/actions,\n or states/actions/nextstates.\n \"\"\"\n def __init__(self,\n state_features=None,\n state_rewards=None, #this is a deprecated argument\n reward_dict=None,\n feature_rewards=None,\n default_reward=0,\n terminal_states=None,\n terminal_state_reward=0,\n step_cost=0,\n rmax=None,\n cache_rewards=True):\n if terminal_states is None:\n terminal_states = [(-1, -1), (-2, -2)]\n if state_rewards is not None:\n reward_dict = state_rewards\n\n self.terminal_states = tuple(sorted(terminal_states))\n self.terminal_state_reward = terminal_state_reward\n self.default_reward = default_reward\n \n if (state_features is not None) and (feature_rewards is not None):\n self.state_features = state_features\n self.feature_rewards = feature_rewards\n self.type = 'state_feature_based'\n elif reward_dict is not None:\n self.reward_dict = copy.deepcopy(reward_dict)\n if type(list(reward_dict.values())[0]) is dict:\n if type(list(reward_dict.values())[0].values()[0]) is dict:\n self.type = 'state_action_nextstate_dict'\n else:\n self.type = 'state_action_dict'\n else:\n self.type = 'state_dict'\n else:\n self.reward_dict = {}\n self.type = 'state_dict'\n \n if self.type == 'state_dict':\n for ts in terminal_states:\n self.reward_dict[ts] = terminal_state_reward\n\n self.step_cost = step_cost\n\n #set rmax\n if rmax is None:\n if self.type == 'state_dict':\n rs = list(self.reward_dict.values()) + [default_reward,]\n rmax = max(rs)\n elif self.type == 'state_action_dict':\n rmax = -np.inf\n for s, ar in self.reward_dict.items():\n for a, r in ar.items():\n rmax = max(rmax, r)\n elif self.type == 'state_feature_based':\n fr = np.array(list(self.feature_rewards.values()))\n pos_fr = fr[fr > 0]\n if (len(pos_fr) == 0):\n pos_fr = [max(fr),]\n rmax = np.sum(pos_fr)\n else:\n raise ValueError(\"Cannot set Rmax\")\n self.rmax = rmax\n\n self.reward_cache = {}\n self.cache_rewards = cache_rewards\n \n def reward(self, s=None, a=None, ns=None):\n if self.type == 'state_dict':\n reward = self.reward_dict.get(ns, self.default_reward)\n\n elif self.type == \"state_action_dict\":\n if s not in self.reward_dict:\n reward = self.default_reward\n else:\n reward = self.reward_dict[s].get(a, self.default_reward)\n\n elif self.type == \"state_action_nextstate_dict\":\n if s not in self.reward_dict:\n reward = self.default_reward\n elif a not in 
self.reward_dict[s]:\n reward = self.default_reward\n else:\n reward = self.reward_dict[s][a].get(ns, self.default_reward)\n\n elif self.type == 'state_feature_based':\n if ns in self.terminal_states:\n reward = self.terminal_state_reward\n\n elif self.cache_rewards:\n if ns not in self.reward_cache:\n fs = self.state_features.get(ns, [])\n r = 0\n for f in fs:\n r += self.feature_rewards.get(f, self.default_reward)\n self.reward_cache[ns] = r\n reward = self.reward_cache[ns]\n else:\n fs = self.state_features.get(ns, [])\n reward = np.sum([self.feature_rewards[f] for f in fs])\n\n elif self.type == 'state_action_feature_based':\n pass\n\n elif self.type == 'state_action_nextstate_feature_based':\n pass\n \n if ns in self.terminal_states:\n return reward\n else:\n return reward + self.step_cost\n\n def gen_reward_dict(self, states=None, state_actions=None,\n state_action_nextstates=None, tf=None,\n include_actions=False, include_nextstates=False):\n\n # ================================================ #\n # Generate a state-action-nextstate rf dictionary #\n # ================================================ #\n if (include_actions and include_nextstates) \\\n or self.type in ['state_action_nextstate_dict',\n 'state_action_nextstate_feature_based']:\n rf = {}\n for s, a_ns in state_action_nextstates.items():\n rf[s] = {}\n for a, nstates in a_ns.items():\n rf[s][a] = {}\n for ns in nstates:\n\n #=========================================#\n # Handle the different rf types #\n #=========================================#\n if self.type in ['state_dict', 'state_feature_based']:\n rf[s][a][ns] = self.reward(ns=ns)\n\n elif self.type in ['state_action_dict',\n 'state_action_feature_based']:\n rf[s][a][ns] = self.reward(s=s, a=a)\n\n elif self.type in ['state_action_nextstate_dict',\n 'state_action_nextstate_feature_based']:\n rf[s][a][ns] = self.reward(s=s, a=a, ns=ns)\n\n else:\n raise ValueError(\"Undefined reward function dictionary!\")\n\n # ================================================ #\n # Generate a state-action rf dictionary #\n # ================================================ #\n elif include_actions or self.type in ['state_action_dict',\n 'state_action_feature_based']:\n rf = {}\n # ======================================== #\n # Handle the different rf types #\n # ======================================== #\n if self.type in ['state_dict', 'state_feature_based']:\n for s, a_ns in state_action_nextstates.items():\n rf[s] = {}\n for a, nstates in a_ns.items():\n if len(nstates) > 1:\n raise ValueError(\"Undefinable reward function dictionary!\")\n rf[s][a] = self.reward(ns=nstates[0])\n\n elif self.type in ['state_action_dict',\n 'state_action_feature_based']:\n for s, actions in state_actions.items():\n rf[s] = {}\n for a in actions:\n rf[s][a] = self.reward(s=s, a=a)\n else:\n raise ValueError(\"Undefined reward function dictionary!\")\n\n # ================================================ #\n # Generate a state rf dictionary #\n # ================================================ #\n elif self.type in ['state_dict', 'state_feature_based']:\n rf = {ns: self.reward(ns=ns) for ns in states}\n\n else:\n raise ValueError(\"Undefined reward function dictionary!\")\n\n return rf\n\n\n def __hash__(self):\n try:\n return self.hash\n except AttributeError:\n pass\n\n #todo write a test for this hash function\n myhash = [self.type,\n self.terminal_state_reward,\n self.terminal_states,\n self.default_reward,\n self.step_cost]\n\n if self.type == 'state_dict':\n myhash.extend([\n 
tuple(sorted(self.reward_dict.items())),\n ])\n else:\n myhash.extend([\n False,\n ])\n\n if self.type == 'state_action_dict':\n sar = []\n for s, ar in self.reward_dict.items():\n ar = tuple(sorted(ar.items()))\n sar.append((s, ar))\n sar = tuple(sorted(sar))\n myhash.extend([sar,])\n else:\n myhash.extend([False,])\n\n if self.type == 'state_action_nextstate_dict':\n sansr = []\n for s, ansr in self.reward_dict.items():\n ansr_ = []\n for a, nsr in ansr.items():\n nsr = tuple(sorted(nsr.items()))\n ansr_.append((a, nsr))\n ansr_ = tuple(sorted(ansr_))\n sansr.append(ansr_)\n sansr = tuple(sorted(sansr))\n myhash.extend([sansr,])\n else:\n myhash.extend([False,])\n\n if self.type == 'state_feature_based':\n myhash.extend([\n tuple(sorted(self.state_features.items())),\n tuple(sorted(self.feature_rewards.items()))\n ])\n else:\n myhash.extend([False,\n False])\n\n self.hash = hash(tuple(myhash))\n\n return hash(tuple(myhash))\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return hash(self) == hash(other)\n return False\n","sub_path":"lib/pyrlap/pyrlap/core/reward_function.py","file_name":"reward_function.py","file_ext":"py","file_size_in_byte":10269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"170370710","text":"#!/usr/bin/python2\n\nimport nxt.locator\nfrom nxt.motor import *\n\nturn = 200\n\ndef spin_around(b):\n m_front = Motor(b, PORT_A)\n m_front.turn(-20, turn)\n\nb = nxt.locator.find_one_brick()\nspin_around(b)\n\n","sub_path":"testing/front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"122582483","text":"#!/usr/bin/python\n\nimport sys\nimport matplotlib\n# the following line is added immediately after import matplotlib\n# and before import pylot. The purpose is to ensure the plotting\n# works even under remote login (i.e. 
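# ---------------------------------------------------------------------------
# Stand-alone sketch of the 'state_dict' branch of RewardFunction.reward()
# from pyrlap's reward_function.py above: a dict lookup with a default, plus a
# step cost that is skipped for terminal states. (Aside: the type-detection
# expression `list(reward_dict.values())[0].values()[0]` in that file indexes
# a dict view, which raises TypeError on Python 3; it needs a list() wrapper.)
# States and rewards below are made-up illustration values.
def reward(ns, reward_dict, terminal_states, default_reward=0, step_cost=-1):
    r = reward_dict.get(ns, default_reward)
    return r if ns in terminal_states else r + step_cost

rd = {(3, 3): 10}                             # goal state worth +10
terminals = {(-1, -1), (-2, -2), (3, 3)}
assert reward((3, 3), rd, terminals) == 10    # terminal: no step cost added
assert reward((0, 0), rd, terminals) == -1    # default reward + step cost
# ---------------------------------------------------------------------------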
headless display)\nmatplotlib.use('Agg')\nfrom matplotlib import cm\nimport matplotlib.pyplot as pyplot\nfrom csv import reader\nimport numpy as np\nimport argparse\nimport re\nimport os\nimport pdb\n\n\"\"\"Plot AverageReturn from progress.csv.\nTo use this script to generate plot for AverageReturn:\n python plotcurve.py -i progress.csv -o fig.png\n python plotcurve.py -i progress.csv -o fig.png AverageReturn\n\nusage: [-h] [-i INPUT] [-o OUTPUT] [--format FORMAT] [key [key ...]]\n\npositional arguments:\n key keys of scores to plot, the default will be\n AverageReturn\n\noptional arguments:\n -h, --help show this help message and exit\n -i INPUT, --input INPUT\n input filename of log, default will be standard\n input\n -o OUTPUT, --output OUTPUT\n output filename of figure, default will be standard\n output\n --format FORMAT figure format(png|pdf|ps|eps|svg)\n\n\n\"\"\"\n\n\ndef plot_average_return(keys, inputfile, outputfile):\n print(\"open file \", inputfile)\n # pdb.set_trace()\n with open(inputfile, 'r') as f:\n data = list(reader(f))\n # data[0] header\n # ['AverageDiscountedReturn', 'MinReturn', 'Entropy', 'StdReturn',\n # 'NumTrajs', 'PolicyExecTime', 'MaxKL', 'EnvExecTime', 'Time',\n # 'Perplexity', 'AverageReturn', 'AveragePolicyStd', 'LossAfter',\n # 'LossBefore', 'ExplainedVariance', 'MeanKL', 'MaxReturn',\n # 'ProcessExecTime', 'Iteration', 'ItrTime']\n # data[1::] recorded statistics\n if len(data) < 2:\n return\n\n if not keys:\n key = 'AverageReturn'\n else:\n key = keys[0]\n rIdx = data[0].index(key)\n returns = [d[rIdx] for d in data[1::]]\n # plot\n pyplot.plot(range(len(returns)), returns, linewidth=3.0)\n pyplot.title(key + ' ' + ' over iterations')\n # pyplot.xscale('log')\n pyplot.xlabel('iteration')\n pyplot.ylabel(key)\n # pyplot.show()\n pyplot.savefig(outputfile, bbox_inches='tight')\n pyplot.clf()\n print(\"save to output file\")\n\n\ndef removeDuplicates(num_samples):\n num_samples.append(0)\n unique_idx = [i for i in range(len(num_samples) - 1) if num_samples[i] !=\n num_samples[i + 1]]\n return unique_idx\n\n\ndef plot_average_return_sample(keys, inputfile, outputfile):\n print(\"open file \", inputfile)\n # pdb.set_trace()\n with open(inputfile, 'r') as f:\n data = list(reader(f))\n # data[0] header\n # [NumSamples,NumTrajs,MaxKL,Iteration,TotalRho,AverageDiscountedReturn,\n # MinReturn,TotalWeights,LossAfter,Time,AverageReturn,AveragePolicyStd,\n # MaxReturn,StdReturn,ESS,ItrTime,TotalLoglik,LossBefore,Entropy,MeanKL,Perplexity]\n # data[1::] recorded statistics\n if len(data) < 2:\n return\n\n if not keys:\n key = 'AverageReturn'\n else:\n key = keys[0]\n rIdx = data[0].index(key)\n returns = [d[rIdx] for d in data[1::]]\n num_samples = [d[data[0].index('NumSamples')] for d in data[1::]]\n # samples [5000, 5000, ....] 
returns[-230, -230, ....]\n idx = removeDuplicates(num_samples)\n num_samples = np.array(num_samples)\n num_samples = num_samples[idx]\n returns = np.array(returns)\n returns = returns[idx]\n # plot\n pyplot.plot(num_samples, returns, linewidth=3.0)\n pyplot.title(key + ' ' + ' over samples')\n pyplot.xlabel('samples')\n pyplot.ylabel(key)\n # pyplot.show()\n pyplot.savefig(outputfile, bbox_inches='tight')\n pyplot.clf()\n print(\"save to output file\")\n\n\ndef plot_multiple_average_return_sample(keys, inputfiles, outputfile):\n # pdb.set_trace()\n\n data_list = []\n labels = ['svrg', 'stein_trpo']\n for inputfile in inputfiles:\n print(\"open file \", inputfile)\n with open(inputfile, 'r') as f:\n data_list.append(list(reader(f)))\n for idx, data in enumerate(data_list):\n # data[0] header\n # [NumSamples,NumTrajs,MaxKL,Iteration,TotalRho,AverageDiscountedReturn,\n # MinReturn,TotalWeights,LossAfter,Time,AverageReturn,AveragePolicyStd,\n # MaxReturn,StdReturn,ESS,ItrTime,TotalLoglik,LossBefore,Entropy,MeanKL,Perplexity]\n # data[1::] recorded statistics\n if len(data) < 2:\n return\n\n if not keys:\n key = 'AverageReturn'\n else:\n key = keys[0]\n rIdx = data[0].index(key)\n returns = [d[rIdx] for d in data[1::]]\n #num_samples = [d[data[0].index('NumSamples')] for d in data[1::]]\n # samples [5000, 5000, ....] returns[-230, -230, ....]\n #idx = removeDuplicates(num_samples)\n #num_samples = np.array(num_samples)\n #num_samples = num_samples[idx]\n #returns = np.array(returns)\n #returns = returns[idx]\n # plot\n pyplot.plot(range(len(returns)), returns,\n linewidth=3.0, label=labels[idx])\n # pyplot.show()\n pyplot.title(key + ' ' + ' over samples')\n pyplot.xscale('log')\n pyplot.xlabel('samples')\n pyplot.ylabel(key)\n pyplot.savefig(outputfile, bbox_inches='tight')\n pyplot.clf()\n print(\"save to output file\")\n\n\ndef plot_multiple_average_return_time(keys, inputfiles, outputfile):\n # pdb.set_trace()\n\n data_list = []\n for inputfile in inputfiles:\n print(\"open file \", inputfile)\n with open(inputfile, 'r') as f:\n data_list.append(list(reader(f)))\n labels = ['stein_trpo']\n for idx, data in enumerate(data_list):\n # data[0] header\n # [NumSamples,NumTrajs,MaxKL,Iteration,TotalRho,AverageDiscountedReturn,\n # MinReturn,TotalWeights,LossAfter,Time,AverageReturn,AveragePolicyStd,\n # MaxReturn,StdReturn,ESS,ItrTime,TotalLoglik,LossBefore,Entropy,MeanKL,Perplexity]\n # data[1::] recorded statistics\n if len(data) < 2:\n return\n\n if not keys:\n key = 'Average Returns'\n else:\n key = keys[0]\n rIdx = data[0].index('AverageReturn')\n returns = [d[rIdx] for d in data[1::]]\n #num_samples = [d[data[0].index('NumSamples')] for d in data[1::]]\n #times = [d[data[0].index('ItrTime')] for d in data[1::]]\n # samples [5000, 5000, ....] 
returns[-230, -230, ....]\n #idx = removeDuplicates(num_samples)\n #times = np.array(times)\n #times = times[idx]\n #returns = np.array(returns)\n #returns = returns[idx]\n # plot\n # pdb.set_trace()\n #pyplot.plot(np.cumsum(times), returns, linewidth=3.0)\n pyplot.plot(range(len(returns)), returns,\n linewidth=3.0, label=labels[idx])\n # pyplot.show()\n pyplot.title(key + ' ' + ' over iterations', fontsize=16)\n pyplot.xlabel('iteration',fontsize=16)\n pyplot.ylabel(key, fontsize=16)\n pyplot.tick_params(axis='x', labelsize=16)\n pyplot.tick_params(axis='y', labelsize=16)\n pyplot.legend()\n pyplot.savefig(outputfile, bbox_inches='tight')\n pyplot.clf()\n print(\"save to output file\")\n\n\ndef main(argv):\n \"\"\"\n main method of plotting curves.\n \"\"\"\n cmdparser = argparse.ArgumentParser(\n \"Plot AverageReturn from progress.csv.\")\n cmdparser.add_argument(\n 'key', nargs='*',\n help='key of scores to plot, the default is AverageReturn')\n cmdparser.add_argument(\n '-i',\n '--inputs',\n nargs='*',\n help='input filename(s) of progress.csv '\n 'default will be standard input')\n cmdparser.add_argument(\n '-o',\n '--output',\n help='output filename of figure, '\n 'default will be standard output')\n cmdparser.add_argument(\n '--format',\n help='figure format(png|pdf|ps|eps|svg)')\n cmdparser.add_argument(\n '-t',\n '--time',\n help='print returns-times or returns-samples '\n 'default will be returns-samples')\n args = cmdparser.parse_args(argv)\n format = args.format\n if args.output:\n outputfile = open(args.output, 'wb')\n if not format:\n # pdb.set_trace()\n format = os.path.splitext(args.output)[1]\n if not format:\n format = 'png'\n else:\n outputfile = sys.stdout\n\n input_dir = \"/home/tianbing/github/rllab/data/local/experiment/\"\n #plot_average_return_sample(args.key, input_dir + args.input + \"/progress.csv\" , outputfile)\n inputfiles = [input_dir + inputfile + \"/progress.csv\" for inputfile in\n args.inputs]\n if args.time == 'r':\n plot_average_return(args.key, inputfiles[0], outputfile)\n elif args.time == 't':\n plot_multiple_average_return_time(args.key, inputfiles, outputfile)\n else:\n plot_multiple_average_return_sample(args.key, inputfiles, outputfile)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"scripts/plotcurve.py","file_name":"plotcurve.py","file_ext":"py","file_size_in_byte":9027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"19411996","text":"class Solution:\n def canWinNim(self, n: int) -> bool:\n if n >=134882061:\n return n%4 != 0\n if n <= 3:\n return True\n dp = [False] * (n+1)\n dp[1] = dp[2] = dp[3] = True\n for i in range(4,n+1):\n dp[i] = not (dp[i-1] and dp[i-2] and dp[i-3])\n return dp[-1]\n","sub_path":"algorithms/292. Nim Game.py","file_name":"292. 
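# ---------------------------------------------------------------------------
# Stand-alone sketch of what removeDuplicates() in scripts/plotcurve.py above
# computes: for each run of equal NumSamples values it keeps the index of the
# run's last element. A fresh object() sentinel is used here instead of the
# original's appended 0, which sidesteps the edge case of data that itself
# ends in 0. (Also note csv.reader yields strings; calling float() on the
# returns before plotting avoids a categorical y-axis.)
def last_of_each_run(values):
    values = list(values) + [object()]   # sentinel compares unequal to data
    return [i for i in range(len(values) - 1) if values[i] != values[i + 1]]

samples = [5000, 5000, 5000, 10000, 10000, 15000]
assert last_of_each_run(samples) == [2, 4, 5]
# ---------------------------------------------------------------------------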
Nim Game.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"298993829","text":"from pymongo import MongoClient\nimport requests, json, re\n\n\ndef add_elastic(_id, data, elastic_config):\n host = elastic_config['host']\n port = elastic_config['port']\n collection = elastic_config['collection']\n\n url = \"http://\" + host + \":\" + port + \"/\" + collection + \"/doc/\" + _id\n formatted_data = json.dumps(data)\n requests.put(url, data=formatted_data, headers={\"Content-Type\": \"application/json\"})\n\n\ndef add_mongo(_id, data, collection, mongo_config):\n host = mongo_config['host']\n port = mongo_config['port']\n database = mongo_config['database_name']\n\n data['_id'] = _id\n client = MongoClient(host, port)\n client[database][collection].insert_one(data)\n client.close()\n\n\ndef add_mongo_no_id(data, collection, mongo_config):\n host = mongo_config['host']\n port = mongo_config['port']\n database = mongo_config['database_name']\n\n client = MongoClient(host, port)\n client[database][collection].insert_one(data)\n client.close()\n\n\ndef add_bulk_mongo(data, collection, mongo_config):\n host = mongo_config['host']\n port = mongo_config['port']\n database = mongo_config['database_name']\n\n client = MongoClient(host, port)\n client[database][collection].insert_many(data, ordered=False)\n client.close()\n\n\ndef exists_mongo(_id, collection, mongo_config):\n host = mongo_config['host']\n port = mongo_config['port']\n database = mongo_config['database_name']\n\n client = MongoClient(host, port)\n ans = bool(client[database][collection].find_one(filter={\"_id\": _id}))\n client.close()\n return ans\n","sub_path":"serverless/scrape/local/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55762692","text":"# coding:utf-8\n'''\n@Copyright:LintCode\n@Author: monolake\n@Problem: http://www.lintcode.com/problem/binary-tree-level-order-traversal\n@Language: Python\n@Datetime: 16-11-28 16:14\n'''\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: Level order in a list of lists of integers\n \"\"\"\n\n def levelOrder(self, root):\n # write your code here\n result = []\n if root is None:\n return []\n temp = [root]\n level = [0]\n\n while temp:\n cur_node = temp.pop(0)\n cur_level = level.pop(0)\n if len(result) < cur_level + 1:\n result.append([])\n result[cur_level].append(cur_node.val)\n if cur_node.left:\n temp.append(cur_node.left)\n level.append(cur_level + 1)\n if cur_node.right:\n temp.append(cur_node.right)\n level.append(cur_level + 1)\n \n return result\n \n \n \n","sub_path":"69_binary-tree-level-order-traversal/binary-tree-level-order-traversal.py","file_name":"binary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"525772203","text":"\r\n\"\"\" \r\n\r\n# Predicting Listing Prices on Airbnb with Spark and Scikit-Learn\r\nMore often than not, machine learning can help inform assumptions about customer behaviors and its applications \r\nvary based of business problems. 
Helping customers make better choices, building loyalty over time and making customers contagiously \r\nhappy about your business to mention a few.\r\n\r\nLet's deep dive into advanced analytics with Python and it's very popular library scikit-learn as we look to help \r\nvisitors make better choices using the Inside Airbnb dataset for Amsterdam\r\n\r\n## Getting started\r\nWe'll start by predicting the listing price we expect the user to enter for a rental, \r\nbased on the attributes of the listing. Being able to predict the price has several applications:\r\n we may advice the customer on pricing a unit (maybe display a warrning if the number chosen is too large or small), assist in \r\n how to advertise it, or inform our own analysis of the market for investment decision\r\n\r\n##Exploring and Cleaning the Data\r\nFirst, let's load the listing.csv dataset from our FileSystem into a Pandas dataframce. \r\nThe data consists of approximately 1000 listings with over 90 columns, describing each listing in detail.\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import ensemble\r\nfrom sklearn import linear_model\r\nfrom sklearn.grid_search import GridSearchCV\r\nfrom sklearn import preprocessing\r\nfrom sklearn.cross_validation import train_test_split\r\nimport sklearn.metrics as metrics\r\nimport matplotlib.pyplot as plt\r\nfrom collections import Counter\r\nimport seaborn as sns\r\n\r\n\r\nAIRBNBFILE = \"listings.csv\"\r\ncols = ['price','accommodates','bedrooms','beds','neighbourhood_cleansed','room_type',\r\n'cancellation_policy','instant_bookable','reviews_per_month','number_of_reviews','availability_30','review_scores_rating']\r\n\r\ndf = pd.read_csv(AIRBNBFILE, usecols=cols)\r\n\r\n\r\ndf.head()\r\n\r\n# Row Count on dataset\r\nlen(df.index)\r\n\r\n\r\n#13849\r\n\r\n\r\n\"\"\"\r\nElements are counted from an iterable or initialized from another mapping (or counter):\r\n\r\n\r\nc = Counter() # a new, empty counter\r\nc = Counter('gallahad') # a new counter from an iterable\r\nc = Counter({'red': 4, 'blue': 2}) # a new counter from a mapping\r\nc = Counter(cats=4, dogs=8) # a new counter from keyword args\r\n\r\nCounter objects have a dictionary interface except that they return a zero count for missing items instead of raising a KeyError:\r\n\r\nc = Counter(['eggs', 'ham'])\r\nc['bacon'] # count of a missing element is zero\r\n0\r\n\r\nSetting a count to zero does not remove an element from a counter. 
Use del to remove it entirely:\r\n\r\nc['sausage'] = 0                    # counter entry with a zero count\r\ndel c['sausage']                    # del actually removes the entry\r\n\r\n\r\n\"\"\"\r\n\r\n\r\n\r\nnb_count = Counter(df.neighbourhood_cleansed)\r\n\r\nnb_count\r\n\"\"\"\r\nCounter generates a dictionary of key : number of occurrences\r\nCounter(\r\n{'De Baarsjes - Oud-West': 2431, 'Centrum-West': 1880, \r\n'De Pijp - Rivierenbuurt': 1624, 'Centrum-Oost': 1346, \r\n'Westerpark': 1029, 'Zuid': 1010, 'Oud-Oost': 806, 'Bos en Lommer': 695, \r\n'Oostelijk Havengebied - Indische Buurt': 694, 'Oud-Noord': 391, \r\n'Watergraafsmeer': 362, 'Slotervaart': 280, 'IJburg - Zeeburgereiland': 272, \r\n'Noord-West': 186, 'Buitenveldert - Zuidas': 160, \r\n'Noord-Oost': 159, 'Geuzenveld - Slotermeer': 138, \r\n'Osdorp': 99, 'De Aker - Nieuw Sloten': 91, \r\n'Bijlmer-Oost': 72, 'Gaasperdam - Driemond': 63, 'Bijlmer-Centrum': 61})\r\n\"\"\"\r\n\r\n\r\n\r\ntdf = pd.DataFrame.from_dict(nb_count, orient='index').sort_values(by=0)\r\n\r\ntdf.plot(kind='bar')\r\n\r\nplt.show()\r\n\r\n\r\n\"\"\"\r\n\r\nThis data is coming in a little raw, so some cleaning is in order.\r\n\r\nIn particular, the 'number_of_reviews' and 'reviews_per_month' fields look like they need some special processing\r\nto remove a large number of NaN values. \r\nRather than discarding these outright, \r\nwe set the value of 'reviews_per_month' to 0 where there is currently a NaN, \r\nbecause some quick analysis shows that this field is NaN only wherever 'number_of_reviews' is 0 (which makes some intuitive sense).\r\n\r\nWe also drop any entries that are obviously strange, such as listings with a \r\nvalue of 0 for bedrooms, beds, or price, \r\nthen finally drop any remaining rows that have any NaN values. \r\nThe resulting dataset contains 5246 entries, a subset of the original 10629.\r\n\r\n\"\"\"\r\n\r\nlen(tdf.index)\r\n\r\n\r\n# 22\r\ntdf\r\n\"\"\"\r\n                                           0\r\nBijlmer-Centrum                           61\r\nGaasperdam - Driemond                     63\r\nBijlmer-Oost                              72\r\nDe Aker - Nieuw Sloten                    91\r\nOsdorp                                    99\r\nGeuzenveld - Slotermeer                  138\r\nNoord-Oost                               159\r\nBuitenveldert - Zuidas                   160\r\nNoord-West                               186\r\nIJburg - Zeeburgereiland                 272\r\nSlotervaart                              280\r\nWatergraafsmeer                          362\r\nOud-Noord                                391\r\nOostelijk Havengebied - Indische Buurt   694\r\nBos en Lommer                            695\r\nOud-Oost                                 806\r\nZuid                                    1010\r\nWesterpark                              1029\r\nCentrum-Oost                            1346\r\nDe Pijp - Rivierenbuurt                 1624\r\nCentrum-West                            1880\r\nDe Baarsjes - Oud-West                  2431\r\n\"\"\"\r\n\r\n\r\n# do some data cleaning\r\n\r\n# first fill 'reviews_per_month' with 0 where there are no values\r\n# (use a numeric 0 rather than the string '0', so the column keeps its numeric dtype)\r\ndf['reviews_per_month'].fillna(0, inplace=True)\r\n\r\n# Also drop rows with weird values\r\ndf = df[df.bedrooms != 0]\r\ndf = df[df.beds != 0]\r\ndf = df[df.price != 0]\r\ndf = df.dropna(axis=0)\r\n\r\nlen(df.index)\r\n\r\n\r\ndf = df[df.bedrooms == 1]\r\nlen(df.index)\r\n\r\n# 7047\r\n\r\n\"\"\"\r\nThe last bit of cleaning/filtering we'll do is convert the 'price' column to floating point, \r\nand drop all of the entries with more than one bedroom.\r\n\r\nApproximately 70% of the entries in this file are for one-bedroom listings (in large cities, especially San Francisco, \r\nthis might be the norm), so it's the type of unit for which we have the most samples. By focusing our regression on \r\na single type of unit, our model will be better as there is less to discover about the complex interactions with other features \r\n(such as whether the unit is shared, private, or has a private room). 
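(Concretely, the df[df.bedrooms == 1] filter above keeps 7047 of the cleaned rows.) 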
To make predictions for larger units, one way to go would \r\nbe to develop separate models for each of the different sizes (2, 3, 4 bedrooms, etc.), or do some clustering to see if \r\nit is better to partition the data in some other way\r\n\"\"\"\r\n\r\ndf['price'] = df['price'].replace('[\\$,)]', '', regex=True).replace(\"[()]\", '-', regex=True).astype(float)\r\n\r\ndf.price.head(4)\r\n\r\n\r\n\"\"\"\"\r\n0\t95.0\r\n1\t60.0\r\n2\t45.0\r\n3\t35.0\r\nName: price, dtype: float64\r\n\r\n\r\nWe have a few columns that contain categorical variables. \r\nThese are handled in slightly different ways depending on their possible values. \r\nThe 'neighborhood_cleansed' field, in its raw form, is a string representing the name of the neighborhood. \r\nThe regressors in scikit-learn will only accept numeric fields. \r\nFor this type of variable we can use the get_dummies routine in Pandas to convert these to 'dummy' variables. \r\nThis process is also known as \"one hot\" encoding, meaning we add a column for every possible value of the field. \r\nEach listing (row) contains a '1' in for its own neighborhood, else the column contains a '0' for that neighborhood.\r\n\r\nWe handle this in a similar way for the 'room_type' and 'cancellation_policy' fields.\r\n\r\nFor the instant_bookable field, we can represent it with a single column since it's a boolean \r\nvalue (it represents whether or not the unit can be booked instantly online without confirmation from the owner).\r\n\"\"\"\r\n\r\n# get feature encoding for categorical variables\r\nn_dummies = pd.get_dummies(df.neighbourhood_cleansed)\r\nrt_dummies = pd.get_dummies(df.room_type)\r\nxcl_dummies = pd.get_dummies(df.cancellation_policy)\r\n\r\n# convert boolean column to a single boolean value indicating whether this listing has instant booking available\r\nib_dummies = pd.get_dummies(df.instant_bookable, prefix=\"instant\")\r\nib_dummies = ib_dummies.drop('instant_f', axis=1)\r\n\r\n# replace the old columns with our new one-hot encoded ones\r\nalldata = pd.concat((df.drop(['neighbourhood_cleansed', 'room_type', 'cancellation_policy', 'instant_bookable'], axis=1), n_dummies.astype(int), rt_dummies.astype(int),xcl_dummies.astype(int), ib_dummies.astype(int)),axis=1)\r\n\r\nallcols = alldata.columns\r\nalldata.head(5)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nUsing the scatter_matrix function in Pandas next, we can quickly show a matrix of each feature \r\nas a function of another, checking for any collinearity among the features.\r\n The cells along the diagonal of this matrix contain a histogram, with the values shown along the X axis.\r\n Collinearity in this case is less likely because we've picked a \r\n small set of features that are not obviously related to each other, but it's a good thing to check \r\n anyway to see if we have anything that will throw things off.\r\n\"\"\"\r\n\r\nscattercols = ['price','accommodates', 'number_of_reviews', 'reviews_per_month', 'beds', 'availability_30', 'review_scores_rating']\r\naxs = pd.scatter_matrix(alldata[scattercols],figsize=(12, 12), c='red')\r\n\r\n\r\nsns.pairplot(alldata[scattercols])\r\n\r\n\r\n\"\"\"\r\nLooking at the output of scatter_matrix, no features show any obvious problems. \r\nThe most closely related features appear to be 'beds' and 'accommodates'. \r\nIt's true that the more beds a unit has, the more it accommodates for sleeping, \r\nbut even these are only loosely related and the result is far from a straight line. 
\r\nPresumably this is because of different bed sizes, sleeping arrangements, and the layout of the rental unit.\r\n\r\nHowever, should you not be comfortable with the features while working on your analysis, you could reduce collinearity amongst\r\n the features using **Principal Component Analysis (PCA)** on the feature vectors.\r\n\r\n## Making Predictions with Scikit-Learn and Spark\r\n\r\nOne of the great things about scikit-learn is that we can easily try a bunch of different linear models on the same data. \r\nThis will give us some clues as to where we can start tuning. We will start with six of them: vanilla linear regression, \r\nridge and lasso regressions, ElasticNet, bayesian ridge, and a lesser-used one called Orthogonal Matching Pursuit.\r\n\r\nTo evaluate which model(s) are doing better, we will need some way to score the results. \r\nIn this example I've chosen median absolute error, mainly because it makes sense at a glance \r\n(it easily translates to a dollar amount relative to price) and is less \r\nsensitive to outliers than other metrics like mean squared error.\r\n\r\nSpeaking of which, it is fairly likely that we have some outliers in the data since we haven't done \r\nany filtering or clustering for them, so this is a good way to get a quick and dirty measure of performance \r\nbefore we move to fine-tuning (and of course, we could do more with outliers in the data preparation step).\r\n\r\n\"\"\"\r\n\r\nrs = 1\r\nests = [ linear_model.LinearRegression(), linear_model.Ridge(), linear_model.Lasso(), linear_model.ElasticNet(), linear_model.BayesianRidge(), linear_model.OrthogonalMatchingPursuit() ]\r\n\r\nests_labels = np.array(['Linear', 'Ridge', 'Lasso', 'ElasticNet', 'BayesRidge', 'OMP'])\r\nerrvals = np.array([])\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(alldata.drop(['price'], axis=1), alldata.price, test_size=0.2, random_state=20)\r\n\r\nfor e in ests:\r\n    e.fit(X_train, y_train)\r\n    this_err = metrics.median_absolute_error(y_test, e.predict(X_test))\r\n    # print(\"got error %0.2f\" % this_err)\r\n    errvals = np.append(errvals, this_err)\r\n\r\n\"\"\"\r\nLinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)\r\nRidge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,normalize=False, random_state=None, solver='auto', tol=0.001)\r\nLasso(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=1000,normalize=False, positive=False, precompute=False, random_state=None,selection='cyclic', tol=0.0001, warm_start=False)\r\nElasticNet(alpha=1.0, copy_X=True, fit_intercept=True, l1_ratio=0.5,max_iter=1000, normalize=False, positive=False, precompute=False,random_state=None, selection='cyclic', tol=0.0001, warm_start=False)\r\nBayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False, copy_X=True,fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06, n_iter=300,normalize=False, tol=0.001, verbose=False)\r\nOrthogonalMatchingPursuit(fit_intercept=True, n_nonzero_coefs=None,normalize=True, precompute='auto', tol=None)\r\n\"\"\"\r\n\r\npos = np.arange(errvals.shape[0])\r\nsrt = np.argsort(errvals)\r\nplt.figure(figsize=(7,5))\r\nplt.bar(pos, errvals[srt], align='center')\r\nplt.xticks(pos, ests_labels[srt])\r\nplt.xlabel('Estimator')\r\nplt.ylabel('Median Absolute Error')\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\"\"\"\r\nLooking at the error from each of these six estimators, they appear to be roughly the same, with most of the estimators able \r\nto predict the price with a median error around 16-20 dollars, with BayesRidge coming out on top by 
a small margin. \r\nHaving the results be this close isn't surprising, because we haven't done any tuning. \r\nThe results give us a good general idea of where we stand with the individual estimators.\r\n\r\nNext we'll try an ensemble method to see if we can get better results. \r\nThe upside is that we will likely get a better score, and the downside is that the ensemble methods have a \r\nbewildering variety of hyperparameters that must be tuned, each of which can affect our model \r\nand requires some experimentation to get right. \r\nThe common way to approach this is to use an exhaustive \"grid search\", which simply tries all the supplied parameter \r\ncombinations and uses cross-validation folding to find the best one. \r\nScikit-learn provides the very handy GridSearchCV function for this purpose.\r\nThe tradeoff of using GridSearchCV is that the exhaustive search and cross-validation \r\ncan take a lot of CPU and time. This is where we can use Spark to distribute the search \r\nover more machines and cores, enabling us to test more combinations faster.\r\n\r\nFor our first attempt, we'll limit the number of parameters just so we can get the results \r\nback quickly and see if we're doing better than any of the individual methods above.\r\n\"\"\"\r\n\r\nn_est = 300\r\n\r\ntuned_parameters = {\"n_estimators\": [ n_est ], \"max_depth\": [ 4 ], \"learning_rate\": [ 0.01 ], \"min_samples_split\": [ 1 ], \"loss\": [ 'ls', 'lad' ]}\r\n\r\ngbr = ensemble.GradientBoostingRegressor()\r\nclf = GridSearchCV(gbr, cv=3, param_grid=tuned_parameters, scoring='median_absolute_error')\r\npreds = clf.fit(X_train, y_train)\r\nbest = clf.best_estimator_\r\n\r\nbest\r\n\r\n\r\n\r\n\"\"\"\r\nMY BEST\r\nGradientBoostingRegressor(alpha=0.9, init=None, learning_rate=0.01,\r\n             loss='lad', max_depth=4, max_features=None,\r\n             max_leaf_nodes=None, min_samples_leaf=1, min_samples_split=1,\r\n             min_weight_fraction_leaf=0.0, n_estimators=300,\r\n             presort='auto', random_state=None, subsample=1.0, verbose=0,\r\n             warm_start=False)\r\n\r\nTHE ONE FROM THE BLOG\r\nGradientBoostingRegressor(alpha=0.9, init=None, learning_rate=0.01,loss='lad', max_depth=4, max_features=None,max_leaf_nodes=None, \r\nmin_samples_leaf=1, min_samples_split=1,min_weight_fraction_leaf=0.0, n_estimators=300,presort='auto', random_state=None,\r\nsubsample=1.0, verbose=0,warm_start=False)\r\n\"\"\"\r\n\r\n\r\nabs(clf.best_score_)\r\n\"\"\"\r\n\r\n# 15.803574251936618\r\n\r\nThe result of this attempt is a median error of $15.80.\r\n\r\nAlready, it looks like we're doing better with GradientBoostingRegressor than we were with any of the prior attempts. 
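(A quick check on the arithmetic here, assuming the best single-estimator error above was roughly $19.75: 15.80 / 19.75 ≈ 0.80, i.e. about 20% lower.)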
\r\nWithout doing any tuning, the median error is around 20% less than the best error of the previous group (with BayesRidge()).\r\n\r\nLet's get a quick measure of how the error is affected by each round of boosting, so we can see if throwing \r\nmore iterations at the problem is going to help.\r\n\r\n\"\"\"\r\n# plot error for each round of boosting\r\ntest_score = np.zeros(n_est, dtype=np.float64)\r\n\r\ntrain_score = best.train_score_\r\n\r\nfor i, y_pred in enumerate(best.staged_predict(X_test)):\r\n test_score[i] = best.loss_(y_test, y_pred)\r\n\r\n\r\nplt.figure(figsize=(12, 6))\r\nplt.subplot(1, 2, 1)\r\nplt.plot(np.arange(n_est), train_score, 'darkblue', label='Training Set Error')\r\nplt.plot(np.arange(n_est), test_score, 'red', label='Test Set Error')\r\nplt.legend(loc='upper right')\r\nplt.xlabel('Boosting Iterations')\r\nplt.ylabel('Least Absolute Deviation')\r\n\r\nplt.show()\r\n\r\n\r\n\"\"\"\r\nIt looks like the curve is flattening near the right side of the plot (after around 200-250 iterations), \r\nbut is still benefitting from more iterations, so we could increase the iterations to 500 without much thought.\r\n\r\nNow we can do some tuning with GridSearchCV and explore more of the hyperparameter combinations. \r\nHowever, this requires some serious CPU and a complete run of all of the combinations can easily take hours to finish, even on a small number of rows.\r\n\r\n### To do the next step, please install spark_sklearn package from Databricks\r\nBy simply replacing the following lines in our file, we can use the new spark-sklearn integration package. \r\nThis allows us to test more hyperparameter combinations, ultimately reducing error, and we can do it all in less time. This will perform more excellently depending on the size of your cluster\r\n\"\"\"\r\n\r\n\"\"\" \r\n*****************************\r\nSPARK\r\n\r\nfrom pyspark import SparkContext, SparkConf\r\nfrom spark_sklearn import GridSearchCV\r\n\r\nconf = SparkConf()\r\nsc = SparkContext(conf=conf)\r\n\r\nclf = GridSearchCV(sc, gbr, cv=3, param_grid=tuned_parameters, scoring='median_absolute_error')\r\n*****************************\r\nEND SPARK\r\n\"\"\"\r\n\r\n\r\n\r\n\"\"\"\r\nFinally let's look at the feature importances to see which features were most influential \r\nin predicting the listing price. This will show us a relative scoring of how important \r\neach feature is relative to the feature with the most importance.\r\n\"\"\"\r\n\r\nfeature_importance = clf.best_estimator_.feature_importances_\r\n# make importances relative to max importance\r\nfeature_importance = 100.0 * (feature_importance / feature_importance.max())\r\nsorted_idx = np.argsort(feature_importance)\r\npos = np.arange(sorted_idx.shape[0]) + .5\r\npvals = feature_importance[sorted_idx]\r\npcols = X_train.columns[sorted_idx]\r\nplt.figure(figsize=(8,12))\r\nplt.barh(pos, pvals, align='center')\r\nplt.yticks(pos, pcols)\r\nplt.xlabel('Relative Importance')\r\nplt.title('Variable Importance')\r\n\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nClearly some of the variables have more influence than others, and the results here are somewhat intuitive. The most influential feature is the 'Entire home/apt' attribute; this indicates whether or not the unit is shared with other people, and has the most effect in setting the price. 
We would probably expect this feature to be high on the list, and it might be even higher. Reviews are important, as is the short-term availability of the unit, and the Centrum-West and Centrum-Oost neighborhood features also scored high.\r\n\r\nYou could apply **PCA** to reduce the feature vectors and possibly get better predictions.\r\n\r\n### Conclusions\r\nIn this example we looked at how to predict the price using multiple factors, \r\nthen scaled out our cross-validation and hyperparameter search across a \r\nMapR cluster using Spark. We learned the following key points:\r\nIn this case, the ensemble method we tried (GradientBoostingRegressor)\r\n had better results than any individual estimator.\r\n\r\nWith GridSearchCV, we tested more hyperparameter combinations, which ultimately led us to a better result.\r\n\r\nUsing [spark-sklearn](https://github.com/databricks/spark-sklearn) is a straightforward way \r\nto throw more CPU at any machine learning problem you might have. We used the package to reduce \r\nthe time spent searching and reduce the error for our estimator.\r\n\r\nThere are many ways in which our first-pass analysis could be improved. \r\nFor example, we might start including more of the data from Inside Airbnb. \r\nWe could do sentiment analysis on the text of the reviews, \r\nbringing this in as an additional feature.\r\n\r\n\r\n\"\"\"","sub_path":"Predicting Airbnb Listing with Spark and Scikit-Learn_git.py","file_name":"Predicting Airbnb Listing with Spark and Scikit-Learn_git.py","file_ext":"py","file_size_in_byte":20257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"276541384","text":"#!/usr/bin/env python3\n\"\"\"Pong game\n\nGraphics game for two players. Every player controls the paddle on their side\nand tries to hit the ball over to the opponent's side.\n\nControls:\nPlayer 1: Keys W and S\nPlayer 2: Arrows Up and Down\nQuit: Esc\n\n\nThe game uses the Pyglet graphics library, which is a Python layer on top of OpenGL.\n\nThe coordinate system is oriented as follows:\n\n\n     y ^\n       |\nHEIGHT +---------------------------------------+\n       |                   :                   |\n       |                   :                   |\n       |                   :                   |\n       |                   ;              []   |\n       |]                  ;                  [|\n       |]                  ;                  [|\n       |]                  ;                  [|\n       |]                  ;                  [|\n       |                   ;                   |\n       |                   :                   |\n       |                   ;                   |\n       |                   ;                   |\n     0 +---------------------------------------+------> x\n       :                   :                   :\n       0                 WIDTH/2             WIDTH\n\nBe careful if you have experience with some graphics programs or 2D\nlibraries: many of them put the origin at the *top-left* corner, with y growing downward. 
OpenGL uses the mathematical coordinate system, zero is *bottom left*.\n\n\"\"\"\n\n# The first line (#!/usr/bin/env python3) is the so-called \"shebang\": In\n# Unix-based operating systems (Linux, OS X) this line enables you to run this file simply by using the command: ./pong.py\n\n# And now to the game itself: first you have to import the necessary methods from the pyglet library\n\nimport random\n\nimport pyglet\nfrom pyglet import gl\nfrom pyglet.window import key\n\n\n# Some constants:\n\n# Window size (in pixels)\nWIDTH = 900\nHEIGHT = 600\n\nBALL_SIZE = 20\nBAT_THICKNESS = 10\nBAT_LENGTH = 100\nSPEED = 200 # pixels per second\nBAT_SPEED = SPEED * 1.5 # also pixels per second\n\nNET_LENGTH = 20\nFONT_SIZE = 42\nTEXT_ALIGN = 30\n\n# We will remember the state of the game in global variables.\n# A professional programmer would get mad but it is simpler for us.\n# Just don't forget that a command like:\n# ball_coordinates = [0, 0]\n# in a function will create only a local variable, which wouldn't have\n# anything in common with the global `ball_coordinates` variable.\n# And a command like:\n# ball_coordinates[0] = 0\n# sets the first element in the global variable `ball_coordinates`.\n\nbat_coordinates = [HEIGHT // 2, HEIGHT // 2] # vertical position of two paddles\nball_coordinates = [0, 0] # x, y ball coordinates -- set in reset()\nball_speed = [0, 0] # x, y components of ball speed -- set in reset()\nkeys_pressed = set() # set of pressed keys\nscore = [0, 0] # score for 2 players\n\n# The position of the paddles and the ball are always determined by the center of the rectangle.\n\n\ndef reset():\n \"\"\"set initial state\n\n This function will be called in the beginning and also after any of\n the players loses the game.\n The function places the ball into the centre of the window and will give\n it a random speed.\n\n We are not resetting score and paddle position here, those will stay until the next round.\n \"\"\"\n ball_coordinates[0] = WIDTH // 2\n ball_coordinates[1] = HEIGHT // 2\n\n # x speed - right or left\n if random.randint(0, 1):\n ball_speed[0] = SPEED\n else:\n ball_speed[0] = -SPEED\n # y speed - completely random\n ball_speed[1] = random.uniform(-1, 1) * SPEED\n\n\ndef revive(dt):\n \"\"\"Calculate new game state\n\n This function is called many times per second. It will get the time\n in seconds that has passed since the last call in the `dt` argument.\n The computer is very quick, so the number will usually be small.\n \"\"\"\n # As we know from physics, a ball with speed `v` moves in time `t` for\n # `v*t` length.\n # We can expand this expression for x and y components.\n ball_coordinates[0] += ball_speed[0] * dt\n ball_coordinates[1] += ball_speed[1] * dt\n\n # Bounce from the bottom edge:\n # When the ball is too \"low\" it should bounce back and start to move up.\n # That means that it will have y speed component above 0. 
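\n    # (Illustrative example, not part of the original code: if ball_speed is\n    # [200, -150] at this point, the bounce below turns it into [200, 150] --\n    # the same speed, now pointing up.)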
\n # The x component won't change.\n if ball_coordinates[1] < BALL_SIZE // 2:\n ball_speed[1] = abs(ball_speed[1])\n\n # Bounce from the top edge:\n # The same, but the ball is too high and it has to move down.\n if ball_coordinates[1] > HEIGHT - BALL_SIZE // 2:\n ball_speed[1] = -abs(ball_speed[1])\n\n # Paddle movement - the loop has to run twice - once for each paddle.\n for bat_number in (0, 1):\n # movement according to pressed keys (function `key_press`)\n if ('up', bat_number) in keys_pressed:\n bat_coordinates[bat_number] += BAT_SPEED * dt\n if ('down', bat_number) in keys_pressed:\n bat_coordinates[bat_number] -= BAT_SPEED * dt\n\n # bottom stop - when the paddle reaches the bottom edge we set it to the minimum\n if bat_coordinates[bat_number] < BAT_LENGTH / 2:\n bat_coordinates[bat_number] = BAT_LENGTH / 2\n # top stop - when the paddle reaches the top edge we set it to the maximum\n if bat_coordinates[bat_number] > HEIGHT - BAT_LENGTH / 2:\n bat_coordinates[bat_number] = HEIGHT - BAT_LENGTH / 2\n\n # Ball bounce:\n # If the ball is too far left, it can bounce off the left paddle.\n # If the ball doesn't touch the paddle, then player on the left side loses. \n # It's similar for the right side.\n # I recommend to draw it on a piece of paper :)\n\n # First I will write down the minimal and maximal position where the paddle has to be\n # (centre of the bat) to bounce back the ball\n bat_min = ball_coordinates[1] - BALL_SIZE/2 - BAT_LENGTH/2\n bat_max = ball_coordinates[1] + BALL_SIZE/2 + BAT_LENGTH/2\n\n # bounce to the left\n if ball_coordinates[0] < BAT_THICKNESS + BALL_SIZE / 2:\n if bat_min < bat_coordinates[0] < bat_max:\n # paddle is at the right spot, we can bounce the ball back\n ball_speed[0] = abs(ball_speed[0])\n else:\n # paddle is not in the right spot, the player loses\n score[1] += 1\n reset()\n\n # bounce to the right\n if ball_coordinates[0] > WIDTH - (BAT_THICKNESS + BALL_SIZE / 2):\n if bat_min < bat_coordinates[1] < bat_max:\n ball_speed[0] = -abs(ball_speed[0])\n else:\n score[0] += 1\n reset()\n\n\ndef draw_rectangle(x1, y1, x2, y2):\n \"\"\"Draw the rectangle at the given coordinates\n\n What it should look like:\n\n y2 - +-----+\n |/////|\n y1 - +-----+\n : :\n x1 x2\n \"\"\"\n # I am calling OpenGL here which is the easiest to use for us at the moment.\n gl.glBegin(gl.GL_TRIANGLE_FAN) # draw connected triangles\n gl.glVertex2f(int(x1), int(y1)) # coordinate A\n gl.glVertex2f(int(x1), int(y2)) # coordinate B\n gl.glVertex2f(int(x2), int(y2)) # coordinate C, draw triangle ABC\n gl.glVertex2f(int(x2), int(y1)) # coordinate D, draw triangle BCD\n # another coordinate E would draw the triangle CDE, and so on...\n gl.glEnd() # stop drawing the triangles\n\n\ndef draw_text(text, x, y, x_position):\n \"\"\"Draw te given text at the given coordinates.\n\n Argument `x_position` can be \"left\" or \"right\" - sets where the text will be aligned\n \"\"\"\n # Pyglet can print text and we will create the object \"write\" and then we will draw it.\n # (Usually we would create this object once and then we would just change its text and\n # redraw it but we will do it this way cause it's easier)\n write = pyglet.text.Label(\n text,\n font_name='League Gothic',\n font_size=FONT_SIZE,\n x=x, y=y, anchor_x=x_position)\n write.draw()\n\n\ndef render():\n \"\"\"Render(draw) state of the game\"\"\"\n gl.glClear(gl.GL_COLOR_BUFFER_BIT) # clear the window (paint the window black)\n gl.glColor3f(1, 1, 1) # set the paint to white\n\n # ball\n draw_rectangle(\n ball_coordinates[0] - 
BALL_SIZE // 2,\n ball_coordinates[1] - BALL_SIZE // 2,\n ball_coordinates[0] + BALL_SIZE // 2,\n ball_coordinates[1] + BALL_SIZE // 2)\n\n # bats - we will create list of paddle coordinates and for each pair of coordinates\n # in this list we will draw the paddle.\n for x, y in [(0, bat_coordinates[0]), (WIDTH, bat_coordinates[1])] :\n draw_rectangle(\n x - BAT_THICKNESS,\n y - BAT_LENGTH // 2,\n x + BAT_THICKNESS,\n y + BAT_LENGTH // 2)\n\n # dashed line (as net) - composed from a couple of small rectangles.\n for y in range(NET_LENGTH // 2, HEIGHT, NET_LENGTH * 2):\n draw_rectangle(\n WIDTH // 2 - 1,\n y,\n WIDTH // 2 + 1,\n y + NET_LENGTH)\n\n # And finally we will draw the score of both players\n draw_text(str(score[0]),\n x=TEXT_ALIGN,\n y=HEIGHT - TEXT_ALIGN - FONT_SIZE,\n x_position='left')\n\n draw_text(str(score[1]),\n x=WIDTH - TEXT_ALIGN,\n y=HEIGHT - TEXT_ALIGN - FONT_SIZE,\n x_position='right')\n\n\ndef key_press(symbol, modifiers):\n \"\"\"Handles key press\n\n When the player presses a key, we add a tuple (direction, paddle number) to the `keys_pressed` set.\n So the program can move with the paddles according to what's in the set.\n \"\"\"\n if symbol == key.W:\n keys_pressed.add(('up', 0))\n if symbol == key.S:\n keys_pressed.add(('down', 0))\n if symbol == key.UP:\n keys_pressed.add(('up', 1))\n if symbol == key.DOWN:\n keys_pressed.add(('down', 1))\n # Pyglet handles ESC key by itself: it closes the window and exits the run() function\n\n\ndef key_release(symbol, modifiers):\n \"\"\"Handles when key is released.\n\n The opposite to the `key_press` function -- regarding the arguments.\n It will remove the tuple of direction and paddle number from the set.\n \"\"\"\n # Notice the usage of function `discard`: unlike `remove` it won't\n # raise an error when the element is not in the set. So the program\n # won't end when the user presses a key elsewhere, and then switches back to our\n # window and release the key there.\n if symbol == key.W:\n keys_pressed.discard(('up', 0))\n if symbol == key.S:\n keys_pressed.discard(('down', 0))\n if symbol == key.UP:\n keys_pressed.discard(('up', 1))\n if symbol == key.DOWN:\n keys_pressed.discard(('down', 1))\n\n # By the way, the functions key_release and key_press could be simplified by\n # using dictionaries. Will you try that?\n\n# We will set the initial state.\nreset()\n\n# We will create the window where we will draw.\nwindow = pyglet.window.Window(width=WIDTH, height=HEIGHT)\n\n# We will add some functions to the window which will react to some events.\n# For example when the user presses a key, Pyglet will call\n# a function that we will register with `on_key_press`, and it will pass a specific\n# key to the function. 
You can find the list of all the events that can happen\n# and what exactly Pyglet passes as an argument in the Pyglet documentation or with the\n# `help(pyglet.window.event)` function.\nwindow.push_handlers(\n    on_draw=render,  # for drawing into the window use the function `render`\n    on_key_press=key_press,  # when a key is pressed call the function `key_press`\n    on_key_release=key_release,  # when a key is released call `key_release`\n    )\n\n# We also have another function, but we don't want to assign it to any window event.\n# We want to call it every time the clock \"ticks\".\npyglet.clock.schedule(revive)\n\npyglet.app.run()  # everything is set, let the game begin\n# Function run() will call revive in a loop, render the screen, and if some event occurs, it will also\n# call the function we assigned to the event.\n","sub_path":"lessons/beginners-en/pyglet/static/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":11973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"274296479","text":"N = int(input())\nA = list(map(int, input().split()))\n\nsA = sorted(A, reverse=True)\n\n# The maximum is counted once; every other pick is counted twice, taken from largest to smallest; when N is odd the last pick counts only once\nif N % 2 == 0:\n    res = sum(sA[:N//2]) * 2 - sA[0]\nelse:\n    res = sum(sA[:N//2]) * 2 - sA[0] + sA[N//2]\n\nprint(res)\n","sub_path":"abc/173/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"389407771","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib3\nimport time\nimport random\n\n# Casas Bahia has certificate-based protection against scraping\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nuser_agent = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}\n\nclass Get(object):\n\n    _qnt_dados_atual = 0\n\n    # Produces data for extraction\n    @staticmethod\n    def __iniciador__ (url, sleep = 0.5):\n\n        time.sleep(abs(sleep + random.randint(-5, 5)/20))\n        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n        user_agent = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}\n        http = urllib3.PoolManager(10, headers=user_agent)\n        conteudo= http.request('GET', url)\n        soup = BeautifulSoup(conteudo.data.decode('utf-8'), 'html.parser')\n        return soup\n\n    def __PropriedadesCod__(self, soup, qntDados):\n        var = self.__controlador__(self, soup, qntDados)\n        return var\n\n    # Manager method; returns a list of product dictionaries\n    @staticmethod\n    def __controlador__ (self, soup, qntDados):\n        prods = list()\n        x = self.__class__._qnt_dados_atual\n        while x < qntDados:\n            links = self.__linksProds__(self, soup, qntDados)\n            for i in links:\n                prods.append(self.__informacoes__(i))\n            x += self.__class__._qnt_dados_atual\n            url2 = self.__proxPag__(soup)\n            print(url2)\n            print(\"\\n\")\n            soup = self.__iniciador__(url2)\n        return prods\n\n    # Grabs all notebook links from the current page\n    @staticmethod\n    def __linksProds__ (self, soup, qntDados) :\n        resUrl = []\n        for div in soup.findAll('div', {'class':'cont-product'}):\n            if (self.__class__._qnt_dados_atual < qntDados):\n                a = div.find('a')\n                resUrl.append(a.attrs['href'])\n                self.__class__._qnt_dados_atual += 1\n        # for i in resUrl:\n        #     print(i)\n        return resUrl\n\n    # Grabs the link to the next page\n    @staticmethod\n    def __proxPag__ (soup):\n        encontrou = False\n        link = str()\n        point = ''\n        compare = \"  • \"\n        for ul in soup.findAll('ul', {'class':'ListaPaginas'}):\n            for li in ul.find_all('li'):\n\n                # If the compare string is contained in str(li)\n                if ( compare in str(li) ):\n                    point = 'Ready'\n                    print(\"here\")\n                if (li.find('a') and ((encontrou == False) and (point =='Ready')) ):\n                    encontrou = True\n                    a = li.find('a')\n                    link = str(a.attrs['href'])\n        return link\n\n\n    # Grabs all the data from the product page:\n    @staticmethod\n    def __informacoes__ (url, sleep = 0.5):\n\n        time.sleep(abs(sleep + random.randint(-5, 5)/20))\n        http = urllib3.PoolManager(10, headers=user_agent)\n        conteudo_prod = http.request('GET', url)\n\n        soup_prod = BeautifulSoup(conteudo_prod.data.decode('utf-8'), 'html.parser')\n        b = soup_prod.find_all(\"b\")\n        dt = soup_prod.find_all(\"dt\", )\n        dd = soup_prod.find_all(\"dd\")\n        info = dict()\n        info['title'] = b[0].get_text().encode('utf8')\n\n        # get_text(strip=True, separator=' '): strips useless whitespace\n        # print(\"tam dd:\", len(dd))\n        # for i in range(0, len(dd)):\n        #     print(dd[i].get_text(strip=True, separator=' '))\n\n        # print(\"\\n \\n\")\n\n        # print(\"tam dt:\", len(dt))\n        # for j in range(0, len(dt)):\n        #     print(dt[j].get_text(strip=True, separator=' '))\n\n        for i in range(0, len(dt)):\n            info[dt[i].get_text(strip=True, separator=' ').encode('utf8')] = dd[i].get_text(strip=True, separator=' ').encode('utf8')\n        #info['description'] = p[0].get_text().encode('utf8')\n        return info\n\n    @classmethod\n    def all(cls):\n        return cls.objects","sub_path":"1-Anotacao/ws_tablet/Bahia/fileApp.py","file_name":"fileApp.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"78933448","text":"from blinker import Namespace\n\n_signals = Namespace()\n\nenrollment_linked = _signals.signal('enrollment_linked')\n\"\"\"Notifies receivers that enrollment has been linked to an invenio user\n\n:param enrollment: the linked enrollment\n\"\"\"\n\nenrollment_created = _signals.signal('enrollment_created')\n\"\"\"Notifies receivers that enrollment has been created.\n\n:param enrollment: the linked enrollment\n:return True if no notification should be sent\n\"\"\"\n\nenrollment_accepted = _signals.signal('enrollment_accepted')\n\"\"\"Notifies receivers that enrollment has been accepted (when accept is required)\n\n:param enrollment: the accepted enrollment\n\"\"\"\n\nenrollment_rejected = _signals.signal('enrollment_rejected')\n\"\"\"Notifies receivers that enrollment has been rejected (when accept is required)\n\n:param enrollment: the rejected enrollment\n\"\"\"\n\nenrollment_successful = _signals.signal('enrollment_successful')\n\"\"\"Notifies receivers that enrollment has been successfully carried out\n\n:param enrollment: the successful enrollment\n\"\"\"\n\nenrollment_failed = _signals.signal('enrollment_handler_failed')\n\"\"\"Notifies receivers that enrollment failed.\n\n:param enrollment: the failed enrollment\n:param exception: failure exception\n\"\"\"\n\nenrollment_duplicit_user = _signals.signal('enrollment_duplicit_user')\n\"\"\"Notifies receivers that the same enrollment is used by two different accounts.\n\n:param enrollment: the failed enrollment\n:param impostor: the second user that wants to use the enrollment\n\"\"\"\n\nenrollment_revoked = _signals.signal('enrollment_revoked')\n\"\"\"Notifies receivers that enrollment has been successfully revoked\n\n:param enrollment: the revoked enrollment\n\"\"\"\n\nrevocation_failed = _signals.signal('revocation_failed')\n\"\"\"Notifies receivers that 
revocation failed.\n\n:param enrollment: the failed enrollment\n:param exception: failure exception\n\"\"\"\n","sub_path":"oarepo_enrollments/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"422360398","text":"def fizzBuzz(n):\n for x in range(n+1):\n if x == 0:\n continue\n elif (x % 3 == 0 and x % 5 == 0):\n print('FizzBuzz')\n elif ( x % 3 == 0):\n print('Fizz')\n elif (x % 5 ==0):\n print('Buzz')\n else:\n print(x)\nfizzBuzz(15)","sub_path":"hack_rank_test.py","file_name":"hack_rank_test.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"618444699","text":"from app.klient import dodanie\nfrom app import db\nfrom app.klient.models import Zamowienia\nfrom flask import render_template\nfrom flask import request,flash\n\n@dodanie.route('/')\ndef display_all():\n\tzamowienia=Zamowienia.query.all()\n\treturn render_template('dodaj.html',zamowienia=zamowienia)\n\n\n\n@dodanie.route('/produkt',methods=['GET','POST'])\ndef display_news():\n\t# try:\n\tif request.method == \"POST\":\n\t\tfirma = request.form['firma']\n\t\tprodukt1 = request.form['produkt1']\n\t\tprodukt2 = request.form['produkt2']\n\t\tZamowienia.create_zamowienie(firma=firma,produkt1=produkt1,produkt2=produkt2)\n\tzamowienia=Zamowienia.query.all()\n\treturn render_template('dodaj.html',zamowienia=zamowienia)\n","sub_path":"app/klient/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"418232155","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 28 17:25:09 2021\n\n@author: asche\n\"\"\"\nfrom scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.linspace(0, 100, 1000)\n\nE=.01\nw=0\ndx = 5\ndy=0\nr = dx\n\nx0 = 0\ny0 = 1\n\nU1=-np.exp(-((x0+dx)**2+(y0+dy)**2));\nU2=-np.exp(-((x0-dx)**2+(y0+dy)**2));\nU = U1+U2\n\nv = np.sqrt(np.abs(2*(E-U))) \n\ntheta = np.arctan2(y0,x0);\n\nvx0 = v*np.cos(theta);\nvy0 = v*np.sin(theta);\n\nvx0 = v\nvy0=0\n\ny0 = [vx0, x0, vy0, y0] \n\n# Equations of Motion \ndef gauss2pot(y, t, w,r):\n y1, y2, y3, y4 = y\n\n dpx = r*(np.cos(2*np.pi*w*t))\n dpy = r*(np.sin(2*np.pi*w*t)); \n \n Fy1 = -2*(y4+dpy)*np.exp(-((y2+dpx)**2+(y4+dpy)**2)) \n Fx1 = -2*(y2+dpx)*np.exp(-((y2+dpx)**2+(y4+dpy)**2))\n Fy2 = -2*(y4-dpy)*np.exp(-((y2-dpx)**2+(y4-dpy)**2)) \n Fx2 = -2*(y2-dpx)*np.exp(-((y2-dpx)**2+(y4-dpy)**2))\n\n dp1 = Fx1+Fx2\n dp2 = Fy1+Fy2\n \n dydt = [dp1,y1,dp2,y3]\n return dydt\n\n\n\nsol = odeint(gauss2pot, y0, t,args=(w,r))\n\nplt.plot(sol[:,1],sol[:,3], 'b', label='theta(t)')\n","sub_path":"trajectory2gauss.py","file_name":"trajectory2gauss.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650939049","text":"\"\"\"\r\nPopulate database with all the data\r\n\"\"\"\r\n\r\nimport sqlite3\r\n\r\n# Create database\r\nconn = sqlite3.connect('sample.db')\r\nc = conn.cursor()\r\n\r\n# Create table\r\nc.execute(\"CREATE TABLE stocks (date TEXT, price REAL, volume INTEGER, name TEXT, code TEXT)\")\r\n\r\n# Opens data file and writes formatted data into data list\r\nwith open('data') as f:\r\n raw = (i.strip() for i in f.readlines())\r\n data = []\r\n for line in raw:\r\n date, price, volume, name, code = 
line.strip().split(',')\r\n price = float(price)\r\n volume = int(volume)\r\n\r\n data.append((date, price, volume, name, code))\r\n\r\n# Writes data into SQLite database\r\nc.executemany(\"INSERT INTO stocks VALUES (?, ?, ?, ?, ?)\", data)\r\n\r\n# Commit changes\r\nconn.commit()\r\n\r\n# Close Off\r\nc.close()\r\nconn.close()\r\nprint(\"All done.. \")\r\n","sub_path":"FlaskWebApp/populate_database.py","file_name":"populate_database.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"29926164","text":"\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n \r\n# Módulos\r\nimport sys, pygame, time, random\r\nfrom pygame.locals import *\r\n\r\n# Constantes\r\nANCHO = 1080\r\nALTO = 600\r\n\r\n#Funciones\r\n# ---------------------------------------------------------------------\r\n\r\ndef texto(texto, posx, posy, color=(255, 255, 255), tam=20):\r\n\tfuente = pygame.font.Font(\"images/BLADRMF_.ttf\", tam)\r\n\tsalida = pygame.font.Font.render(fuente, texto, 1, color)\r\n\tsalida_rect = salida.get_rect()\r\n\tsalida_rect.centerx = posx\r\n\tsalida_rect.centery = posy\r\n\treturn salida, salida_rect\r\n\r\n \r\n#Clases\r\n# ---------------------------------------------------------------------\r\nclass Snake(object):\r\n \r\n def __init__(self):\r\n self.image = pygame.image.load('images/serpiente/CabezaDerecha.png')\r\n self.image2 = pygame.image.load('images/serpiente/CuerpoH.png')\r\n self.image3 = pygame.image.load('images/serpiente/ColaDerecha.png')\r\n \r\n self.image7 = pygame.image.load('images/serpiente/CuerpoV.png')\r\n \r\n self.image4 = pygame.image.load('images/serpiente/CabezaArriba.png')\r\n self.image5 = pygame.image.load('images/serpiente/CabezaIzquierda.png')\r\n self.image6 = pygame.image.load('images/serpiente/CabezaAbajo.png')\r\n \r\n self.image9 = pygame.image.load('images/serpiente/ColaDerecha.png')\r\n self.image10 = pygame.image.load('images/serpiente/ColaIzquierda.png')\r\n self.image11 = pygame.image.load('images/serpiente/ColaAbajo.png')\r\n self.image12 = pygame.image.load('images/serpiente/ColaArriba.png')\r\n \r\n self.image13 = pygame.image.load('images/serpiente/izqabajo.png')\r\n self.image14 = pygame.image.load('images/serpiente/derabajo.png')\r\n self.image15 = pygame.image.load('images/serpiente/izqarriba.png')\r\n self.image16 = pygame.image.load('images/serpiente/derarriba.png')\r\n \r\n\r\n \r\n\r\n \r\n # posicion\r\n self.rect = self.image.get_rect()\r\n\r\n \r\n self.timetoreload = 0\r\n \r\n self.body=[[540,360],[540-self.rect.w,360],[540-self.rect.w*2,360]]\r\n self.bodyDireccion=[\"\",\"\",\"\"]\r\n self.direccion = \"\"\r\n self.handle_keys()\r\n\r\n ###### \r\n\r\n def handle_keys(self):\r\n \r\n key = pygame.key.get_pressed()\r\n \r\n if key[pygame.K_DOWN]: \r\n if self.direccion !=\"arriba\":\r\n self.direccion = \"abajo\"\r\n elif key[pygame.K_UP]:\r\n if self.direccion !=\"abajo\":\r\n self.direccion = \"arriba\" \r\n\r\n elif key[pygame.K_RIGHT]:\r\n if self.direccion !=\"izquierda\":\r\n self.direccion = \"derecha\" \r\n\r\n elif key[pygame.K_LEFT]: \r\n if self.direccion == \"\":\r\n self.direccion=\"\"\r\n\r\n elif self.direccion !=\"derecha\":\r\n self.direccion = \"izquierda\"\r\n \r\n\r\n\r\n def animate(self):\r\n gotox = self.body[0][0]\r\n gotoy = self.body[0][1]\r\n if self.direccion==\"abajo\":\r\n self.body[0][1] += self.rect.h\r\n elif self.direccion==\"arriba\":\r\n self.body[0][1] -= self.rect.h\r\n elif 
self.direccion==\"izquierda\":\r\n self.body[0][0] -= self.rect.w\r\n elif self.direccion==\"derecha\":\r\n self.body[0][0] += self.rect.w\r\n# bordes\r\n if self.body[0][0]>ANCHO-30:\r\n self.body[0][0]=0\r\n if self.body[0][0]<0:\r\n self.body[0][0]=ANCHO\r\n\r\n if self.body[0][1]>ALTO-30:\r\n self.body[0][1]=0\r\n if self.body[0][1]<0:\r\n self.body[0][1]=ALTO\r\n \r\n if self.direccion != \"\":\r\n for i in range(1,len(self.body)):\r\n x=self.body[i][0]\r\n y=self.body[i][1]\r\n if xgotox:\r\n self.bodyDireccion[i]=\"izquierda\"\r\n elif y>gotoy:\r\n self.bodyDireccion[i]=\"arriba\"\r\n elif yself.body[len(self.body)-1][0]:\r\n return self.image9\r\n \r\n elif self.body[len(self.body)-2][0]==self.body[len(self.body)-1][0] :\r\n if self.body[len(self.body)-2][1]self.body[len(self.body)-1][1]:\r\n return self.image11\r\n \r\n return self.image2\r\n \r\n def chooseSpriteForBody(self,i,bDireccion):\r\n \r\n if self.body[i+1][1] == self.body[i][1]:\r\n if self.body[i][1]self.body[i-1][1]:\r\n if bDireccion==\"izquierda\":\r\n return self.image15\r\n\r\n elif bDireccion==\"derecha\": \r\n return self.image16\r\n \r\n elif self.body[i][1]==self.body[i-1][1]:\r\n return self.image2\r\n \r\n elif self.body[i][0] == self.body[i+1][0]:\r\n \r\n if self.body[i][0]self.body[i-1][0]:\r\n if bDireccion==\"izquierda\":\r\n return self.image15\r\n\r\n elif bDireccion==\"derecha\": \r\n return self.image16 \r\n elif self.body[i][0]==self.body[i-1][0]:\r\n return self.image7\r\n \r\n\r\n if self.body[i][1] == self.body[i+1][1]:\r\n \r\n if self.body[i][1]self.body[i-1][1]:\r\n if bDireccion==\"arriba\":\r\n return self.image16\r\n\r\n elif bDireccion==\"abajo\": \r\n return self.image15 \r\n elif self.body[i][1]==self.body[i-1][1]:\r\n return self.image2\r\n \r\n elif self.body[i][0] == self.body[i+1][0]:\r\n \r\n if self.body[i][0]self.body[i-1][0]:\r\n if bDireccion==\"arriba\":\r\n return self.image14\r\n\r\n elif bDireccion==\"abajo\": \r\n return self.image16 \r\n elif self.body[i][0]==self.body[i-1][0]:\r\n return self.image7\r\n \r\n return self.image2\r\n \r\n\r\nclass Obstaculo(object):\r\n def __init__(self):\r\n\r\n self.image = pygame.image.load('images/puas.png')\r\n self.rect = self.image.get_rect()\r\n\r\n # self.body=[1020,540]\r\n \r\n\r\n self.rect.x=round(random.randrange(0,ANCHO-178)/178.0)*178.0\r\n self.rect.y=round(random.randrange(0,ALTO-178)/178.0)*178.0\r\n \r\n #self.rect.x = self.body[0]\r\n #self.rect.y = self.body[1]\r\n \r\n def draw(self, surface):\r\n \r\n surface.blit(self.image,(self.rect.x, self.rect.y))\r\n \r\n \r\n \r\n # surface.blit(self.image,(self.x,self.y))\r\n\r\n\r\nclass Bordes(object):\r\n def __init__(self):\r\n\r\n\r\n self.image = pygame.image.load('images/bordesH.png')\r\n self.image2 = pygame.image.load('images/NbordesV1portal.png')\r\n self.image3 = pygame.image.load('images/bordesH2.png')\r\n self.image4 = pygame.image.load('images/NbordesV1portal2.png')\r\n self.image5 = pygame.image.load('images/NbordesV2portal.png')\r\n self.image6 = pygame.image.load('images/NbordesV2portal2.png')\r\n \r\n\r\n self.rect = self.image.get_rect()\r\n self.rect2 = self.image3.get_rect()\r\n self.rect3 = self.image2.get_rect()\r\n self.rect4 = self.image4.get_rect()\r\n self.rect5 = self.image2.get_rect()\r\n self.rect6 = self.image4.get_rect()\r\n \r\n \r\n\r\n \r\n \r\n\r\n \r\n self.body=[[0,0],[0,ALTO-60],[0,60],[ANCHO-60,60],[0,ALTO-210],[ANCHO-60,390]]\r\n\r\n # self.rect.x=random.randrange(0,ANCHO-62)\r\n #self.rect.y=random.randrange(0,ALTO-62)\r\n \r\n \r\n 
\r\n def draw(self, surface):\r\n \r\n #surface.blit(self.image, (self.rect.x, self.rect.y))\r\n #Horizontales\r\n surface.blit(self.image, (self.body[0][0], self.body[0][1]))\r\n self.rect.x = self.body[0][0]\r\n self.rect.y=self.body[0][1]\r\n surface.blit(self.image3, (self.body[1][0], self.body[1][1]))\r\n self.rect2.x = self.body[1][0]\r\n self.rect2.y=self.body[1][1]\r\n #Verticales\r\n surface.blit(self.image2, (self.body[2][0],self.body[2][1]))\r\n self.rect3.x = self.body[2][0]\r\n self.rect3.y=self.body[2][1]\r\n surface.blit(self.image4, (self.body[4][0],self.body[4][1]))\r\n self.rect5.x = self.body[4][0]\r\n self.rect5.y=self.body[4][1]\r\n\r\n \r\n surface.blit(self.image5, (self.body[3][0], self.body[3][1]))\r\n self.rect4.x = self.body[3][0]\r\n self.rect4.y=self.body[3][1]\r\n surface.blit(self.image6, (self.body[5][0], self.body[5][1]))\r\n self.rect6.x = self.body[5][0]\r\n self.rect6.y=self.body[5][1]\r\n \r\n\r\n##\r\n \r\n # surface.blit(self.image,(self.x,self.y))\r\nclass Manzana(object):\r\n \r\n def __init__(self):\r\n\r\n self.image = pygame.image.load('images/manzana.png')\r\n self.rect = self.image.get_rect()\r\n self.cambio()\r\n\r\n def draw(self,surface):\r\n surface.blit(self.image,(self.x,self.y))\r\n\r\n def cambio(self):\r\n\r\n self.x=round(random.randrange(90,ANCHO-90)/30.0)*30.0\r\n self.y=round(random.randrange(90,ALTO-90)/30.0)*30.0\r\n self.rect.x = self.x\r\n self.rect.y=self.y\r\n\r\nclass Portales(object):\r\n \r\n def __init__(self):\r\n\r\n self.image = pygame.image.load('images/nportal1.png')\r\n self.image2 = pygame.image.load('images/nportal2.png')\r\n \r\n self.rect = self.image.get_rect()\r\n self.rect2 = self.image2.get_rect()\r\n\r\n self.body=[[0,ALTO-390],[ANCHO-60,210]]\r\n\r\n def draw(self,surface):\r\n surface.blit(self.image2,(self.body[0][0], self.body[0][1]))\r\n self.rect.x = self.body[0][0]\r\n self.rect.y=self.body[0][1]\r\n surface.blit(self.image,(self.body[1][0], self.body[1][1]))\r\n self.rect2.x = self.body[1][0]\r\n self.rect2.y=self.body[1][1]\r\n\r\n\r\n \r\n\r\n\r\n \r\n#Main\r\n# ---------------------------------------------------------------------\r\n\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((ANCHO,ALTO))\r\npygame.display.set_caption(\"Snake Cyberpunk\")\r\nsnake = Snake()\r\nobstaculo = Obstaculo()\r\nbordes = Bordes()\r\nportales = Portales()\r\nfondo = pygame.image.load('images/fondo.png')\r\nmanzana = Manzana() \r\npygame.mixer.music.load('Musica/musiquita.mp3')\r\npygame.mixer.music.play(3)\r\nrunning = True\r\npygame.mixer.music.load('Musica/musiquita.mp3')\r\nsonidoManzana=pygame.mixer.Sound('Musica/comida.wav')\r\nsonidoMuerte=pygame.mixer.Sound('Musica/muerte.wav')\r\nsonidoPortal=pygame.mixer.Sound('Musica/portal.wav')\r\npygame.mixer.music.play(3)\r\npuntos = 0\r\n\r\n\r\nwhile running:\r\n screen.blit(fondo, (0,0))\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n running = False\r\n\r\n snake.handle_keys()\r\n snake.animate()\r\n tex_img, tex_rec = texto(\"Puntuacion: \"+str(puntos), 145, 80, (255, 255, 255), 18) \r\n screen.blit(tex_img, tex_rec)\r\n \r\n\r\n##\r\n if snake.rect.colliderect(manzana.rect):\r\n snake.add()\r\n manzana.cambio()\r\n sonidoManzana.play()\r\n puntos+=1\r\n \r\n #elif snake.rect.colliderect(obstaculo.rect):\r\n # sonidoMuerte.play()\r\n # running= False\r\n # pygame.mixer.music.stop()\r\n \r\n if snake.rect.colliderect(bordes.rect):\r\n sonidoMuerte.play()\r\n running= False\r\n 
pygame.mixer.music.stop()\r\n\r\n elif snake.rect.colliderect(bordes.rect2):\r\n sonidoMuerte.play()\r\n running= False\r\n pygame.mixer.music.stop()\r\n elif snake.rect.colliderect(bordes.rect3):\r\n sonidoMuerte.play()\r\n running= False\r\n pygame.mixer.music.stop()\r\n\r\n elif snake.rect.colliderect(bordes.rect4):\r\n sonidoMuerte.play()\r\n running= False\r\n pygame.mixer.music.stop()\r\n\r\n elif snake.rect.colliderect(bordes.rect5):\r\n sonidoMuerte.play()\r\n running= False\r\n pygame.mixer.music.stop()\r\n\r\n elif snake.rect.colliderect(bordes.rect6):\r\n sonidoMuerte.play()\r\n running= False\r\n pygame.mixer.music.stop()\r\n\r\n elif snake.rect.colliderect(portales.rect):\r\n sonidoPortal.play()\r\n\r\n elif snake.rect.colliderect(portales.rect2):\r\n sonidoPortal.play()\r\n\r\n elif snake.muerte():\r\n sonidoMuerte.play()\r\n running= False\r\n pygame.mixer.music.stop()\r\n\r\n\r\n## elif snake.rect.colliderect(snake.rect8):\r\n## sonidoMuerte.play()\r\n## running= False\r\n## pygame.mixer.music.stop()\r\n## elif snake.rect.colliderect(snake.rect9):\r\n## sonidoMuerte.play()\r\n## running= False\r\n## pygame.mixer.music.stop()\r\n## elif snake.rect.colliderect(snake.rect10):\r\n## sonidoMuerte.play()\r\n## running= False\r\n## pygame.mixer.music.stop()\r\n## elif snake.rect.colliderect(snake.rect11):\r\n## sonidoMuerte.play()\r\n## running= False\r\n## pygame.mixer.music.stop()\r\n\r\n##\r\n\r\n\r\n\r\n \r\n\r\n snake.draw(screen)\r\n manzana.draw(screen)\r\n #obstaculo.draw(screen)\r\n bordes.draw(screen)\r\n portales.draw(screen)\r\n screen.blit(tex_img, tex_rec)\r\n \r\n \r\n \r\n pygame.display.update()\r\n\r\n time.sleep(0.07)\r\npygame.display.quit()\r\n\r\n\r\n","sub_path":"Ciberpunk Snake Final/CyberpunkSnake Level 3.py","file_name":"CyberpunkSnake Level 3.py","file_ext":"py","file_size_in_byte":17452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"457214338","text":"from datetime import timedelta, datetime\nfrom pathlib import Path\n\nimport h5py\n\n\ndef locate_gain_file(file_path, *, verbose=True):\n \"\"\"Locate gain file in default location at swissfel.\n\n The default gain file location is\n `/sf/jungfrau/config/gainMaps//gains.h5``.\n\n Args:\n file_path (str or Path): File path of a jungfrau data file.\n verbose (bool, optional): Print info about located gain file.\n\n Returns:\n str: A path to the located gain file.\n \"\"\"\n file_path = Path(file_path)\n if file_path.parts[1] != \"sf\":\n raise Exception(\"Gain file needs to be specified explicitly.\")\n\n detector_name = _read_detector_name(file_path)\n\n gain_path = Path(\"/sf/jungfrau/config/gainMaps/\")\n gain_file = gain_path.joinpath(detector_name, \"gains.h5\")\n\n if not gain_file.is_file():\n raise Exception(f\"No gain file in the default location: {gain_path}\")\n\n if verbose:\n print(f\"Auto-located gain file: {gain_file}\")\n\n return gain_file.as_posix()\n\n\ndef locate_pedestal_file(file_path, *, verbose=True):\n \"\"\"Locate pedestal file in default location at swissfel.\n\n The default pedestal file paths for a particula p-group are\n ``/sf//data//res/JF_pedestals/`` (old daq)\n or\n ``/sf//data//raw/JF_pedestals/`` (new daq).\n\n Args:\n file_path (str or Path): File path of a jungfrau data file.\n verbose (bool, optional): Print info about located pedestal file.\n\n Returns:\n str: A path to the located pedestal file.\n \"\"\"\n file_path = Path(file_path)\n if file_path.parts[1] != \"sf\":\n raise Exception(f\"Pedestal file needs 
to be specified explicitly.\")\n\n detector_name = _read_detector_name(file_path)\n\n pedestal_paths = (\n Path(*file_path.parts[:5]).joinpath(\"res\", \"JF_pedestals\"),\n Path(*file_path.parts[:5]).joinpath(\"raw\", \"JF_pedestals\"),\n )\n\n # find a pedestal file, which was created closest in time to the jungfrau file\n jf_file_mtime = file_path.stat().st_mtime\n closest_pedestal_file = \"\"\n min_mtime_diff = float(\"inf\")\n for pedestal_path in pedestal_paths:\n if pedestal_path.exists():\n for entry in pedestal_path.iterdir():\n if entry.is_file() and f\"{detector_name}.res.h5\" in entry.name:\n time_diff = jf_file_mtime - entry.stat().st_mtime\n if abs(time_diff) < abs(min_mtime_diff):\n min_mtime_diff = time_diff\n pedestal_mtime = entry.stat().st_mtime\n closest_pedestal_file = entry\n\n if not closest_pedestal_file:\n raise Exception(f\"No pedestal file found in default locations: {pedestal_paths}\")\n\n if verbose:\n print(f\"Auto-located pedestal file: {closest_pedestal_file}\")\n\n mtime_diff = min_mtime_diff\n if mtime_diff < 0:\n # timedelta doesn't work nicely with negative values\n # https://docs.python.org/3/library/datetime.html#datetime.timedelta.resolution\n tdelta_str = \"-\" + str(timedelta(seconds=-mtime_diff))\n else:\n tdelta_str = str(timedelta(seconds=mtime_diff))\n\n print(f\"jungfrau file: {datetime.fromtimestamp(jf_file_mtime).strftime('%H:%M %d.%m.%Y')}\")\n print(f\"pedestal file: {datetime.fromtimestamp(pedestal_mtime).strftime('%H:%M %d.%m.%Y')}\")\n print(\" mtime difference: \" + tdelta_str)\n\n return closest_pedestal_file.as_posix()\n\n\ndef _read_detector_name(file_path):\n with h5py.File(file_path, \"r\") as h5f:\n detector_name = h5f[\"/general/detector_name\"][()].decode()\n\n return detector_name\n","sub_path":"jungfrau_utils/swissfel_helpers.py","file_name":"swissfel_helpers.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530204661","text":"from array import array\nimport ROOT,math\n\nx = array('d')\ny = array('d')\nz = array('d')\n\nfor m in range(1,27) :\n mass = str(800+200*m)\n if mass == '2400':\n continue\n if mass == '3400':\n continue\n if mass == '5400':\n continue\n x.append(int(mass))\n f = open('/afs/cern.ch/work/s/skalafut/public/WR_starting2015/limitSetting/700toysAprilTwentyThree/_SHv19/_SHv19ee_'+mass+'_EXPECTED.log')\n ## Systematics OFF\n #f = open('/afs/cern.ch/user/j/jchavesb/work/limits/200_TOYS_2015_ns/_WRv07/_WRv07mumu_'+mass+'_EXPECTED.log')\n f_dc = open('/afs/cern.ch/work/s/skalafut/public/WR_starting2015/forJorge2015BackgroundEstimates/observedLimitDatacards/WRmumujj_MASS'+mass+'.txt')\n #g = open('/afs/cern.ch/user/j/jchavesb/work/limits/200_TOYS_UNC2_ns/_WRv07/_WRv07mumu_'+mass+'_EXPECTED.log')\n g_dc = open('/afs/cern.ch/user/j/jchavesb/CMSSW_8_0_25/src/ExoAnalysis/cmsWR/minlim_dc_new/WRmumujj_MASS'+mass+'.txt')\n\n #f = open('/afs/cern.ch/user/j/jchavesb/work/limits/100_TOYS_2015/_WRv06/_WRv06mumu_'+mass+'_EXPECTED.log')\n #g = open('/afs/cern.ch/user/j/jchavesb/work/limits/100_TOYS/_WRv06/_WRv06ee_'+mass+'_EXPECTED.log')\n ##g = open('/afs/cern.ch/user/j/jchavesb/work/limits/100_TOYS_2016/_WRv06/_WRv06mumu_'+mass+'_EXPECTED.log')\n ## Systematics ON\n #f = open('/afs/cern.ch/user/j/jchavesb/work/limits/100_TOYS_2015_syst/_WRv06/_WRv06mumu_'+mass+'_EXPECTED.log')\n #g = open('/afs/cern.ch/user/j/jchavesb/work/limits/100_TOYS_Punzi_syst/_WRv06/_WRv06mumu_'+mass+'_EXPECTED.log')\n g = 
open('/afs/cern.ch/user/j/jchavesb/work/limits/200_TOYS_UNC2/_WRv07/_WRv07ee_'+mass+'_EXPECTED.log')\n\n o = 0\n signal_2015 = 0\n signal_2016 = 0\n \n for line in f_dc:\n if 'rate' in line:\n signal_2015 = float(line.split(' ')[2])\n\n for line in g_dc:\n if 'rate' in line:\n signal_2016 = float(line.split(' ')[2])\n\n \n for line in f:\n if 'COMBINE' in line:\n #print mass,line.split('(')[2].replace(',','')\n y.append(float(line.split('(')[2].replace(',','')))\n o = float(line.split('(')[2].replace(',',''))\n for line in g:\n if 'COMBINE' in line:\n #print mass, line.split('(')[2].replace(',','')\n z.append(3.72*float(line.split('(')[2].replace(',',''))/o)\n #z.append(float(line.split('(')[2].replace(',',''))*signal_2016/(o*signal_2015)/3.72)\n\ngr = ROOT.TGraph(23,x,y);\ngr2 = ROOT.TGraph(23,x,z);\n\nmycanvas = ROOT.TCanvas( \"mycanvas\", \"\", 0, 0, 800, 600 ) ;\n\ngr2.GetYaxis().SetTitleOffset(1.45);\n\ngr2.SetTitle(\"Limit ratio;Mass [GeV];2016 limits/2015 limits\")\ngr2.Draw(\"AC*\")\n#gr2.SetLineColor(ROOT.kRed)\nf1 = ROOT.TF1(\"f1\",\"0.07\",0,6000)\nf1 = ROOT.TF1(\"f1\",\"1.0\",0,6000)\nf1.Draw(\"same\")\nf2 = ROOT.TF1(\"f2\",\"0.27\",0,6000)\n#f2.Draw(\"same\")\n\nmg = ROOT.TMultiGraph()\nmg.Add(gr)\nmg.Add(gr2)\n\n#mycanvas.Print('ratio_lim_mumu.pdf')\n\n#mg.Draw(\"AC*\")\n\n","sub_path":"limit_ratio.py","file_name":"limit_ratio.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650985606","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function, absolute_import\n\nimport base64\n\nfrom django.conf import settings\nfrom django.contrib import auth\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.contrib.auth.middleware import RemoteUserMiddleware\n\n__author__ = 'Matthieu Gallet'\n\n\nclass EasyDjangoMiddleware(RemoteUserMiddleware):\n \"\"\"Like :class:`django.contrib.auth.middleware.RemoteUserMiddleware` but:\n\n * can use any header defined by the setting `EASYDJANGO_REMOTE_USER_HEADER`,\n * handle the HTTP_X_FORWARDED_FOR HTTP header (set the right client IP)\n * handle HTTP basic authentication\n * set response header for Internet Explorer (to use its most recent render engine)\n \"\"\"\n remote_user_header = settings.EASYDJANGO_REMOTE_USER_HEADER\n\n # noinspection PyMethodMayBeStatic\n def process_request(self, request):\n\n if settings.USE_X_FORWARDED_FOR and 'HTTP_X_FORWARDED_FOR' in request.META:\n request.META['REMOTE_ADDR'] = request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()\n\n if settings.USE_HTTP_BASIC_AUTH and 'HTTP_AUTHORIZATION' in request.META:\n authentication = request.META['HTTP_AUTHORIZATION']\n (authmeth, auth_data) = authentication.split(' ', 1)\n if 'basic' == authmeth.lower():\n auth_data = base64.b64decode(auth_data.strip()).decode('utf-8')\n username, password = auth_data.split(':', 1)\n user = auth.authenticate(username=username, password=password)\n if user:\n request.user = user\n auth.login(request, user)\n\n if self.remote_user_header and self.remote_user_header in request.META:\n if not request.user.is_authenticated():\n self.remote_user_authentication(request)\n\n # noinspection PyUnusedLocal,PyMethodMayBeStatic\n def process_template_response(self, request, response):\n response['X-UA-Compatible'] = 'IE=edge,chrome=1'\n return response\n\n def remote_user_authentication(self, request):\n # AuthenticationMiddleware is required so that request.user exists.\n if not hasattr(request, 
'user'):\n raise ImproperlyConfigured(\n \"The Django remote user auth middleware requires the\"\n \" authentication middleware to be installed. Edit your\"\n \" MIDDLEWARE_CLASSES setting to insert\"\n \" 'django.contrib.auth.middleware.AuthenticationMiddleware'\"\n \" before the RemoteUserMiddleware class.\")\n username = request.META.get(self.remote_user_header)\n if not username or username == '(null)': # special case due to apache2+auth_mod_kerb :-(\n return\n username, sep, domain = username.partition('@')\n # If the user is already authenticated and that user is the user we are\n # getting passed in the headers, then the correct user is already\n # persisted in the session and we don't need to continue.\n if request.user.is_authenticated():\n if request.user.get_username() == self.clean_username(username, request):\n return\n else:\n # An authenticated user is associated with the request, but\n # it does not match the authorized user in the header.\n self._remove_invalid_user(request)\n\n # We are seeing this user for the first time in this session, attempt\n # to authenticate the user.\n user = auth.authenticate(remote_user=username)\n if user:\n # User is valid. Set request.user and persist user in the session\n # by logging the user in.\n request.user = user\n auth.login(request, user)\n","sub_path":"easydjango/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318850291","text":"# Events\n# Events are a form of asynchronous communication between Greenlets.\n\n\nimport gevent\nfrom gevent.event import Event\n\n\"\"\"\nIllustrates the use of events\n\"\"\"\n\n\ndef async_communication_through_events():\n evt = Event()\n\n def setter():\n \"\"\"After 3 seconds, wake all threads waiting on the value of evt\"\"\"\n print(\"A: Hey wait for me, I have to do something\")\n gevent.sleep(3)\n print(\"Ok, I'm done\")\n evt.set()\n\n def waiter():\n \"\"\"After 3 seconds the get call will unblock\"\"\"\n print(\"I'll wait for you\")\n evt.wait() # blocking\n print(\"It's about time\")\n\n gevent.joinall(\n [\n gevent.spawn(setter),\n gevent.spawn(waiter),\n gevent.spawn(waiter),\n gevent.spawn(waiter),\n gevent.spawn(waiter),\n gevent.spawn(waiter),\n ]\n )\n\n\n# AsyncResult\n# An extension of the Event object is the AsyncResult which allows you to send a value along with the wakeup call. 
This is sometimes called a future or a deferred, since it holds a reference to a future value that can be set on an arbitrary time schedule.\n\n\nimport gevent\nfrom gevent.event import AsyncResult\n\na = AsyncResult()\n\n\ndef async_communication_through_async_results():\n a = AsyncResult()\n\n def setter():\n \"\"\"\n After 3 seconds set the result of a.\n \"\"\"\n gevent.sleep(3)\n a.set(\"Hello!\")\n\n def waiter():\n \"\"\"\n After 3 seconds the get call will unblock after the setter\n puts a value into the AsyncResult.\n \"\"\"\n print(a.get())\n\n gevent.joinall(\n [gevent.spawn(setter), gevent.spawn(waiter),]\n )\n\n\n# Queues\n\n# Queues are ordered sets of data that have the usual put / get operations but are written in a way such that they can be safely manipulated across Greenlets.\n\n# For example if one Greenlet grabs an item off of the queue, the same item will not be grabbed by another Greenlet executing simultaneously.\n\nif __name__ == \"__main__\":\n # async_communication_through_events()\n async_communication_through_async_results()\n\n","sub_path":"reference/ref-gevent/sdiehl_tutorial/7_events_asyncresult.py","file_name":"7_events_asyncresult.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130065358","text":"import sys\nsys.path.append('/UBB/1st Year/FP/Assignment 5-7')\nfrom Domain.Discipline import Discipline\nfrom Domain.Exceptions import DisciplineException\nfrom IterableDataStructure import Repo\nimport unittest\n\n\nclass DisciplineRepository:\n def __init__(self):\n '''\n creates an instance of the DisciplineRepository\n '''\n self.__data = Repo()\n\n def __find(self, ID):\n '''\n returns the index Discipline having the given ID\n Input: ID - an integer, the ID of the discipline that is being searched\n Output: index - if the discipline was found, -1 otherwise\n '''\n for i in range(len(self.__data)):\n if self.__data[i].getID() == ID:\n return i\n return -1\n\n def findBydID(self, ID):\n '''\n returns the index Discipline having the given ID\n Input: ID - an integer, the ID of the discipline that is being searched\n Output: the discipline if it was found, none otherwise\n '''\n indexID = self.__find(ID)\n if indexID == -1:\n return None\n return self.__data[indexID]\n\n def add(self, dis):\n '''\n add a Discipline to the repository\n Input: dis - object of type Discipline\n Output: the given Discipline is added to the repository, if no other Discipline has the same ID\n Exceptions: raises DisciplineException if another Discipline with the same name already exists\n '''\n if self.findBydID(dis.getID()) != None:\n raise DisciplineException(\"Discipline with ID \" + str(dis.getID()) + \" already exists!\")\n self.__data.append(dis)\n\n def update(self, ID, newName):\n '''\n updates a Discipline from the repository, using the given name\n Input - ID, a positive integer denoting the Discipline that must be updated\n - newName, a string which will replace the name of the existing Discipline\n Output - if such a Discipline exists, the name is updated\n Exception - raises Exception if Discipline with given ID does not exist\n '''\n indexID = self.__find(ID)\n if indexID == -1:\n raise DisciplineException(\"There is no discipline with ID \" + str(ID) + \"!\")\n self.__data[indexID].setName(newName)\n\n def remove(self, ID):\n '''\n removes a Discipline from the repository, using its name\n Input: ID, a positive integer denoting the Discipline that must be updated\n 
Output: if such a Discipline exists, it is removed\n '''\n indexID = self.__find(ID)\n if indexID == -1:\n raise DisciplineException(\"There is no discipline with ID \" + str(ID) + \"!\")\n self.__data.pop(indexID)\n\n def searchStringinName(self, string):\n '''\n searches a string in the name's of the disciplines' repository\n Input: string\n Output: all the Disciplines in the repository that have the string in their names\n '''\n res = []\n for i in range(len(self.__data)):\n Name = self.__data[i].getName()\n Name = Name.upper()\n if Name.find(string.upper()) != -1:\n res.append(self.__data[i])\n return res\n\n def __len__ (self):\n '''\n returns the size of the disciplines list\n '''\n return len(self.__data)\n\n def getAll(self):\n '''\n returns the list of the disciplines list\n '''\n return self.__data\n\nclass testDisRepo(unittest.TestCase):\n\n def testDisciplineRepository(self):\n repo = DisciplineRepository()\n\n d1 = Discipline(1, \"Japanese\")\n d2 = Discipline(1, \"How to draw anime\")\n\n assert len(repo) == 0\n\n repo.add(d1)\n assert len(repo) == 1\n assert repo.findBydID(1) == d1\n\n assert repo.searchStringinName(\"AP\") == [d1]\n assert repo.searchStringinName(\"ta\") == []\n\n try:\n repo.add(d1)\n assert False\n except DisciplineException:\n assert True\n\n try:\n repo.add(d2)\n assert False\n except DisciplineException:\n assert True\n\n d2 = Discipline(2, \"How to draw anime\")\n repo.add(d2)\n assert len(repo) == 2\n assert repo.findBydID(1) == d1\n assert repo.findBydID(2) == d2\n\n repo.update(2, \"Anime\")\n\n repo.remove(1)\n assert len(repo) == 1\n assert repo.findBydID(2) == d2\n assert repo.findBydID(1) == None\n\n try:\n repo.remove(1)\n assert False\n except DisciplineException:\n assert True\n\n repo.remove(2)\n assert len(repo) == 0","sub_path":"Assignment 5-7/Repository/DisciplineRepository.py","file_name":"DisciplineRepository.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"533972356","text":"import unittest\nimport urllib\nimport hashlib\nimport json\nimport multiprocessing\nimport time\nfrom Queue import Queue\nfrom datetime import datetime, timedelta\n\nimport MySQLdb\n\nfrom pulsa import config\nfrom pulsa.common.messaging import XMPPMessaging\nfrom pulsa.comp.BaseComp import BaseComponent\n\n#---Classes--------------------------------------------------------------------\nclass SRTestCase(unittest.TestCase):\n def setUp(self):\n self.bc = BaseComponent()\n self.dbconn = self.bc.dbconn\n self.cacheconn = self.bc.cacheconn\n\n def _resetTable(self, table_name):\n cursor = self.dbconn.cursor()\n sql = 'DELETE FROM `{0}`'.format(table_name)\n cursor.execute(sql)\n sql = 'ALTER TABLE `{0}` AUTO_INCREMENT=1'.format(table_name)\n cursor.execute(sql)\n cursor.close()\n self.dbconn.commit()\n\n\nclass TestAdminHelper(SRTestCase):\n '''Test case admin helper\n A test case that includes testing the admin helper should inherit\n this class'''\n def setUp(self):\n super(TestAdminHelper, self).setUp()\n\n def _callAdminHelper(self, module_name, post_data):\n post_data['pin'] = hashlib.md5('!elogic123').hexdigest()\n url = 'http://192.168.1.110:11811/admin/{0}'.format(module_name)\n resp = urllib.urlopen(url, urllib.urlencode(post_data))\n result = resp.read()\n return json.loads(result)\n\nclass TestCoreComponent(SRTestCase):\n '''Test case core components\n A test case that includes testing the core components should inherit\n this class'''\n def setUp(self):\n 
super(TestCoreComponent, self).setUp()\n self.xmppconn = None\n self.xmppconn2 = None\n self.process = []\n self.pipes = []\n \n def _initXMPP(self, username, password, resource='UNITTEST'):\n self.msg_queue = Queue()\n self.xmppconn = XMPPMessaging(username, password, config.MSG_SERVER,\n resource, self.msg_queue, prtimeout=1)\n \n def _initXMPP2(self, username, password, resource='UNITTEST'):\n self.msg_queue2 = Queue()\n self.xmppconn2 = XMPPMessaging(username, password, config.MSG_SERVER,\n resource, self.msg_queue2, prtimeout=1)\n\n def _sendCommand(self, to, command, param):\n self.xmppconn.send(to, '{0}={1}'.format(command, param))\n \n def _sendCommand2(self, to, command, param):\n self.xmppconn2.send(to, '{0}={1}'.format(command, param))\n\n def _waitForMessage(self, timeout=3):\n try:\n msg = self.msg_queue.get(True, timeout)\n except:\n return None\n return msg\n \n def _waitForMessage2(self, timeout=3):\n try:\n msg = self.msg_queue2.get(True, timeout)\n except:\n return None\n return msg\n\n def _waitForRow(self, sql, timeout=3):\n timeout = datetime.now() + timedelta(seconds=timeout)\n cursor = self.dbconn.cursor(MySQLdb.cursors.DictCursor)\n while datetime.now() < timeout:\n cursor.execute(sql)\n tmp = cursor.fetchall()\n if not tmp:\n time.sleep(0.1)\n continue\n break\n cursor.close()\n if len(tmp) == 1:\n return tmp[0]\n return tmp\n\n def _createProcess(self, process):\n here, there = multiprocessing.Pipe()\n new_proc = multiprocessing.Process(target=process, args=(there,))\n self.process.append(new_proc)\n self.pipes.append((here, there))\n new_proc.start()\n\n def tearDown(self):\n if self.xmppconn:\n self.xmppconn.stop()\n if self.xmppconn2:\n self.xmppconn2.stop()\n for proc in self.process:\n proc.terminate()\n if self.xmppconn:\n self.xmppconn.join()\n if self.xmppconn2:\n self.xmppconn2.join()\n for proc in self.process:\n proc.join()\n","sub_path":"utest/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443195194","text":"import time\r\nstartTime = time.clock()\r\nmonths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n#1 is a monday, 7 is a sunday, etc.\r\nweekday = 1\r\nyear = 1901\r\nmonth = 1\r\nmonthDay = 1\r\ntotal = 0\r\nwhile year < 2001:\r\n if weekday == 7 and monthDay == 1:\r\n total += 1\r\n print(\"day\", weekday, \"monthDay\", monthDay, \"month\", month, \"year\", year)\r\n if year % 4 == 0:\r\n months[1] = 29\r\n elif not months[1] == 28:\r\n months[1] = 28\r\n if weekday == 7:\r\n weekday = 1\r\n else:\r\n weekday += 1\r\n if monthDay >= months[month - 1]:\r\n month += 1\r\n monthDay = 1\r\n else:\r\n monthDay += 1\r\n if month > 12:\r\n month = 1\r\n year += 1\r\nprint(\"--- answer: %s ---\" % total)\r\nprint(\"--- %s seconds ---\" % (time.clock() - startTime))\r\n\r\n","sub_path":"Python/11-20/euler_19.py","file_name":"euler_19.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130317983","text":"import json as json\nimport zmq as zmq\n\nfrom base import SettingsConfig\n\n\nclass Comms:\n\n def __init__(self, sys_settings_fname):\n self.init_pub_sub(sys_settings_fname)\n\n def init_pub_sub(self, sys_settings_fname):\n sc = SettingsConfig(sys_settings_fname)\n pub_facing_endpoint = sc.get_pub_facing_endpoint()\n sub_facing_endpoint = sc.get_sub_facing_endpoint()\n\n self.context = zmq.Context()\n\n self.sub_facing_socket 
= self.context.socket(zmq.SUB)\n self.sub_facing_socket.connect(sub_facing_endpoint)\n self.sub_facing_socket.setsockopt(zmq.LINGER, 0)\n # self.sub_facing_socket.subscribe('')\n\n self.pub_facing_socket = self.context.socket(zmq.PUB)\n self.pub_facing_socket.connect(pub_facing_endpoint)\n\n def set_subscriptions(self, sub_list):\n for sub_str in sub_list:\n self.sub_facing_socket.subscribe(sub_str)\n\n def close_pub_sub(self):\n self.sub_facing_socket.close()\n self.pub_facing_socket.close()\n self.context.term()\n\n def send_msg(self, msg_type, payload):\n self.pub_facing_socket.send_multipart(\n [msg_type.encode('utf-8'), json.dumps(payload).encode('utf-8')]\n )\n\n def recv_msg(self):\n try:\n msg = self.sub_facing_socket.recv_multipart(flags=zmq.NOBLOCK)\n except zmq.ZMQError:\n msg = None\n\n return msg\n\n\nclass CommsForwarder:\n\n def __init__(self, sys_settings_fname):\n self.setup_forwarder(sys_settings_fname)\n\n def setup_forwarder(self, sys_settings_fname):\n sc = SettingsConfig(sys_settings_fname)\n pub_facing_endpoint = sc.get_pub_facing_endpoint()\n sub_facing_endpoint = sc.get_sub_facing_endpoint()\n\n self.context = zmq.Context()\n\n self.frontend = self.context.socket(zmq.SUB)\n self.frontend.bind(pub_facing_endpoint)\n self.frontend.setsockopt(zmq.LINGER, 0)\n self.frontend.subscribe('')\n\n # Socket facing publishers\n self.backend = self.context.socket(zmq.PUB)\n self.backend.bind(sub_facing_endpoint)\n\n def start_forwarding(self):\n try:\n print(\"Forwarder successfully started\")\n zmq.device(zmq.FORWARDER, self.frontend, self.backend)\n except (Exception, KeyboardInterrupt):\n # print(e)\n print(\"bringing down zmq device\")\n finally:\n self.frontend.close()\n self.backend.close()\n self.context.term()\n\n\nif __name__ == \"__main__\":\n sys_settings_fname = 'sys_settings.json'\n cf = CommsForwarder(sys_settings_fname)\n\n cf.start_forwarding()\n","sub_path":"homenet/comms.py","file_name":"comms.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569602075","text":"from django.test import TestCase\nfrom .models import *\n\n\nclass Unit_test_list(TestCase):\n def test_ok_create_url(self):\n Urls.objects.create(short_id='QaZx', httpurl='https://www.facebook.com', count=0)\n data = {'short_id': 'QaZx', 'httpurl': 'https://www.facebook.com', 'count': 0}\n response = self.client.post('/makeshort/', data)\n status_code = response.status_code\n x_data = Urls.objects.filter()\n url = x_data.get()\n self.assertEquals(status_code, 200)\n self.assertEquals(url.short_id, data['short_id'])\n self.assertEquals(url.httpurl, data['httpurl'])\n self.assertEquals(url.count, 0)\n","sub_path":"Link_Shortener/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"480935366","text":"# stdlib\nfrom datetime import datetime\nfrom json import dumps, loads\n\n# Zato\nfrom zato.server.service import Service\n\nclass GetClientDetails(Service):\n\n def should_notify_frauds(self, cust_type):\n return cust_type in ('A', 'B', 'C')\n\n def handle(self):\n\n request = dumps(self.request.payload)\n\n self.logger.info('Request: {}'.format(self.request.payload))\n self.logger.info('Request type: {}'.format(type(self.request.payload)))\n\n # Fetch connection to CRM\n crm = self.outgoing.plain_http.get('CRM')\n\n # Fetch connection to Payments\n payments = 
self.outgoing.plain_http.get('Payments')\n\n # Grab the customer info ..\n cust = crm.conn.send(request)\n cust = loads(cust.text)\n\n # .. and last payment's details\n last_payment = payments.conn.send(request)\n last_payment = loads(last_payment.text)\n\n self.logger.info('Customer details: {}'.format(cust))\n self.logger.info('Last payment: {}'.format(last_payment))\n\n # Create response\n\n response = {}\n response['first_name'] = cust['firstName']\n response['last_name'] = cust['lastName']\n response['last_payment_date'] = last_payment['DATE']\n response['last_payment_amount'] = last_payment['AMOUNT']\n response = dumps(response)\n\n self.logger.info('Response: {}'.format(response))\n\n # Create a request to fraud detection and send it asynchronously\n # but only if customer is of a certain type.\n\n if self.should_notify_frauds(self.request.payload['cust_type']):\n\n fraud_request = {}\n fraud_request['timestamp'] = datetime.utcnow().isoformat()\n fraud_request['request'] = request\n fraud_request['response'] = response\n fraud_request = dumps(fraud_request)\n\n self.outgoing.zmq.send(fraud_request, 'Fraud detection')\n\n else:\n self.logger.info('Skipped fraud detection for CID {}'.format(self.cid))\n\n # And return response to the caller\n self.response.payload = response","sub_path":"services/tutorial2_zmqgetclientdetails.py","file_name":"tutorial2_zmqgetclientdetails.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174148122","text":"\n__author__ = 'jack'\n\nfrom ubitrack.core import math as utmath\nfrom ubitrack.core import util\nfrom ubitrack.core import measurement\nfrom ubitrack import facade\nimport numpy as np\nimport time\nimport os\n\nlogging_initialized = False\n\ndef setup_facade():\n print(\"set up AdvancedFacade\")\n global logging_initialized\n if not logging_initialized:\n util.initLogging(\"log4cpp.conf\")\n logging_initialized = True\n\n if not \"UBITRACK_COMPONENTS_PATH\" in os.environ:\n print(\"Missing environment variable: UBITRACK_COMPONENTS_PATH - tests are likely to fail\")\n return facade.AdvancedFacade() \n else:\n return facade.AdvancedFacade(os.environ['UBITRACK_COMPONENTS_PATH']) \n\n\ndef teardown_facade(f):\n print(\"tear down AdvancedFacade\")\n if f is not None:\n f.clearDataflow()\n f.killEverything()\n print(\"done\")\n\n\ndef test_basic_facade_components_direct_callback():\n print(\"test_basic_facade_components_direct_callback\")\n\n f = setup_facade()\n thisdir = os.path.dirname(__file__)\n f.loadDataflow(os.path.join(thisdir, \"single_pushsinkpose.dfg\"), True)\n \n results = []\n\n def cb(m):\n results.append(m)\n\n print(\"set callback pose\")\n f.setCallbackPose(\"receiver\", cb)\n print(\"start dataflow\")\n f.startDataflow()\n\n print(\"wait\")\n time.sleep(3)\n\n print(\"stop dataflow\")\n f.stopDataflow()\n f.setCallbackPose(\"receiver\", None)\n\n print(\"assert results\")\n assert len(results) > 0\n print(results[0])\n results.clear()\n\n teardown_facade(f)\n f = None\n\n\ndef test_basic_facade_components_pushsink_object():\n print(\"test_basic_facade_components_pushsink_object\")\n\n f = setup_facade()\n thisdir = os.path.dirname(__file__)\n f.loadDataflow(os.path.join(thisdir, \"single_pushsinkpose.dfg\"), True)\n\n results = []\n\n def cb(m):\n results.append(m)\n\n print(\"get pushsink\")\n x = f.getApplicationPushSinkPose(\"receiver\")\n assert x is not None\n\n print(\"set callback pose\")\n x.setCallback(cb)\n\n print(\"start 
dataflow\")\n f.startDataflow()\n\n print(\"wait\")\n time.sleep(3)\n\n print(\"stop dataflow\")\n f.stopDataflow()\n\n x.setCallback(None)\n # XXX need to deallocate pushsink otherwise we'll segfault on program exit\n x = None\n\n print(\"assert results\")\n assert len(results) > 0\n print(results[0])\n results.clear()\n\n time.sleep(0.3)\n\n teardown_facade(f)\n f = None\n\n\n\ndef test_pull_positionlist():\n print(\"test_pull_positionlist\")\n\n f = setup_facade()\n thisdir = os.path.dirname(__file__)\n f.loadDataflow(os.path.join(thisdir, \"test_positionlist.dfg\"), True)\n \n x = f.getApplicationPullSinkPositionList(\"receiver\")\n assert x is not None\n\n f.startDataflow()\n \n mps = x.get(measurement.now())\n \n f.stopDataflow()\n # XXX need to deallocate pullsink otherwise we'll segfault on program exit\n x = None\n\n ps = mps.get()\n assert len(ps) == 3\n \n p0 = ps[0]\n assert p0[0] == 1 and p0[1] == 0 and p0[2] == 0\n\n time.sleep(0.3)\n teardown_facade(f)\n f = None\n\n\n\ndef test_pullsource_pose():\n print(\"test_pullsource_pose\")\n f = setup_facade()\n thisdir = os.path.dirname(__file__)\n f.loadDataflow(os.path.join(thisdir, \"test_pull_source_pose.dfg\"), True)\n \n x = f.getApplicationPullSourcePose(\"pose\")\n assert x is not None\n\n def pull_cb(ts):\n from ubitrack.core import math, measurement\n import numpy as np\n p = math.Pose(math.Quaternion(), np.array([1.,2.,3.], dtype=np.double))\n return measurement.Pose(ts, p)\n\n x.setCallback(pull_cb)\n f.startDataflow()\n \n time.sleep(3)\n \n f.stopDataflow()\n x.setCallback(None)\n # XXX need to deallocate pullsource otherwise we'll segfault on program exit\n x = None\n\n time.sleep(0.3)\n teardown_facade(f)\n f = None\n\n\n","sub_path":"tests/test_utFacade.py","file_name":"test_utFacade.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"646691306","text":"import streamlit as st\nfrom utils.stakeholder_card import stakeholder_card\nimport pandas as pd\nimport datetime\nimport numpy as np\nimport math\n\n#TO-DO : extract repeated code into seperate functions\n\ndef load_test_data():\n # Just loading the data\n\n main_data_df = pd.read_csv(\"static/data/Stakeholder_Data.csv\")\n tags_df = pd.read_csv(\"static/data/Stakeholder_Tags.csv\")\n return main_data_df, tags_df\n\ndef edit_row(stakeholder, df, loc):\n df = df.drop(loc,axis=0)\n df = df.append(stakeholder, ignore_index=True)\n df.to_csv(\"static/data/Stakeholder_Data.csv\", index=False)\n \ndef add(main_data_df, tags, stakerholder_lookup):\n \"\"\"\n Adding a new stakeholder to the database\n \"\"\"\n\n st.title(\"Add Stakeholder\")\n \n # Creating two columns for user inputs\n c1, c2 = st.beta_columns((3,3))\n\n with c1:\n \n avatar = st.selectbox(\"Avatar\", [\"avatar\"+str(av) for av in range(1,7)], index=0)\n name = st.text_input(\"Name\", value=\"Frodeo Baggends\")\n company = st.text_input(\"Company\", value=\"Mordeor\")\n job_title = st.text_input(\"Job Title\", value=\"The Shire\")\n address = st.text_input(\"address\", value=\"17, The Shire Road\")\n post_code = st.text_input(\"Post Code\", value=\"SH17 1MOR\")\n last_date_contacted = st.date_input(\"Last Date Contacted\")\n\n with c2:\n \n email = st.text_input(\"Email\", value=\"hobbit@shire.com\")\n phone_num = st.text_input(\"Phone Number\", value=\"+44 123456789\")\n current_employment_length = st.number_input(\"Employment Length\", value=5)\n employment_start_date = st.date_input(\"Employment Start 
Date\")\n URLs = st.text_input(\"Comma Separated Weblinks\", value=\"bagends_primary.com\")\n tags = st.multiselect(\"Comma Separated Tags\", options=tags, default=None)\n\n\n # New Stakeholder Data \n stakeholder = {\n \"Avatar Number\" : avatar,\n \"Name\" : name,\n \"Email\" : email,\n \"Company\" : company,\n \"Job Title\" : job_title,\n \"Address\" : address,\n \"Post Code\" : post_code,\n \"Phone Number\" : phone_num,\n \"Current Employment Length\" : current_employment_length,\n \"Employment Start Date\" : employment_start_date,\n \"Date Last Contacted\" : last_date_contacted,\n \"URLs\" : URLs,\n \"Tags\" : \",\".join(tags)\n }\n \n add_stakerholder_button = st.button(\"Add Stakeholder Record\")\n\n if add_stakerholder_button:\n # Check if Name Exists\n if (main_data_df['Name'] == name).any():\n st.markdown(\"\"\"
\n Stakeholder Already Exists. Please Edit Instead.\n
    \"\"\", unsafe_allow_html=True)\n # Adding to database\n else:\n main_data_df = main_data_df.append(stakeholder, ignore_index=True)\n main_data_df.to_csv(\"static/data/Stakeholder_Data.csv\", index=False)\n # Save successful statement\n st.markdown(\"\"\"
\n Stakeholder Added\n
    \"\"\", unsafe_allow_html=True)\n\n # Display stakeholder card\n st.markdown(stakeholder_card(stakeholder), unsafe_allow_html=True)\n\ndef edit(main_data_df, tags, stakerholder_lookup):\n \"\"\"\n Editing a current stakeholder\n \"\"\"\n\n st.title(\"Edit Stakeholder\")\n\n # Stakeholder selection and data extraction\n edit_stakeholder = st.selectbox(\"Stakeholder\", main_data_df['Name'].values, index=5)\n loc = int(stakerholder_lookup[edit_stakeholder])\n stakeholder = main_data_df.iloc[loc].to_dict()\n\n # Displays a stakeholder\n st.markdown(stakeholder_card(stakeholder), unsafe_allow_html=True)\n \n st.title(\"Editor\")\n\n # Creating two columns for user inputs\n c1, c2 = st.beta_columns((3,3))\n\n with c1:\n \n avatar = st.selectbox(\"Avatar\", [\"avatar\"+str(av) for av in range(1,7)], index=int(stakeholder['Avatar Number'][-1])-1)\n name = st.text_input(\"Name\", value=stakeholder['Name'])\n company = st.text_input(\"Company\", value=stakeholder['Company'])\n job_title = st.text_input(\"Job Title\", value=stakeholder['Job Title'])\n address = st.text_input(\"address\", value=stakeholder['Address'])\n post_code = st.text_input(\"Post Code\", value=stakeholder[\"Post Code\"])\n stakehodler_last_date = datetime.datetime.strptime(stakeholder['Date Last Contacted'], '%Y-%m-%d')\n last_date_contacted = st.date_input(\"Last Date Contacted\", value=stakehodler_last_date)\n\n with c2:\n \n email = st.text_input(\"Email\", value=stakeholder['Email'])\n phone_num = st.text_input(\"Phone Number\", value=stakeholder['Phone Number'])\n current_employment_length = st.number_input(\"Employment Length\", value=int(stakeholder['Current Employment Length']))\n #Generating Datatime object\n stakehodler_start_date = datetime.datetime.strptime(stakeholder['Employment Start Date'], '%Y-%m-%d')\n employment_start_date = st.date_input(\"Employment Start Date\", stakehodler_start_date)\n\n URLs = st.text_input(\"Comma Separated Weblinks\", value=stakeholder['URLs'])\n if type(stakeholder['Tags']) != str:\n default_tags = None\n else:\n default_tags = list(stakeholder['Tags'].split(\",\"))\n tags = st.multiselect(\"Comma Separated Tags\", options=list(tags), default=default_tags)\n\n stakeholder = {\n \"Name\" : name,\n \"Email\" : email,\n \"Company\" : company,\n \"Job Title\" : job_title,\n \"Phone Number\" : phone_num,\n \"Address\" : address,\n \"Post Code\" : post_code,\n \"Employment Start Date\" : employment_start_date,\n \"Current Employment Length\" : current_employment_length,\n \"Avatar Number\" : avatar,\n \"Date Last Contacted\" : last_date_contacted,\n \"URLs\" : URLs,\n \"Tags\" : \",\".join(tags)\n }\n\n update_button = st.button(\"Update Stakeholder\")\n\n if update_button:\n \n edit_row(stakeholder, main_data_df, loc)\n\n # Save successful statement\n st.markdown(\"\"\"
\n Stakeholder Updated\n
    \"\"\", unsafe_allow_html=True)\n\n\n\ndef edit_tags(main_data_df, tags, stakerholder_lookup):\n \"\"\"\n Managing the current tags\n \"\"\"\n\n st.title(\"Tag Management\")\n\n # Generating a list of tags, required for the multi-select function\n list_tags = list(tags)\n\n new_tag = st.text_input(\"Please Enter New Tag e.g. WearableInjectables or SpaceStation\")\n\n update_tag_button = st.button(\"Add Tag\")\n\n if update_tag_button:\n # Checking if the tags is already present\n if new_tag not in list_tags:\n tags_df = pd.DataFrame({\"Tags\" : list_tags + [new_tag]})\n tags_df.to_csv(\"static/data/Stakeholder_Tags.csv\", index=False)\n st.markdown(\"\"\"
\n Tag Successfully Added\n
    \"\"\", unsafe_allow_html=True)\n \n else:\n st.markdown(\"\"\"
\n Tag already exists, not added to database\n
    \"\"\", unsafe_allow_html=True)\n\n # removing tags\n remove_tags = st.multiselect(\"Tags to Remove\", options=list_tags)\n\n remove_tags_button = st.button(\"Remove Tag(s)\")\n\n if remove_tags_button:\n for tag in remove_tags:\n list_tags.remove(tag)\n # Checking if the tags is already present\n tags_df = pd.DataFrame({\"Tags\" : list_tags})\n tags_df.to_csv(\"static/data/Stakeholder_Tags.csv\", index=False)\n st.markdown(\"\"\"
\n Tag(s) Successfully Removed\n
    \"\"\", unsafe_allow_html=True)\n\ndef database_admin():\n \"\"\"\n Main function for the database maintance page\n \"\"\"\n\n # Loading the data\n main_data_df , tags_df = load_test_data()\n tags = tags_df['Tags'].values\n\n # Generating a stakerholder lookup dict... Name : Index\n stakerholder_lookup = dict((v,k) for k,v in main_data_df[[\"Name\"]].to_dict()['Name'].items())\n\n # Available pages in Stakeholders Database Management mode\n PAGES = {\n \"Add Stakeholder\" : add,\n \"Edit Stakeholder\" : edit,\n \"Tag Management\" : edit_tags\n }\n\n # Update sidebar for the database maintance page\n page_selection = st.sidebar.radio(\n \"Edit Mode\", \n [\n \"Add Stakeholder\",\n \"Edit Stakeholder\",\n \"Tag Management\"\n ]\n )\n \n\n # Load the page based on the selected option\n page = PAGES[page_selection]\n page(main_data_df, tags, stakerholder_lookup)\n \n","sub_path":"pages/stakeholder_editor.py","file_name":"stakeholder_editor.py","file_ext":"py","file_size_in_byte":8718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615490661","text":"from flask import Flask, render_template,request, redirect, url_for, flash\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\n#MySQL Conexión\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'anecdotario'\nmysql = MySQL(app)\n\n#SETTINGS\n\napp.secret_key = 'mysecretkey'\n\n#INDEX\n\n@app.route('/')\ndef Index():\n cur = mysql.connection.cursor()\n tittle = 'Menú Principal'\n return render_template('index.html', tittle = tittle)\n\n#ESTUDIANTES\n\n@app.route('/estudiante', methods=[\"POST\", \"GET\"])\ndef estudiante():\n tittle = 'Estudiante'\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM estudiantes')\n datos = cur.fetchall()\n return render_template('estudiante.html', tittle = tittle, datos = datos)\n \n#DOCENTE\n\n@app.route('/docente', methods=[\"POST\",\"GET\"])\ndef docente():\n tittle = \"Docente\"\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM docentes')\n datos = cur.fetchall()\n return render_template('docente.html', tittle = tittle, datos = datos)\n\n#ACUDIENTE\n\n@app.route('/acudiente', methods=['POST', 'GET'])\ndef acudiente():\n tittle = 'Acudiente'\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM acudientes')\n datos = cur.fetchall()\n return render_template('acudiente.html', tittle = tittle, datos = datos)\n\n#ASIGNATURA\n\n@app.route('/asignatura', methods=['POST', 'GET'])\ndef asignatura():\n tittle = 'Asignatura'\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM asignatura')\n datos = cur. 
fetchall()\n return render_template('asignatura.html', tittle = tittle, datos = datos)\n\n#AGREGAR ESTUDIANTE\n\n@app.route('/agg_estudiante', methods=['POST'])\ndef agg_estudiante():\n if request.method == \"POST\":\n nombres = request.form['nombres']\n apellidos = request.form['apellidos']\n sexo = request.form['sexo']\n grado = request.form['grado']\n fecha_nac = request.form['fecha_nac']\n direccion = request.form['direccion']\n num_doc = request.form['num_doc']\n email = request.form['email']\n telefono = request.form['telefono']\n nombre_acu = request.form['nombre_acu']\n doc_acuiente = request.form['doc_acudiente']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"INSERT INTO estudiantes (nombres, apellidos, sexo, grado, fecha_nac, direccion, num_doc, email, telefono, nombre_acu, doc_acudiente)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", \n [nombres, apellidos, sexo, grado, fecha_nac, direccion, \n num_doc, email, telefono, nombre_acu, doc_acuiente])\n mysql.connection.commit()\n flash('Estudiante Agregado Correctamente')\n return redirect(url_for('estudiante'))\n\n#EDITAR ESTUDIANTE\n\n@app.route('/edit_estudiante/')\ndef get_estudiante(id):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM estudiantes WHERE id=%s\", [id])\n dato = cur.fetchall()\n print(dato[0])\n return render_template('/editar_estudiante.html',dato = dato[0])\n\n@app.route('/update_estudiante/', methods = ['POST'])\ndef update_estudiante(id):\n if request.method == \"POST\":\n nombres = request.form['nombres']\n apellidos = request.form['apellidos']\n sexo = request.form['sexo']\n grado = request.form['grado']\n fecha_nac = request.form['fecha_nac']\n direccion = request.form['direccion']\n num_doc = request.form['num_doc']\n email = request.form['email']\n telefono = request.form['telefono']\n nombre_acu = request.form['nombre_acu']\n doc_acudiente = request.form['doc_acudiente']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"UPDATE estudiantes SET nombres = %s, \n apellidos = %s, sexo = %s, grado = %s, fecha_nac = %s,\n direccion = %s, num_doc = %s, email = %s, telefono = %s, \n nombre_acu = %s, doc_acudiente = %s WHERE id = %s\"\"\", \n [nombres, apellidos, sexo, grado, fecha_nac, direccion, \n num_doc, email, telefono, nombre_acu, doc_acudiente, id])\n mysql.connection.commit()\n flash('Estudiante Actualizado Correctamente')\n return redirect(url_for('estudiante'))\n\n#ELIMINAR ESTUDIANTE\n\n@app.route('/eliminar_estudiante/')\ndef delete_estudiante(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM estudiantes WHERE id = {0}'.format(id))\n mysql.connection.commit()\n flash('Estudiante Eliminado Correctamente')\n return redirect(url_for('estudiante'))\n\n#AGREGAR DOCENTE \n\n@app.route('/agg_docente', methods=['POST'])\ndef agg_docente():\n if request.method == 'POST':\n nombre = request.form['nombre']\n apellidos = request.form['apellidos']\n sexo = request.form['sexo']\n asignatura = request.form['asignatura']\n direccion = request.form['direccion']\n telefono = request.form['telefono']\n email = request.form['email']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"INSERT INTO docentes (nombre, apellidos, sexo, asignatura, direccion, telefono, email)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\"\"\",\n [nombre, apellidos, sexo, asignatura, direccion, telefono, email])\n mysql.connection.commit()\n flash('Docente Agregado Correctamente') \n return redirect(url_for('docente'))\n\n#EDITAR DOCENTE\n\n@app.route('/edit_docente/')\ndef get_docente(id):\n 
cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM docentes WHERE id=%s\", [id])\n dato = cur.fetchall()\n print(dato[0])\n return render_template('/editar_docente.html',dato = dato[0])\n\n@app.route('/update_docente/', methods = ['POST'])\ndef update_docente(id):\n if request.method == \"POST\":\n nombre = request.form['nombre']\n apellidos = request.form['apellidos']\n sexo = request.form['sexo']\n asignatura = request.form['asignatura']\n direccion = request.form['direccion']\n telefono = request.form['telefono']\n email = request.form['email']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"UPDATE docentes SET nombre = %s, \n apellidos = %s, sexo = %s, asignatura = %s, direccion = %s, telefono = %s, \n email = %s WHERE id = %s\"\"\", \n [nombre, apellidos, sexo, asignatura, direccion, telefono, email, id])\n mysql.connection.commit()\n flash('Docente Actualizado Correctamente')\n return redirect(url_for('docente'))\n \n#ELIMINAR DOCENTE\n\n@app.route('/eliminar_docente/')\ndef delete_docente(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM docentes WHERE id = {0}'.format(id))\n mysql.connection.commit()\n flash('Docente Eliminado Correctamente')\n return redirect(url_for('docente'))\n\n#AGREGAR ACUDIENTE\n\n@app.route('/agg_acudiente', methods=['POST'])\ndef agg_acudiente():\n if request.method == 'POST':\n nombre = request.form['nombre']\n direccion = request.form['direccion']\n num_doc = request.form['num_doc']\n telefono = request.form['telefono']\n email = request.form['email']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"INSERT INTO acudientes (nombre, direccion, num_doc, telefono, email)\n VALUES (%s, %s, %s, %s, %s)\"\"\",\n [nombre, direccion, num_doc, telefono, email])\n mysql.connection.commit()\n flash('Acudiente Agregado Correctamente') \n return redirect(url_for('acudiente'))\n\n#EDITAR ACUDIENTE\n\n@app.route('/edit_acudiente/')\ndef get_acudiente(id):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM acudientes WHERE id=%s\", [id])\n dato = cur.fetchall()\n print(dato[0])\n return render_template('/editar_acudiente.html',dato = dato[0])\n\n@app.route('/update_acudiente/', methods = ['POST'])\ndef update_acudiente(id):\n if request.method == \"POST\":\n nombre = request.form['nombre']\n direccion = request.form['direccion']\n num_doc = request.form['num_doc']\n telefono = request.form['telefono']\n email = request.form['email']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"UPDATE acudientes SET nombre = %s, \n direccion = %s, num_doc = %s, telefono = %s, email = %s WHERE id = %s\"\"\", \n [nombre, direccion, num_doc, telefono, email, id])\n mysql.connection.commit()\n flash('Acudiente Actualizado Correctamente')\n return redirect(url_for('acudiente'))\n\n#ELIMINAR ACUDIENTE\n\n@app.route('/eliminar_acudiente/')\ndef delete_acudiente(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM acudientes WHERE id = {0}'.format(id))\n mysql.connection.commit()\n flash('Acudiente Eliminado Correctamente')\n return redirect(url_for('acudiente'))\n\n#AGREGAR ASIGNATURA\n\n@app.route('/agg_asignatura', methods=['POST'])\ndef agg_asignatura():\n if request.method == 'POST':\n nombre = request.form['nombre']\n doc_encargado = request.form['doc_encargado']\n horas_semanales = request.form['horas_semanales']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"INSERT INTO asignatura (nombre, doc_encargado, horas_semanales)\n VALUES (%s, %s, %s)\"\"\",\n [nombre, doc_encargado, horas_semanales])\n 
mysql.connection.commit()\n flash('Asignatura Agregado Correctamente') \n return redirect(url_for('asignatura'))\n\n#EDITAR ASIGNATURA\n\n@app.route('/edit_asignatura/')\ndef get_asignatura(id):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM asignatura WHERE id=%s\", [id])\n dato = cur.fetchall()\n print(dato[0])\n return render_template('/editar_asignatura.html',dato = dato[0])\n\n@app.route('/update_asignatura/', methods = ['POST'])\ndef update_asignatura(id):\n if request.method == \"POST\":\n nombre = request.form['nombre']\n doc_encargado = request.form['doc_encargado']\n horas_semanales = request.form['horas_semanales']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"UPDATE asignatura SET nombre = %s, \n doc_encargado = %s, horas_semanales = %s WHERE id = %s\"\"\", \n [nombre, doc_encargado, horas_semanales, id])\n mysql.connection.commit()\n flash('Asignatura Actualizado Correctamente')\n return redirect(url_for('asignatura'))\n\n#ELIMINAR ASIGNATURA\n\n@app.route('/eliminar_asignatura/')\ndef delete_asignatura(id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM asignatura WHERE id = {0}'.format(id))\n mysql.connection.commit()\n flash('Asignatura Eliminado Correctamente')\n return redirect(url_for('asignatura'))\n\n\nif __name__ == '__main__':\n app.run(port = 3000, debug = True)\n","sub_path":"Python/Anecdotario/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":10865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"170518368","text":"from google.appengine.api import urlfetch\nfrom bs4 import BeautifulSoup as parse\nfrom itertools import izip\n\n\ndef scrape(areaCode):\n\turl = 'http://www.allareacodes.com/' + areaCode\n\tresult = urlfetch.fetch(url=url, method=urlfetch.GET, allow_truncated=False, follow_redirects=True, deadline=10)\n\tif result.status_code == 200:\n\t\thtml = parse(result.content).findAll('table',{'class':'lined'})[1].findAll('td')\n\t\tdata = []\n\t\tfor td in html:\n\t\t\tif td.contents:\n\t\t\t\tanchorTag = parse(str(td.contents[0])).find('a')\n\t\t\t\tif anchorTag:\n\t\t\t\t\tdata.append(anchorTag.contents[0])\n\t\t\t\telse:\n\t\t\t\t\tdata.append(td.contents[0])\n\t\t\telse:\n\t\t\t\tdata.append('')\n\t\treturn list(izip(*[iter(data)]*6))\n\telse:\n\t\treturn None\n\n\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613020459","text":"#Ashley Teoh Homework8 Question3\n\nglobal movie_list\nmovie_list = ['Spotlight', 'The Big Short', 'Bridge of Spies', 'Brooklyn', 'Mad Max: Fury Road', 'The Martian', 'The Revenant', 'Room']\n\n\ndef get_rating():\n user_rating = []\n print(\"///////////////\\nRating Meanings\\n///////////////\\n0: I did not see the movie\\n1: Terrible\\n2: Bad\\n3: Average\\n4: Good\\n5: Fantastic\\n///////////////\")\n\n for movie in movie_list:\n move_on = False\n while move_on == False:\n try:\n movie_rating = int(input(\"From 1 to 5, rate \" + movie))\n assert 0<= movie_rating <= 5\n except:\n print('please enter a number from 0 to 5!')\n else:\n user_rating.append(movie_rating)\n move_on = True\n return user_rating\n\n\ndef get_user_input():\n global user_record\n user_record = [] \n user_number = 0 \n next_user = \"yes\"\n\n while next_user == \"yes\":\n user_number += 1\n rating = get_rating()\n array = []\n array.append(user_number)\n array.append(rating)\n user_record.append(array)\n 
next_user = input(\"Is there another user? Enter 'yes' of 'no'\")\n\n #print(user_record)\n return user_record\n \n\ndef ave_user_response(data):\n total_users = len(data)\n #setting up movie data\n for movie_index in range(len(movie_list)):\n movie_name = movie_list[movie_index]\n movie_score = 0\n total_valid_users = 0\n\n for user in data:\n if user[1][movie_index] != 0:\n movie_score += user[1][movie_index]\n total_valid_users += 1\n\n ave_movie_score = movie_score / total_valid_users\n \n print(\"The average movie score for \" + movie_name + \" is \" + str((\"%.2f\" % round(ave_movie_score,2))) + \"\\n\" + str(total_valid_users) + \" people in total saw \" + movie_name + \"\\n\")\n\n\ndef add_oscar_opinions(input):\n file = open('HW8Q3_output.tsv','w')\n file.write(\"User Number \\t\")\n for movie in movie_list:\n file.write(movie +\"\\t\")\n for entry in input:\n file.write(\"\\n\" + str(entry[0]) + \"\\t\")\n for rating in entry[1]:\n file.write(str(rating) + \"\\t\")\n print(\"check [HW8Q3_output.tsv] for the output!\")\n file.close()\n\n\ndef main():\n get_user_input()\n ave_user_response(user_record)\n add_oscar_opinions(user_record)\n\nmain()\n","sub_path":"intro_prog/hw8/Homework8(Q3)-AshleyTeoh.py","file_name":"Homework8(Q3)-AshleyTeoh.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"642265730","text":"import cv2\nimport glob\nimport numpy as np \nfrom numpy import array,mat,sin,cos,dot,eye\nfrom numpy.linalg import norm\nfrom numpy import * \nfrom IPython.core.debugger import Tracer\n\na = np.load('./cam_calibration_output_set2.npz', mmap_mode=None, allow_pickle=True, fix_imports=False, encoding='ASCII')\n\n\ndef rodrigues(r):\n def S(n):\n Sn = array([[0,-n[2],n[1]],[n[2],0,-n[0]],[-n[1],n[0],0]])\n return Sn\n theta = norm(r)\n if theta > 1e-30:\n n = r/theta\n Sn = S(n)\n R = eye(3) + sin(theta)*Sn + (1-cos(theta))*dot(Sn,Sn)\n else:\n Sr = S(r)\n theta2 = theta**2\n R = eye(3) + (1-theta2/6.)*Sr + (.5-theta2/24.)*dot(Sr,Sr)\n return mat(R)\n\t\n\nrvecs = a['rvecs']\ntvecs = a['tvecs']\n#print ('rvecs shape is ', rvecs.shape)\nrot_Mat = np.zeros([19,3,3])\nfor i in range (0,18):\n\trot_Mat [i,:,:] = rodrigues (rvecs[i])\n\n# print Rot_Mat\n# np.save(\"rotation matrix\",rot_Mat)\n\n#CBAK is the matrix from calibration borad to kinect\n\n\nCBAK = np.zeros([19,4,4])\n\n#print ('Rot_Mat shape is',Rot_Mat.shape)\n#print ('tvecs shape is ',tvecs.shape)\n\nfor i in range (0,18):\n\n CBAK[i,0:3, 0:3] = rot_Mat [i]\n CBAK[i,0:3, 3] = tvecs [i].reshape(-1)\n CBAK[i,3, 3] = 1\nprint(\"tvecs is \",tvecs)\n#print(CBAK)\nnp.save(\"CalibrationtoKinect_set2\",CBAK)\n\n#coordinates of the point in kinect frame\nCKF = np.zeros([19,4,1]) \n\nfor i in range (0,19):\n CKF [i,0:4,0:] = CBAK [i].dot(np.array([55,0,0,1]).reshape(-1,1))\n\nprint (\"coordinate is kinect frame is\", CKF) \nprint(CKF.shape)\n#tmp = np.zeros([4,4])\n#tmp[0:3, 0:3] = R \n#tmp[ 3 , 0:3] = t\n#tmp[3,3] = 1\n\n","sub_path":"Sensor/hand_eye_calib/getMatrix(Fei).py","file_name":"getMatrix(Fei).py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"181858499","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\n\nimport MySQLdb as mdb\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nimport matplotlib.pyplot as plt\n\nimport datetime\nimport time\nimport requests\nimport mpl_finance as mpf\nimport 
pandas as pd\nfrom talib import abstract\nimport threading\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n uic.loadUi('window.ui', self)\n\n self.pushButton.clicked.connect(self.ok_handler)\n self.lay = QVBoxLayout(self.textBrowser)\n\n # click row trigger different table\n self.StockTable.setRowCount(0)\n self.StockTable.setSelectionBehavior(QAbstractItemView.SelectRows)\n # self.StockTable.cellClicked.connect(lambda: self.K_Diagram(self.StockTable.item(self.StockTable.currentRow(), 1)))\n self.StockTable.cellClicked.connect(self.printcc)\n # self.input_stock_id.returnPressed.connect(lambda: self.input_stockID(self.input_stock_id.text()))\n self.input_stock_id.returnPressed.connect(self.input_stockID)\n\n self.load_TWStock_Database()\n self.create_customized_Stock_table(\"board\")\n stock_id = \"2330\"\n self.create_table(stock_id)\n # self.fetch_data(2019, 11, stock_id)\n\n self.K_Diagram(stock_id)\n self.write_customized_Stock_table(\"board\")\n # self.db.close()\n self.show()\n\n def printcc(self):\n stock_id = self.StockTable.item(self.StockTable.currentRow(), 0).text() # 1 is stock id place\n print(stock_id)\n self.Clear_Plot()\n self.K_Diagram(stock_id)\n\n def create_table(self, stock_id):\n mysqlcommand = \"CREATE TABLE if not exists STOCK_\"+ stock_id + \" (date date NOT NULL, stockno varchar(20) NOT NULL, shares bigint(20) NOT NULL, amount bigint(20) NOT NULL, open float NOT NULL, high float NOT NULL, low float NOT NULL, close float NOT NULL,diff float NOT NULL, turnover int(10) NOT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\n self.cursor.execute(mysqlcommand)\n print(\"create stock table successful\")\n\n def create_customized_Stock_table(self, tablename): # the table of selected stock_id\n mysqlcommand = \"CREATE TABLE if not exists STOCK_\"+ tablename + \" (stockno varchar(20) NOT NULL, shares bigint(20) NOT NULL, amount bigint(20) NOT NULL, open float NOT NULL, high float NOT NULL, low float NOT NULL, close float NOT NULL,diff float NOT NULL, turnover int(10) NOT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\n self.cursor.execute(mysqlcommand)\n print(\"create customized table successful\")\n\n def get_stock_history(self, date, stock_no, retry=5):\n quotes = []\n url = 'http://www.twse.com.tw/exchangeReport/STOCK_DAY?date=%s&stockNo=%s' % (date, stock_no)\n r = requests.get(url)\n data = r.json()\n return self.transform(data['data'])\n\n def transform_date(self, date):\n y, m, d = date.split('/')\n return str(int(y) + 1911) + '/' + m + '/' + d\n\n def transform_data(self, data):\n data[0] = datetime.datetime.strptime(self.transform_date(data[0]), '%Y/%m/%d')\n data[1] = int(data[1].replace(',', '')) # 把千進位的逗點去除\n data[2] = int(data[2].replace(',', ''))\n data[3] = float(data[3].replace(',', ''))\n data[4] = float(data[4].replace(',', ''))\n data[5] = float(data[5].replace(',', ''))\n data[6] = float(data[6].replace(',', ''))\n data[7] = float(0.0 if data[7].replace(',', '') == 'X0.00' else data[7].replace(',', '')) # +/-/X表示漲/跌/不比價\n data[8] = int(data[8].replace(',', ''))\n return data\n\n def transform(self, data):\n return [self.transform_data(d) for d in data]\n\n def genYM(self, smonth, syear, emonth, eyear): # 產生從syear年smonth月到eyear年emonth月的所有年與月的tuple\n start = 12 * syear + smonth\n end = 12 * eyear + emonth\n for num in range(int(start), int(end) + 1):\n y, m = divmod(num, 12)\n yield y, m\n\n def fetch_data(self, year: int, month: int, stockno): # 擷取從year-month開始到目前為止的所有交易日資料\n raw_data 
= []\n data = []\n today = datetime.datetime.today()\n for year, month in self.genYM(month, year, today.month, today.year): # 產生year-month到今天的年與月份,用於查詢證交所股票資料\n if month < 10:\n date = str(year) + '0' + str(month) + '01' # 1到9月\n else:\n date = str(year) + str(month) + '01' # 10月\n data = self.get_stock_history(date, stockno) # 到證交所網站依照date抓取該月股票編號為stockno的股價與成交量\n for item in data: # 取出該月的每一天編號為stockno的股票資料\n selectsql = \"select * from STOCK_\" + stockno + \" where date = '%s' and stockno = '%s'\" % (\n item[0], str(stockno)) # 查詢是否已經在資料庫的SQL\n print(selectsql)\n self.cursor.execute(selectsql) # 執行查詢的SQL\n ret = self.cursor.fetchone() # 如果有取出第一筆資料\n if not ret: # 不在資料庫\n insertsql = \"INSERT INTO STOCK_\"+ str(stockno)+\" (date, stockno, shares, amount, open, high, low, close, diff, turnover) \\\n VALUES ('%s', '%s', '%ld', '%ld', '%f', '%f', '%f', '%f', '%f', '%d')\" % (item[0], str(stockno),\n int(item[1]),\n int(item[2]),\n float(item[3]),\n float(item[4]),\n float(item[5]),\n float(item[6]),\n float(item[7]),\n int(item[8])) # 插入資料庫的SQL\n # insertsql = \"INSERT INTO STOCK_board (stockno, shares, amount, open, high, low, close, diff, turnover) \\\n # VALUES ('%s', '%ld', '%ld', '%f', '%f', '%f', '%f', '%f', '%d')\" % (str(stockno),\n # int(item[1]),\n # int(item[2]),\n # float(item[3]),\n # float(item[4]),\n # float(item[5]),\n # float(item[6]),\n # float(item[7]),\n # int(item[8])) # 插入資料庫的SQL\n print(insertsql)\n self.cursor.execute(insertsql) # 插入資料庫\n self.db.commit() # 插入時需要呼叫commit,才會修改資料庫\n time.sleep(7) # 延遲5秒,證交所會根據IP進行流量統計,流量過大會斷線\n\n # TODO 只取當日資料\n def fetch_data_for_customized_table(self, year: int, month: int, stockno): # 擷取從year-month開始到目前為止的所有交易日資料\n raw_data = []\n data = []\n today = datetime.datetime.today()\n insertsql = \"\"\n for year, month in self.genYM(month, year, today.month, today.year): # 產生year-month到今天的年與月份,用於查詢證交所股票資料\n if month < 10:\n date = str(year) + '0' + str(month) + '01' # 1到9月\n else:\n date = str(year) + str(month) + '01' # 10月\n data = self.get_stock_history(date, stockno) # 到證交所網站依照date抓取該月股票編號為stockno的股價與成交量\n\n for item in data: # 取出該月的每一天編號為stockno的股票資料\n selectsql = \"select * from STOCK_\" + stockno + \" where date = '%s' and stockno = '%s'\" % (\n item[0], str(stockno)) # 查詢是否已經在資料庫的SQL\n print(selectsql)\n self.cursor.execute(selectsql) # 執行查詢的SQL\n ret = self.cursor.fetchone() # 如果有取出第一筆資料\n insertsql = \"INSERT INTO STOCK_board (stockno, shares, amount, open, high, low, close, diff, turnover) \\\n VALUES ('%s', '%ld', '%ld', '%f', '%f', '%f', '%f', '%f', '%d') ON DUPLICATE KEY UPDATE stockno='%s'\" % (str(stockno),\n int(item[1]),\n int(item[2]),\n float(item[3]),\n float(item[4]),\n float(item[5]),\n float(item[6]),\n float(item[7]),\n int(item[8]),\n \"STOCK_\"+str(stockno)) # 插入資料庫的SQL\n print(insertsql)\n self.cursor.execute(insertsql) # 插入資料庫\n self.db.commit() # 插入時需要呼叫commit,才會修改資料庫\n time.sleep(6) # 延遲5秒,證交所會根據IP進行流量統計,流量過大會斷線\n\n\n def load_TWStock_Database(self):\n # # user specific ()\n self.db = mdb.connect('localhost', user='root', password='#Pp0988299647', db='TWStock')\n self.cursor = self.db.cursor()\n\n def write_customized_Stock_table(self, stock_table):\n self.cursor.execute(\"SELECT * FROM STOCK_\"+str(stock_table) +\";\")\n result = self.cursor.fetchall()\n # result = self.frame\n self.StockTable.clear()\n self.StockTable.setRowCount(0)\n for row_number, row_data in enumerate(result):\n self.StockTable.insertRow(row_number)\n # continue\n for column_number, data in enumerate(row_data):\n 
self.StockTable.setItem(row_number, column_number, QTableWidgetItem(str(data)))\n\n def K_Diagram(self, stock_id):\n # read data from MySQL in pandas Dataframe\n frame = pd.read_sql(\"select * from STOCK_\" + stock_id + \" order by date\", self.db)\n pd.set_option('display.expand_frame_repr', False)\n frame.set_index('date', inplace=True)\n\n # 設定index格式\n number_of_label = 10\n frame.index = frame.index.format(formatter=lambda x: x.strftime('%Y-%m-%d'))\n abstract.STOCH(frame).head(10)\n # add subplot\n fig = plt.figure(figsize=(30, 15))\n ax = fig.add_axes([0.06, 0.55, 0.92, 0.4])\n ax2 = fig.add_axes([0.06, 0.35, 0.92, 0.2])\n ax3 = fig.add_axes([0.06, 0.15, 0.92, 0.2]) ##起始點座標 (0,0),長寬 (1,0.2)\n ax.set_xticks(range(0, len(frame.index), number_of_label))\n ax2.set_xticks(range(0, len(frame.index), number_of_label))\n ax3.set_xticks(range(0, len(frame.index), number_of_label))\n ax.set_xticklabels(frame.index[::number_of_label], rotation=90)\n ax2.set_xticklabels(frame.index[::number_of_label], rotation=90)\n ax3.set_xticklabels(frame.index[::number_of_label], rotation=90)\n # set K-characteur\n mpf.candlestick2_ochl(ax, frame['open'], frame['close'], frame['high'], frame['low'], width=0.5, colorup ='r', colordown='g', alpha = 0.85);\n # volumn\n mpf.volume_overlay(ax3, frame['open'], frame['close'], frame['shares'], colorup='r', colordown='g', width=0.5, alpha=0.8)\n # 5 and 30 days average price\n sma_5 = abstract.SMA(frame, 5)\n sma_30 = abstract.SMA(frame, 30)\n ax.plot(sma_5)\n ax.plot(sma_30)\n # 添加KD值進df裡\n KD = abstract.STOCH(frame)\n KD.index = frame['close'].index\n frame['K'] = KD['slowk']\n frame['D'] = KD['slowd']\n frame = frame.fillna(0)\n ax2.plot(frame['K'], label='K')\n ax2.plot(frame['D'], label='D')\n ax2.legend(fontsize='medium')\n\n self.plotWidget = FigureCanvas(fig)\n\n self.lay.addWidget(self.plotWidget)\n self.setLayout(self.lay)\n\n def input_stockID(self):\n stock_id = self.input_stock_id.text()\n self.create_table(stock_id)\n self.fetch_data(2019, 11, stock_id)\n #TODO insert the id to customized_table\n self.input_stock_id.clear()\n self.fetch_data_for_customized_table(2019, 11, stock_id)\n self.write_customized_Stock_table(\"board\")\n\n def delete_stockID(self, stock_id):\n mysqlcommand = \"drop table if exists STOCK_\" + stock_id\n self.cursor.execute(mysqlcommand)\n\n def ok_handler(self):\n print(\"OK!\")\n\n def Clear_Plot(self):\n # self.lay.removeWidget(self.plotWidget)\n self.plotWidget.setParent(None)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = MainWindow()\n\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21549581","text":"import os\nimport inspect\nimport corner\nimport pickle\nimport numbers\nimport numpy as np\nfrom functools import partial\nimport matplotlib.pyplot as plt\n\n\ndef var_param_dict(name, fit=True, prior='uniform', default=None, tex=None,\n **kwargs):\n \"\"\"Create a dictionary that describes a single parameter of the fit.\n\n Parameters\n ----------\n fit : boolean\n Whether the parameter is varied in the fit.\n prior : string\n The prior used in the fit.\n default : float\n The value used if the parameter is kept fixed.\n tex : string\n LaTeX string describing the parameter.\n **kwargs : dict\n Any additional keywords passed to the prior.\n Returns\n -------\n dict\n Dictionary that describes the parameter of the fit.\n \"\"\"\n 
return {'name': name, 'fit': fit, 'prior': prior, 'tex': tex,\n 'default': default, **kwargs}\n\n\ndef check_var_param_list(var_param_list):\n try:\n assert isinstance(var_param_list, list)\n for var_param in var_param_list:\n assert isinstance(var_param, dict)\n assert 'name' in var_param\n assert isinstance(var_param['name'], str)\n assert 'fit' in var_param\n assert isinstance(var_param['fit'], bool)\n except AssertionError:\n raise RuntimeError('The fit parameters are not a list of ' +\n 'dictionaries with all required keywords.')\n\n\ndef write_var_param_list(var_param_list, directory):\n\n check_var_param_list(var_param_list)\n\n fstream = open(os.path.join(directory, 'var_param_list.pkl'), 'wb')\n pickle.dump(var_param_list, fstream)\n fstream.close()\n\n\ndef read_var_param_list(directory):\n\n return pickle.load(\n open(os.path.join(directory, 'var_param_list.pkl'), 'rb'))\n\n\ndef _prior(var_param_list, cube, n_dim, n_params):\n\n i = 0\n\n for var_param in var_param_list:\n\n if not var_param['fit']:\n continue\n\n if var_param['prior'] == 'uniform':\n cube[i] = (\n cube[i] * (var_param['max'] - var_param['min']) +\n var_param['min'])\n\n i += 1\n\n\ndef prior(var_param_list):\n check_var_param_list(var_param_list)\n return partial(_prior, var_param_list)\n\n\ndef vector_to_full_vector(vector, var_param_list):\n\n full_vector = np.zeros(len(var_param_list)) * np.nan\n\n # First, fill the values that were fitted.\n for i, var_param in enumerate(var_param_list):\n if var_param['fit']:\n full_vector[i] = vector[\n np.sum([param['fit'] for param in var_param_list[:i]],\n dtype=np.int)]\n\n # Next, fill all values that were not fitted.\n for i, var_param in enumerate(var_param_list):\n if not var_param['fit']:\n if isinstance(var_param['default'], numbers.Number):\n full_vector[i] = var_param['default']\n else:\n name_of_default = var_param['default']\n for k in range(len(var_param_list)):\n if var_param_list[k]['name'] == name_of_default:\n break\n full_vector[i] = full_vector[k]\n\n return full_vector\n\n\ndef vector_to_param_dict(vector, var_param_list):\n\n full_vector = vector_to_full_vector(vector, var_param_list)\n\n param_dict = {}\n for i, var_param in enumerate(var_param_list):\n param_dict[var_param['name']] = full_vector[i]\n\n return param_dict\n\n\ndef read_posterior(directory, format=np.ndarray, equal_weight=True,\n n_samples='max', logl=False):\n\n if equal_weight:\n vectors = np.genfromtxt(os.path.join(\n directory, 'post_equal_weights.dat'))\n else:\n ev = np.genfromtxt(os.path.join(directory, 'ev.dat'))\n vectors_ev = ev[:, :-2]\n weights_ev = (ev[:, -3] + ev[:, -2])\n vol_min = ev[-1, -2]\n\n live = np.genfromtxt(os.path.join(directory, 'phys_live.points'))\n vectors_live = live[:, :-1]\n weights_live = np.repeat(vol_min, len(live)) + live[:, -2]\n\n vectors = np.concatenate([vectors_ev, vectors_live])\n weights = np.concatenate([weights_ev, weights_live])\n\n weights = weights - np.amax(weights)\n weights = np.exp(weights)\n\n if isinstance(n_samples, int) or isinstance(n_samples, np.integer):\n vectors = vectors[np.random.randint(len(vectors), size=n_samples)]\n else:\n if not n_samples == 'max':\n raise RuntimeError('Cannot understand number of samples!' 
 +\n                               ' Received {}.'.format(n_samples))\n\n    if not inspect.isclass(format):\n        raise RuntimeError('format must be a class.')\n\n    if format == np.ndarray:\n        output = vectors\n        if not logl:\n            output = output[:, :-1]\n    elif format == dict:\n        var_param_list = read_var_param_list(directory)\n        var_param_list.append(var_param_dict('log L', fit=True))\n        output = [vector_to_param_dict(vector, var_param_list) for vector in\n                  vectors]\n    else:\n        raise RuntimeError('Unknown output format. Received {}.'.format(\n            format.__name__))\n\n    if equal_weight:\n        return output\n    else:\n        return output, weights\n\n\ndef read_best_fit(directory, format=np.ndarray):\n\n    live = np.genfromtxt(os.path.join(directory, 'phys_live.points'))\n    vectors_live = live[:, :-2]\n    vector = vectors_live[np.argmax(live[:, -2])]\n\n    if not inspect.isclass(format):\n        raise RuntimeError('format must be a class.')\n\n    if format == np.ndarray:\n        output = vector\n    elif format == dict:\n        var_param_list = read_var_param_list(directory)\n        output = vector_to_param_dict(vector, var_param_list)\n    else:\n        raise RuntimeError('Unknown output format. Received {}.'.format(\n            format.__name__))\n\n    return output\n\n\ndef read_max_log_likelihood(directory):\n\n    live = np.genfromtxt(os.path.join(directory, 'phys_live.points'))\n    return np.amax(live[:, -2])\n\n\ndef read_log_evidence(directory, ins=True):\n    with open(os.path.join(directory, 'stats.dat')) as fstream:\n        first_line = fstream.readline()\n        second_line = fstream.readline()\n    if ins:\n        line = second_line\n    else:\n        line = first_line\n    log_ev = float(line.split(\":\")[1].split(\"+/-\")[0])\n    fstream.close()\n    return log_ev\n\n\ndef tex_labels_of_fit(var_param_list):\n    labels = np.array([var_param['tex'] for var_param in var_param_list])\n    return labels[[var_param['fit'] for var_param in var_param_list]]\n\n\ndef make_corner_plot(directory, equal_weight=False, truths=None):\n    var_param_list = read_var_param_list(directory)\n    labels = tex_labels_of_fit(var_param_list)\n    if equal_weight:\n        samples = read_posterior(directory, equal_weight=True)\n        weights = np.ones(len(samples))\n    else:\n        samples, weights = read_posterior(directory, equal_weight=False)\n\n    ndim = len(labels)\n    fig, axes = plt.subplots(ndim, ndim, figsize=(7.0, 7.0))\n    corner.corner(np.transpose(np.transpose(samples)),\n                  weights=weights, plot_datapoints=False, plot_density=False,\n                  labels=labels, color='royalblue', show_titles=False,\n                  levels=(0.68, 0.95), bins=20, truths=truths,\n                  range=np.ones(ndim) * 0.99, fill_contours=True, fig=fig,\n                  hist_kwargs={'color': 'gold', 'histtype': 'stepfilled',\n                               'edgecolor': 'black', 'linewidth': 0.5},\n                  max_n_ticks=3, contour_kwargs={'linewidths': 0.5,\n                                                 'colors': 'black'})\n\n    axes = np.array(fig.axes).reshape((ndim, ndim))\n    for yi in range(ndim):\n        for xi in range(yi + 1):\n            ax = axes[yi, xi]\n            ax.tick_params(axis='x', labelsize=8, rotation=90)\n            ax.xaxis.set_label_coords(0.5, -0.5)\n            ax.tick_params(axis='y', labelsize=8, rotation=0)\n            ax.yaxis.set_label_coords(-0.5, 0.5)\n\n    for yi in range(ndim):\n        ax = axes[yi, yi]\n        ax.tick_params(axis='y', which='both', left=False, right=False)\n\n    plt.tight_layout(pad=0.3)\n    plt.subplots_adjust(wspace=0.05, hspace=0.05)\n    plt.savefig(os.path.join(directory, 'posterior.pdf'))\n    plt.savefig(os.path.join(directory, 'posterior.png'), dpi=300)\n    plt.close()\n","sub_path":"multinesttools/multinesttools.py","file_name":"multinesttools.py","file_ext":"py","file_size_in_byte":8377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"303519587","text":"# Given an integer array nums, return an array answer such that answer[i] is equal to the product of all the elements of nums except nums[i].\n\n# The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.\n\n# You must write an algorithm that runs in O(n) time and without using the division operation.\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n \n res = [1] * len(nums)\n \n # Start from left to right\n # Calculate the product of numbers before an index and store that value in res at that index\n prefix = 1\n for i in range(len(nums)):\n res[i] = prefix\n prefix = prefix * nums[i]\n \n # Start from right to left\n # Calculate the product of numbers after an index and multiply it with existing value in res at that index\n # These 2 calculation will give the product of numbers before and after the index \n postfix = 1\n for i in range(len(nums)-1,-1,-1):\n res[i] = postfix * res[i]\n postfix = postfix * nums[i]\n \n return res","sub_path":"src/238. Product of Array Except Self.py","file_name":"238. Product of Array Except Self.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532289219","text":"# -*- coding: utf-8 -*-\n# 面向对象\n# 有意义的面向对象代码\n# 类 = 面向对象 (行为与特征)\n# 类最基本的作用:封装\nclass Student():\n # 类变量\n name = '小白' #数据成员\n age = 0\n sum = 0\n __score = 0\n\n # 构造函数 只能返回None\n def __init__(self, name, age):\n # 初始化对象的属性\n self.name = name\n self.age = age\n self.__score = 0\n # 不能在实例方法中直接访问类变量\n # print(name)\n #可以用slef.类变量来表示\n #print(self.name)\n\n # 在实例方法中操作类变量\n print(Student.sum)\n print(self.__class__.sum)\n\n # self.__class__.sum += 1\n # print(\"当前班级学生总数为:\" + str(self.__class__.sum))\n\n #在变量或者函数前边添加双下划线代表私有\n def marking(self, score):\n if score < 0:\n print('分数不能为负')\n self.__score = score\n print(self.name + '同学本次考试的分数为:' + str(self.__score))\n\n # 行为 与 特征\n def doHomeWork(self):\n self.__class__.sum += 1\n print(\"当前班级学生总数为:\" + str(self.__class__.sum))\n\n print('do homework')\n\n # 类方法\n @classmethod\n def plus_sum(cls):\n cls.sum += 1\n print(cls.sum) \n\n # 静态方法\n @staticmethod\n def add(x, y):\n print(Student.sum)\n print('this is a static method')\n\n def print_file(self): #实例方法\n print('name = ' + self.name)\n print('age = ' + str(self.age))\n\n# 类的实例化\nstudent = Student('张三', 18)\nstudent1 = Student('王五', 20)\nstudent2 = Student('李四', 15)\n# a = student.__init__()\n# print(a)\n# print(type(a))\nprint(student.name) # 实例变量\nprint(Student.name) # 类变量\nstudent.print_file()\nstudent.doHomeWork()\nstudent1.doHomeWork()\nstudent2.doHomeWork()\n\n# 类方法\nStudent.plus_sum()\n# 对象调用类方法\nstudent2.plus_sum()\n\nstudent.add(1,2)\n\n\nstudent.marking(59)\n# 私有变量不能直接读取\n# print(student.__score)\n\n# 读取私有变量的方法\nprint(student._Student__score)\n\n# 查看该实例对象的字典\nprint(student.__dict__)\n\n\n\n","sub_path":"basic-learn/对象/class1.py","file_name":"class1.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"644426045","text":"#!/usr/bin/env python\n#pylint: skip-file\n\"\"\"\nCopyright 2016 Cisco Systems\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under 
the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nclass LinkWrapper(object):\n\n\n\n def __init__(self):\n \"\"\"\n Attributes:\n swaggerTypes (dict): The key is attribute name and the value is attribute type.\n attributeMap (dict): The key is attribute name and the value is json key in definition.\n \"\"\"\n self.swaggerTypes = {\n\n 'source': 'str',\n\n\n 'id': 'str',\n\n\n 'endPortID': 'str',\n\n\n 'startPortID': 'str',\n\n\n 'greyOut': 'bool',\n\n\n 'linkStatus': 'str',\n\n\n 'startPortIpv4Address': 'str',\n\n\n 'startPortIpv4Mask': 'str',\n\n\n 'endPortIpv4Address': 'str',\n\n\n 'endPortIpv4Mask': 'str',\n\n\n 'endPortName': 'str',\n\n\n 'endPortSpeed': 'str',\n\n\n 'startPortName': 'str',\n\n\n 'startPortSpeed': 'str',\n\n\n 'tag': 'str',\n\n\n 'target': 'str'\n\n }\n\n self.attributeMap = {\n\n 'source': 'source',\n\n 'id': 'id',\n\n 'endPortID': 'endPortID',\n\n 'startPortID': 'startPortID',\n\n 'greyOut': 'greyOut',\n\n 'linkStatus': 'linkStatus',\n\n 'startPortIpv4Address': 'startPortIpv4Address',\n\n 'startPortIpv4Mask': 'startPortIpv4Mask',\n\n 'endPortIpv4Address': 'endPortIpv4Address',\n\n 'endPortIpv4Mask': 'endPortIpv4Mask',\n\n 'endPortName': 'endPortName',\n\n 'endPortSpeed': 'endPortSpeed',\n\n 'startPortName': 'startPortName',\n\n 'startPortSpeed': 'startPortSpeed',\n\n 'tag': 'tag',\n\n 'target': 'target'\n\n }\n\n\n #Device ID correspondng to the source device\n\n self.source = None # str\n\n #Unified identifier for device\n\n self.id = None # str\n\n #Device port ID corresponding to end devices\n\n self.endPortID = None # str\n\n #Device port ID corresponding to start devices\n\n self.startPortID = None # str\n\n #Device greyout\n\n self.greyOut = None # bool\n\n #Indicates whether link is working\n\n self.linkStatus = None # str\n\n #Interface port IPv4 address corresponding to start devices\n\n self.startPortIpv4Address = None # str\n\n #Interface port IPv4 mask corresponding to start devices\n\n self.startPortIpv4Mask = None # str\n\n #Interface port IPv4 address corresponding to end devices\n\n self.endPortIpv4Address = None # str\n\n #Interface port IPv4 mask corresponding to end devices\n\n self.endPortIpv4Mask = None # str\n\n #Interface port name corresponding to end devices\n\n self.endPortName = None # str\n\n #Interface port speed corresponding to end devices\n\n self.endPortSpeed = None # str\n\n #Interface port name corresponding to start devices\n\n self.startPortName = None # str\n\n #Interface port speed corresponding to start devices\n\n self.startPortSpeed = None # str\n\n #Tag for the devices\n\n self.tag = None # str\n\n #Device ID corresponding to the target device\n\n self.target = None # str\n\n","sub_path":"apis/nb/clients/topology_client/models/LinkWrapper.py","file_name":"LinkWrapper.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515561116","text":"from django.http import JsonResponse\nfrom django.shortcuts import redirect\nfrom .models import Map\nfrom django.views.decorators.csrf import csrf_exempt\n\n@csrf_exempt\ndef submit(request):\n\tif request.method != \"POST\":\n\t\treturn JsonResponse({\"status\": \"failure\"})\n\ttry:\n\t\tnew_obj = Map(\n\t\t\tmap_type = request.POST.get(\"map_type\", ''),\n\t\t\tcategory = request.POST.get(\"category\", 
''),\n\t\t\ttitle = request.POST.get(\"title\", ''),\n\t\t\tdesc = request.POST.get(\"desc\", ''),\n\t\t\taddress = request.POST.get(\"address\", ''),\n\t\t\tlat = request.POST.get(\"lat\", ''),\n\t\t\tlng = request.POST.get(\"lng\", '')\n\t\t)\n\t\tnew_obj.save()\n\t\treturn redirect(\"default\")\n\texcept:\n\t\treturn redirect(\"/#failure\")\n\ndef retrieve(request, map_type):\n\tresult_set = Map.objects.filter(map_type__exact=map_type)\n\tresults = [obj.as_json() for obj in result_set]\n\treturn JsonResponse(results, safe=False)\n\n","sub_path":"02-day-final/zmap/basicmaps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"419160501","text":"# 2021.07.04 modified\r\n# per-stadium weather from KBO\r\nfrom selenium import webdriver\r\nimport pymysql\r\nimport datetime\r\n\r\n\r\n# save in db\r\ndef _save_in_weather_db(date=None, env_list=None):\r\n    try:\r\n        db = pymysql.connect(host='localhost',\r\n                             user='root',\r\n                             password='chldlstns1!',\r\n                             charset='utf8',\r\n                             db='baseball')\r\n    except Exception as e:\r\n        print(e)\r\n        return\r\n\r\n    cursor = db.cursor()\r\n    for env in env_list:\r\n        sql = \"INSERT INTO weather (date, stadium, time, temperature, humidity, rain_prob, wind)\" \\\r\n              \"VALUES(%s, %s, %s, %s, %s, %s, %s)\"\r\n        cursor.execute(sql, (date, env[0], env[1], env[2], env[3], env[4], env[5]))\r\n        db.commit()\r\n\r\n    db.close()\r\n\r\n\r\ndef get_weather_today():\r\n    options = webdriver.ChromeOptions()\r\n    options.add_argument('--headless')\r\n    options.add_argument('--no-sandbox')\r\n    options.add_argument('--disable-dev-shm-usage')\r\n\r\n    path = 'C:/Users/soo81/webcrawling/chromedriver.exe'\r\n    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', chrome_options=options)\r\n    URL = 'https://www.koreabaseball.com/Schedule/Weather.aspx#none'\r\n    driver.get(URL)\r\n    driver.implicitly_wait(3)\r\n\r\n    weather_list = []\r\n    stadium_list = driver.find_element_by_xpath('//*[@id=\"ulStadiumList\"]').find_elements_by_tag_name('li')\r\n    for stadium in stadium_list:\r\n        stadium.click()\r\n        tmp_table = stadium.find_element_by_xpath('//*[@id=\"tblForecast\"]')\r\n        tmp_table_trs = tmp_table.find_elements_by_tag_name('tr')\r\n        tmp_list = [stadium.get_attribute('data-stadium')]\r\n        for tr in tmp_table_trs:\r\n            tmp_tr = tr.text.split()[-1:]\r\n            tmp_list.append(tmp_tr[0])\r\n\r\n        weather_list.append(tmp_list)\r\n\r\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\r\n    _save_in_weather_db(today, weather_list)\r\n    print(today)\r\n\r\n\r\nget_weather_today()\r\n","sub_path":"baseball_predict/get_weather_info.py","file_name":"get_weather_info.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"116036244","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/core/evaluation/test/res/ipap_example.py\n# Compiled at: 2019-08-19 15:09:29\n\"\"\"\nExamples on using the evaluation scheme for exposing icepap driver values\nas taurus attributes\n\"\"\"\nfrom __future__ import print_function\nATTR_IPAP_POS = 'eval:@ipap=pyIcePAP.EthIcePAP(\"icepap06\", port=5000)' + '/float(ipap.readParameter(1,\"POS\"))'\n\ndef _test1():\n    import taurus.core\n    a = taurus.Attribute(ATTR_IPAP_POS)\n    print('axis pos:', a.read().rvalue)\n\n\ndef
_test2():\n import sys\n from taurus.qt.qtgui.application import TaurusApplication\n from taurus.qt.qtgui.display import TaurusLabel\n app = TaurusApplication(cmd_line_parser=None)\n tl = TaurusLabel()\n tl.setModel(ATTR_IPAP_POS)\n tl.show()\n sys.exit(app.exec_())\n return\n\n\nif __name__ == '__main__':\n _test1()\n _test2()","sub_path":"pycfiles/taurus-4.6.1-py2.7/ipap_example.py","file_name":"ipap_example.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"350605518","text":"from unidecode import unidecode\nimport re\n\n'''\nlibrary to expose useful methods for address like\n\n guess_typeof_street:\n evaluate type of street (from its label, splited as words)\n guess_strong_word:\n evaluate strong word (from its label, splited as words)\n'''\n\n# I to XXIII : ^[X]*(I{1,3}|[I]?V|V[I]{0,3}|[I]?X)$\n# all : '^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$'\nIS_ROMAN_NUMBER_RE = re.compile(r'^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$', re.I)\n\ndef is_roman_number(word):\n \"\"\"\n determine if given word is a roman number\n :param word:\n :return: boolean result\n \"\"\"\n roman = IS_ROMAN_NUMBER_RE.match(word)\n return (roman != None)\n\n\ndef is_numeric(word):\n \"\"\"\n determine if given word is a number\n :param word:\n :return: boolean result\n \"\"\"\n try:\n integer = int(word)\n except ValueError as e:\n return False\n return True\n\n\nclass AddrGroup:\n \"\"\"\n class Address of level Group (Voie, Lieu-dit, Quartier)\n \"\"\"\n\n # typeof street as list of key words, sorted by descendant number of occurences\n TYPEOF_STREET_VALUES = [\n 'RUE'\n , 'LIEU DIT'\n , 'CHEMIN'\n , 'IMPASSE'\n , 'ROUTE'\n , 'ALLEE'\n , 'PLACE'\n , 'AVENUE'\n , 'LOTISSEMENT'\n , 'QUARTIER'\n , 'RESIDENCE'\n , 'BOULEVARD'\n , 'RUELLE'\n , 'SQUARE'\n , 'PASSAGE'\n , 'HAMEAU'\n , 'MOULIN'\n , 'FERME'\n , 'SENTE'\n , 'CITE'\n , 'CHEZ'\n , 'CLOS'\n , 'DOMAINE'\n , 'MONTEE'\n , 'MAS'\n , 'CHATEAU'\n , 'COUR'\n , 'QUAI'\n , 'TRAVERSE'\n , 'ZONE ARTISANALE'\n , 'PONT'\n , 'BOIS'\n , 'ROND POINT'\n , 'VOIE'\n , 'VILLA'\n , 'COTE'\n , 'VENELLE'\n , 'VILLAGE'\n , 'PARC'\n , 'COURS'\n , 'PROMENADE'\n , 'ZONE INDUSTRIELLE'\n , 'IMMEUBLE'\n , 'PETITE RUE'\n , 'HLM'\n , 'PRE'\n , 'CARREFOUR'\n , 'ESPLANADE'\n , 'CENTRE COMMERCIAL'\n , 'PLAN'\n , 'MAIL'\n , 'ECLUSE'\n , 'FAUBOURG'\n , 'ANCIEN CHEMIN'\n , 'VAL'\n , 'ESPACE'\n , 'MAISON FORESTIERE'\n , 'ZONE D AMENAGEMENT CONCERTE'\n , 'CAMP'\n , 'PORT'\n , 'ZONE'\n , 'CAMPAGNE'\n , 'GRAND RUE'\n , 'CHAUSSEE'\n , 'PLAINE'\n , 'ESCALIER'\n , 'ANCIENNE ROUTE'\n , 'JARDIN'\n , 'BOURG'\n , 'ETANG'\n , 'MANOIR'\n , 'COL'\n , 'PORTE'\n , 'FONTAINE'\n , 'ROC'\n , 'CAMPING'\n , 'RAMPE'\n , 'BOUCLE'\n , 'CORNICHE'\n , 'AIRE'\n , 'CARRIERE'\n , 'CENTRAL'\n , 'ILE'\n , 'PAVILLON'\n , 'PETIT CHEMIN'\n , 'VIEUX CHEMIN'\n , 'GARE'\n , 'DESCENTE'\n , 'ENCLOS'\n , 'TOUR'\n , 'PARVIS'\n , 'PARKING'\n , 'GALERIE'\n , 'PLATEAU'\n , 'TERRASSE'\n , 'CAVEE'\n , 'CHALET'\n , 'CASTEL'\n , 'POINTE'\n , 'ENCEINTE'\n , 'VIEILLE ROUTE'\n , 'CONTOUR'\n , 'FORT'\n , 'PASSERELLE'\n , 'ABBAYE'\n , 'PASSE'\n , 'PLAGE'\n , 'TERRAIN'\n , 'STATION'\n , 'DIGUE'\n , 'CHAPELLE'\n , 'FOSSE'\n , 'CHEMINEMENT'\n , 'BARRIERE'\n , 'LEVEE'\n , 'GROUPE'\n , 'REMPART'\n , 'TERTRE'\n , 'VIA'\n , 'PASSAGE A NIVEAU'\n , 'CARRE'\n , 'PETITE ROUTE'\n , 'ANSE'\n , 'BUTTE'\n , 'FOYER'\n , 'MARCHE'\n , 'PLACIS'\n , 'COLLINE'\n , 'COTTAGE'\n , 'STADE'\n , 'AUTOROUTE'\n , 'EGLISE'\n , 'CALE'\n , 'RACCOURCI'\n 
, 'PRESQU ILE'\n , 'TERRE PLEIN'\n , 'AGGLOMERATION'\n , 'CARREAU'\n , 'GARENNE'\n , 'ARCADE'\n , 'CHEMIN VICINAL'\n , 'HALLE'\n , 'BERGE'\n , 'GRILLE'\n , 'PETITE AVENUE'\n , 'PALAIS'\n , 'FORUM'\n , 'PETITE ALLEE'\n , 'BASTIDE'\n , 'PETITE IMPASSE'\n , 'POURTOUR'\n , 'DEGRE'\n , 'BEGUINAGE'\n , 'CLOITRE'\n , 'MUSEE'\n , 'PATIO'\n , 'RAIDILLON'\n , 'ROTONDE'\n , 'JETEE'\n , 'POTERNE'\n , 'NOUVELLE ROUTE'\n , 'ROQUET'\n , 'PORTIQUE'\n , 'PERISTYLE'\n , 'BAS CHEMIN'\n , 'PERIPHERIQUE'\n , 'METRO'\n , 'HIPPODROME'\n , 'DARSE'\n , 'GRIMPETTE'\n , 'HAUT CHEMIN'\n , 'CHARMILLE'\n , 'GRAND BOULEVARD'\n , 'GROUPEMENT'\n , 'ZONE D AMENAGEMENT DIFFERE'\n , 'ZONE A URBANISER EN PRIORITE'\n , 'BASTION'\n , 'ENCLAVE'\n ]\n\n STRONG_WORD_EXCLUDE_LAPOSTE = [\n 'INFERIEUR'\n , 'INFERIEURE'\n , 'INFERIEURS'\n , 'INFERIEURES'\n , 'SUPERIEUR'\n , 'SUPERIEURE'\n , 'SUPERIEURS'\n , 'SUPERIEURES'\n , 'PROLONGE'\n , 'PROLONGEE'\n , 'PROLONGEES'\n ]\n\n STRONG_WORD_EXCLUDE_IGN = [\n 'PAIR'\n , 'PAIRE'\n , 'IMPAIR'\n , 'IMPAIRE'\n , 'BIS'\n , 'TER'\n , 'QUATER'\n , 'NO'\n , 'NR'\n , 'NORD'\n , 'EST'\n , 'SUD'\n , 'OUEST'\n , 'HAMEAU'\n , 'SUR'\n , 'SOUS'\n , 'HAUT'\n , 'HAUTS'\n , 'HAUTE'\n , 'HAUTES'\n , 'BAS'\n , 'BASSE'\n , 'BASSES'\n , 'BRAZ'\n , 'VRAZ'\n , 'BRAS'\n , 'VRAS'\n , 'BIHAN'\n , 'VIHAN'\n , 'BIAN'\n , 'VIAN'\n , 'HUEL'\n , 'IZEL'\n , 'HUELLA'\n , 'UHELLA'\n , 'IZELLA'\n , 'H'\n , 'PELLA'\n , 'TOSTA'\n , 'NEVEZ'\n , 'NEVE'\n , 'NEHUE'\n , 'NEUE'\n , 'AL'\n , 'AR'\n , 'AN'\n , 'ER'\n , 'UR'\n , 'UN'\n , 'COZ'\n , 'CREIS'\n , 'KREIS'\n , 'CREIZ'\n , 'KREIZ'\n , 'DU'\n , 'IHUEL'\n , 'UHEL'\n , 'GUEN'\n , 'GWEN'\n ]\n\n ARTICLES_VALUES = [\n 'LE'\n , 'LA'\n , 'LES'\n , 'L'\n , 'DE'\n , 'DU'\n , 'DES'\n , 'D'\n , 'A'\n , 'AU'\n , 'AUX'\n , 'UN'\n , 'UNE'\n ]\n\n # eval number of word(s) for each\n TYPEOF_STREET_WORDS_COUNT = [x.count(\" \") + 1 for x in TYPEOF_STREET_VALUES]\n\n IS_WELL_CAPITALIZE_RE = re.compile(r'^(?:[A-Z][a-z]*[- \\']?)$')\n IS_ONLY_UPPERCASE_RE = re.compile(r'^[- \\'A-Z]*$')\n SPLIT_LABEL_AS_WORD_RE = re.compile(r\"[\\w]+\", re.U | re.X)\n\n LABEL_ONLY_UPPERCASE_ERROR = 1\n LABEL_BAD_CAPITALIZE_ERROR = 2\n LABEL_WITH_REPETITION_ERROR = 4\n\n\n def __init__(self, label, evalDescriptor=False):\n self.label = label\n # split as word(s), transforming accents (unidecode)\n self._words = self.SPLIT_LABEL_AS_WORD_RE.findall(unidecode(self.label))\n self._wordsUpper = list(map(lambda x: x.upper(), self._words))\n\n\n def _guess_strong_word(self):\n \"\"\"\n evaluate strong word of a label of street, from splited words\n :return: strong word\n \"\"\"\n\n # not a roman number\n # not an arabic number\n # not an excluded word (LAPOSTE, IGN)\n # not an article\n excludes = (self.STRONG_WORD_EXCLUDE_LAPOSTE + self.STRONG_WORD_EXCLUDE_IGN\n + self.ARTICLES_VALUES)\n for word in reversed(self._wordsUpper):\n if is_roman_number(word) or is_numeric(word) or word in excludes:\n continue\n return word\n\n # default is last word\n return self._wordsUpper[-1]\n\n\n def _guess_typeof_street(self):\n \"\"\"\n identify type of street (tos) of label, from splited words\n :return: type of street\n \"\"\"\n\n '''\n bugs:\n 1- gives instead of \n '''\n t = len(self._wordsUpper)\n for i, kw in enumerate(self.TYPEOF_STREET_VALUES):\n if self.TYPEOF_STREET_WORDS_COUNT[i] >= t:\n continue\n\n # multiple tos w/ the same 1st word, search them starting w/ longest (loop w/ desc order)\n if self._wordsUpper[0] in (\"CHEMIN\", \"ZONE\", \"PASSAGE\"):\n w = [(s, self.TYPEOF_STREET_VALUES.index(s)) for s in 
 self.TYPEOF_STREET_VALUES\n                 if s.startswith(self._wordsUpper[0])]\n            ws = sorted(w, key=lambda x: self.TYPEOF_STREET_WORDS_COUNT[x[1]], reverse=True)\n            for tos, id in ws:\n                if \" \".join(self._wordsUpper[:self.TYPEOF_STREET_WORDS_COUNT[id]]) == tos:\n                    return tos\n            else:\n                if \" \".join(self._wordsUpper[:self.TYPEOF_STREET_WORDS_COUNT[i]]) == kw:\n                    return kw\n\n        # w/o type of street\n        return ''\n\n\n    def _guess_stateof_label(self):\n        \"\"\"\n        evaluate state of label (suspicious errors)\n        :return: bitstream\n        \"\"\"\n        rc = 0\n\n        # only w/ uppercase?\n        ouc = self.IS_ONLY_UPPERCASE_RE.match(self.label)\n        if (ouc != None):\n            rc |= self.LABEL_ONLY_UPPERCASE_ERROR\n        else:\n            # each word well capitalized?\n            for w in self._words:\n                if w.upper() in self.ARTICLES_VALUES or is_roman_number(w) or is_numeric(w):\n                    continue\n                wc = self.IS_WELL_CAPITALIZE_RE.match(w)\n                if (wc == None):\n                    rc |= self.LABEL_BAD_CAPITALIZE_ERROR\n                    break\n\n        # w/ duplicate successive word?\n        dup = self.is_with_repetition()\n        if dup:\n            rc |= self.LABEL_WITH_REPETITION_ERROR\n\n        return rc\n\n    def is_with_repetition(self):\n        \"\"\"\n        check whether two consecutive words in the word list are identical\n        :return: boolean\n        \"\"\"\n\n        for nbWord in range(len(self._wordsUpper)-1):\n            if self._wordsUpper[nbWord] == self._wordsUpper[nbWord+1]:\n                return True\n        return False\n\n    @property\n    def guess_strong_word(self):\n        if not hasattr(self, '_strong_word'):\n            self._strong_word = self._guess_strong_word()\n        return self._strong_word\n\n\n    @property\n    def guess_typeof_street(self):\n        if not hasattr(self, '_typeof_street'):\n            self._typeof_street = self._guess_typeof_street()\n        return self._typeof_street\n\n\n    @property\n    def is_label_only_uppercased(self):\n        if not hasattr(self, '_stateof_label'):\n            self._stateof_label = self._guess_stateof_label()\n        return ((self._stateof_label & self.LABEL_ONLY_UPPERCASE_ERROR) == self.LABEL_ONLY_UPPERCASE_ERROR)\n\n\n    @property\n    def is_label_bad_capitalized(self):\n        if not hasattr(self, '_stateof_label'):\n            self._stateof_label = self._guess_stateof_label()\n        return ((self._stateof_label & self.LABEL_BAD_CAPITALIZE_ERROR) == self.LABEL_BAD_CAPITALIZE_ERROR)\n\n\n    @property\n    def is_label_with_repetition(self):\n        if not hasattr(self, '_stateof_label'):\n            self._stateof_label = self._guess_stateof_label()\n        return ((self._stateof_label & self.LABEL_WITH_REPETITION_ERROR) == self.LABEL_WITH_REPETITION_ERROR)\n\n\ndef construction_message(addr):\n\n    \"\"\" build the alert message from the detected issues \"\"\"\n\n    message_alert = ''\n    message_content = []\n\n    if addr.is_label_only_uppercased:\n        message_content.append('Tous les caractères sont en majuscule')\n    if (addr.is_label_bad_capitalized):\n        message_content.append('Des caractères sont mal capitalisés')\n    if (addr.is_label_with_repetition):\n        message_content.append('Des mots sont en double')\n\n    if len(message_content) != 0:\n        message_alert = 'Mauvais libellé :'\n        for value in message_content:\n            message_alert = message_alert + '\\n - ' + value\n\n    return message_alert\n\n# for tests\nif __name__ == '__main__':\n    while True:\n        street = input('\\nEntrer la Voie: ')\n\n        addr = AddrGroup(street)\n        print(' mot important : ' + addr.guess_strong_word)\n        print(' type de la Voie : ' + addr.guess_typeof_street)\n        print(' état du libellé : ')\n        if (addr.is_label_only_uppercased):\n            print(' en majuscule seulement!')\n        if (addr.is_label_bad_capitalized):\n            print(' non correctement capitalisé!')\n        if (addr.is_label_with_repetition):\n            print(' avec
 redondance de libellé!')\n\n","sub_path":"collectivites/addr_utils.py","file_name":"addr_utils.py","file_ext":"py","file_size_in_byte":12656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"98789332","text":"from datetime import datetime\n\nfrom db.crud.base import Base\nfrom utils.snowflake import get_id\nfrom models.role import RoleInDB, \\\n    RoleInCreate, \\\n    RoleInUpdate\n\n\nclass Role(Base):\n    async def list_roles(self, offset, limit) -> list:\n        count = await self.exec(\"count_roles\")\n        if not count:\n            return list(), 0\n\n        roles = list()\n\n        records = await self.exec(\"list_roles\", offset=offset, limit=limit)\n        if records:\n            roles = [RoleInDB(**record) for record in records]\n\n        return roles, count\n\n    async def add_role(self, role: RoleInCreate\n                       ) -> RoleInDB:\n        record = await self.exec(\"add_role\",\n                                 id=get_id(),\n                                 name=role.name,\n                                 description=role.description,\n                                 )\n\n        return await self.get_role_by_id(record[0])\n\n    async def get_role_by_id(self, id) -> RoleInDB:\n        record = await self.exec(\"get_role_by_id\", id)\n        if record:\n            return RoleInDB(**record)\n\n        return None\n\n    async def delete_role_by_id(self, id) -> None:\n        return await self.exec(\"delete_role_by_id\", id)\n\n    async def update_role_by_id(self, id: int, role: RoleInUpdate\n                                ) -> datetime:\n        record = await self.exec(\"update_role_by_id\",\n                                 id=id,\n                                 name=role.name,\n                                 description=role.description,\n                                 )\n\n        return record\n","sub_path":"backend-py/src/db/crud/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"282623766","text":"#!/usr/bin/env python\n\"\"\"\nCreate simple binner style plots for ugali or simple candidate lists\n\"\"\"\n__author__ = \"Sidney Mau\"\n\nimport os\nimport sys\nimport time\nimport subprocess\nimport glob\nimport healpy\nimport numpy\nimport numpy as np\n\nimport ugali.utils.healpix\nimport fitsio as fits\n\nimport yaml\n\n############################################################\n\nwith open('config.yaml', 'r') as ymlfile:\n    cfg = yaml.load(ymlfile)\n\n    survey = cfg['survey']\n    simple_dir = cfg['setup']['simple_dir']\n    jobs = cfg['batch']['jobs']\n    candidate_list = cfg[survey]['candidate_list']\n    basis_1 = cfg[survey]['basis_1']\n    basis_2 = cfg[survey]['basis_2']\n\nsave_dir = os.path.join(os.getcwd(), cfg['output']['save_dir'])\nif not os.path.exists(save_dir):\n    os.mkdir(save_dir)\n\nlog_dir = os.path.join(os.getcwd(), cfg['output']['save_dir'], cfg['output']['log_dir'])\nif not os.path.exists(log_dir):\n    os.mkdir(log_dir)\n\ntry:\n    sig_cut = float(sys.argv[1])\nexcept:\n    sig_cut = 5.5\n\nprint('Plotting hotspots with sig > {}'.format(sig_cut))\n\n#candidate_list = fits.read(candidate_list)\ncandidate_list = np.load(candidate_list)\ntry: # simple\n    candidate_list = candidate_list[candidate_list['SIG'] > sig_cut]\nexcept: # ugali\n    candidate_list = candidate_list[candidate_list['TS'] > 25]\n\n# for PS1\n#candidate_list = candidate_list[candidate_list['DEC'] > -15]\n\nprint('{} candidates found...'.format(len(candidate_list)))\n\n############################################################\n\n#for candidate in [candidate_list[:10]]:\nfor candidate in candidate_list:\n    try: # simple\n        sig = round(candidate['SIG'], 2)\n    except: # ugali\n        sig = round(candidate['TS'], 2)\n    ra = round(candidate[basis_1], 2)\n    dec = round(candidate[basis_2], 2)\n    mod = round(candidate['MODULUS'], 2)\n    mc_source_id =
round(candidate['MC_SOURCE_ID'], 2)\n if 'N_MODEL' in candidate_list.dtype.names:\n field_density = round(candidate['N_MODEL'] / (np.pi * (candidate['R'] * 60.)**2), 4) # field density (arcmin^-2)\n \n logfile = '{}/candidate_{}_{}.log'.format(log_dir, ra, dec)\n #batch = 'csub -n {} -o {} '.format(jobs, logfile)\n batch = 'csub -n {} -o {} --host all '.format(jobs, logfile) # testing condor updates\n if 'N_MODEL' in candidate_list.dtype.names:\n command = 'python {}/plotting/make_plot.py {:0.2f} {:0.2f} {:0.2f} {:0.2f} {:0.2f} {:0.4f}'.format(simple_dir, ra, dec, mod, sig, mc_source_id, field_density)\n else:\n command = 'python {}/plotting/make_plot.py {:0.2f} {:0.2f} {:0.2f} {:0.2f} {:0.2f}'.format(simple_dir, ra, dec, mod, sig, mc_source_id)\n command_queue = batch + command\n\n #print(command)\n #os.system(command)\n print(command_queue)\n os.system(command_queue) # Submit to queue\n","sub_path":"simple/plotting/farm_plots.py","file_name":"farm_plots.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"456979742","text":"#%%\nimport os\nimport sys\nimport glob\nimport csv\nimport conlog_parse as cp\n\ndef collect_out(condir):\n retval = []\n for root, dirs, files in os.walk(condir): \n for file in files:\n if file.endswith(\".out\"):\n out_path = os.path.join(root,file)\n retval.append(out_path)\n else:\n pass\n print(len(retval))\n return retval\n\ndef xrdfragcp_args(rows): #input must be a list with lines from the output file\n retval = {}\n \n for row in rows:\n if 'Going to run' in row:\n cmdline = row\n cmdparts = []\n cmdparts = cmdline.split()\n\n retval.update({\n 'rdsize':cmdparts[6],#the total requested size in bytes\n 'reqs':cmdparts[7],#the number of requests it will make i.e. 
10\n 'time':cmdparts[8], #the calculated amount of time it will take\n 'url':cmdparts[9]}) #the url the for the requested fragment\n else:\n pass\n if retval == {}:\n retval.update({'rdsize':0,'reqs':0,'time':0,'url':'none'})\n return retval\n\ndef gettotaltime(lines):\n beg = 0\n end = 0\n for line in lines:\n if \"Start time:\" in line:\n beg = float(line.split()[2])\n elif \"End time:\" in line:\n end = float(line.split()[2])\n else:\n pass\n time = end - beg\n if time == 0:\n print(\"no start or end time given\")\n else:\n pass\n return time\n\ndef parse_out(outfile): \n retval = {}\n\n fl_handle = open(outfile, 'r')\n fl_data = fl_handle.readlines()\n fl_handle.close()\n\n arguments = xrdfragcp_args(fl_data)\n\n if fl_data == []:\n retval['empty']=True\n retval.update({'url':\"none\",'reqs':0,'reqsize':0,'total_time':0,'expected time': 0})\n else:\n retval['empty']=False\n retval['url'] = arguments['url']\n retval['reqs'] = arguments['reqs']\n retval['expected time'] = arguments['time']\n retval['total_time'] = gettotaltime(fl_data)\n retval['reqsize'] = int(arguments['rdsize'])/(int(arguments['reqs']) * 1024 * 1024)\n return retval\n\ndef average_rate(dir): #of a single run of a single concurrency\n retval = []\n paths = collect_out(dir)\n for path in paths:\n reqs = int(parse_out(path)['reqs'])\n reqsize = parse_out(path)['reqsize']\n tot_size = float(reqsize*reqs)\n tot_time = float(parse_out(path)['total_time'])\n if tot_time != 0:\n tot_rate = tot_size/tot_time\n retval.append(tot_rate)\n else:\n pass\n average = sum(retval)/len(retval)\n return average\n\ndef expected_rate(dir): #of a single run of a single concurrency\n retval = []\n paths = collect_out(dir)\n for path in paths:\n reqs = int(parse_out(path)['reqs'])\n reqsize = parse_out(path)['reqsize']\n tot_size = float(reqsize*reqs)\n exp_time = float(parse_out(path)['expected time'])\n if exp_time != 0:\n exp_rate = tot_size/exp_time\n retval.append(exp_rate)\n else:\n pass\n average = sum(retval)/len(retval)\n print(average)\n return average\n\ndef collect_empty_files(conDir):\n paths = collect_out(conDir)\n empty_out = []\n for path in paths:\n if parse_out(path)['empty'] == True:\n empty_out.append(path)\n else:\n pass\n if empty_out == []:\n print(\"No empty output files.\")\n return empty_out\n\ndef avg_hostfreq(log_path):\n parseddata = cp.parse_job_data(log_path)\n procdata = cp.data_rows_by_proc(parseddata)\n slothosts = cp.slothosts_to_eviction_counts(procdata)\n \n all_host_data = list(slothosts.values())\n rel_host_data = all_host_data[1:]\n\n number_hosted = [item[0] for item in rel_host_data]\n\n avg_host_freq = sum(number_hosted)/len(number_hosted)\n\n return avg_host_freq\n\ndef parse_by_con(testDir,con):\n path = testDir\n x = str(con)\n\n log_paths = glob.glob(path+\"con_\"+x+\"_*.log\")\n con_dirs = glob.glob(path+\"concurrency_\"+x+\"_*/\")\n \n rates = [] \n exp_rates = []\n host_freq = []\n empty_files = []\n\n for dir in con_dirs:\n avg_rate = average_rate(dir)\n rates.append(avg_rate)\n exp_rate = expected_rate(dir)\n exp_rates.append(exp_rate)\n list_empty = collect_empty_files(dir)\n if list_empty == []:\n print(\"empty list\")\n empty_files.append(0)\n else:\n empty_files.append(len(list_empty))\n for log_path in log_paths:\n host_freq.append(avg_hostfreq(log_path))\n \n data = [rates, exp_rates, host_freq, empty_files]\n\n return data\n\n#%%\n\ntestDir = sys.argv[1]\ncon_list = sys.argv[2:]\ndata_list = []\n\nfor con in con_list:\n con_data = parse_by_con(testDir, con)\n print(con_data)\n for i 
in range(0,len(con_data[0])):\n data_list.append([int(con)] + [i+1] + [item[i] for item in con_data])\n\nwith open('parsed.csv','w+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['concurrency','run','rates','exp rates','hosted','failed'])\n for data in data_list:\n writer.writerow(data)\n#%%s","sub_path":"mine_output/output_parse.py","file_name":"output_parse.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70129866","text":"import numpy as np\nimport tarfile\nimport os\nimport fnmatch\nimport theano.tensor as T\nimport theano\nfrom scipy.linalg import svd\nimport matplotlib.pyplot as plt\n\n\ndef minibatch_indices(X, minibatch_size, lb=None, ub=None):\n if lb is None:\n lb = 0\n if ub is None:\n ub = len(X)\n minibatch_indices = np.arange(lb, ub, minibatch_size)\n minibatch_indices = np.asarray(list(minibatch_indices) + [ub])\n start_indices = minibatch_indices[:-1]\n end_indices = minibatch_indices[1:]\n return zip(start_indices, end_indices)\n\n\ndef kmeans(X, W=None, n_clusters=10, n_epochs=10, learningrate=0.01,\n batchsize=100, random_state=None, verbose=True):\n \"\"\"\nCode modded from R. Memisevic.\nCopyright (c) 2013, Roland Memisevic\nAll rights reserved.\n\nmemisevr[at]iro[dot]umontreal[dot]ca\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\nIN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\nOF SUCH DAMAGE.\n \"\"\"\n if random_state is None:\n random_state = np.random.RandomState(None)\n if W is None:\n W = 0.1 * random_state.randn(n_clusters, X.shape[1])\n\n X2 = (X ** 2).sum(1)[:, None]\n for epoch in range(n_epochs):\n for i in range(0, X.shape[0], batchsize):\n D = -2 * np.dot(W, X[i:i+batchsize, :].T) + (W ** 2).sum(1)[:, None]\n D = D + X2[i:i+batchsize].T\n S = (D == D.min(0)[None, :]).astype(\"float\").T\n W += learningrate * (np.dot(S.T, X[i:i+batchsize, :]) -\n S.sum(0)[:, None] * W)\n if verbose:\n print(\"epoch\", epoch, \"of\", n_epochs, \" cost: \", D.min(0).sum())\n return W\n\n\ndef patchify(imgs, patch_shape=(10, 10), patch_stride=(1, 1)):\n \"\"\"\n imgs is an array of (n_images, X, Y, color)\n e.g. 
CIFAR10 is (50000, 32, 32, 3)\n Modified from\n http://stackoverflow.com/questions/16774148/fast-way-to-slice-image-into-overlapping-patches-and-merge-patches-to-image\n Can test with CIFAR10 and\n assert np.all(imgs[0, :10, :10] == patches[0, 0, 0])\n # with 2, 2 patch_stride\n assert np.all(imgs[0, 20:30, 20:30] == patches[0, -1, -1])\n assert np.all(imgs[-1, :10, :10] == patches[-1, 0, 0])\n assert np.all(imgs[-1, 20:30, 20:30] == patches[-1, -1, -1])\n \"\"\"\n imgs = np.ascontiguousarray(imgs) # won't make a copy if not needed\n n, X, Y, c = imgs.shape\n x, y = patch_shape\n shape = (n, (X - x) / patch_stride[0], (Y - y) / patch_stride[1], x,\n y, c)\n # The right strides can be thought by:\n # 1) Thinking of `img` as a chunk of memory in C order\n # 2) Asking how many items through that chunk of memory are needed\n # when indices i,j,k,l are incremented by one\n strides = imgs.itemsize * np.array([X * Y * c, patch_stride[0] * Y * c,\n patch_stride[1] * c, Y * c, c, 1])\n patches = np.lib.stride_tricks.as_strided(imgs, shape=shape,\n strides=strides)\n return patches\n\n\ndef unpickle(f):\n import cPickle\n fo = open(f, 'rb')\n d = cPickle.load(fo)\n fo.close()\n return d\n\ntry:\n import urllib\n urllib.urlretrieve('http://google.com')\nexcept AttributeError:\n import urllib.request as urllib\n\nurl = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\ndata_file = '../cifar-10-python.tar.gz'\nif not os.path.exists(data_file):\n print(\"Downloading cifar10\")\n urllib.urlretrieve(url, data_file)\n tar = tarfile.open(data_file)\n os.chdir('..')\n tar.extractall()\n tar.close()\n print(\"Download complete\")\n\ndata_dir = '../cifar-10-batches-py/'\ntrain_files = []\nfor filepath in fnmatch.filter(os.listdir(data_dir), 'data*'):\n train_files.append(os.path.join(data_dir, filepath))\n\ntest_files = []\nfor filepath in fnmatch.filter(os.listdir(data_dir), 'test*'):\n test_files.append(os.path.join(data_dir, filepath))\n\nname2label = {k: v for v, k in enumerate(\n unpickle(os.path.join(data_dir, 'batches.meta'))['label_names'])}\nlabel2name = {v: k for k, v in name2label.items()}\n\n\nprint(\"loading data...\")\ntrain_files = sorted(train_files, key=lambda x: x.split(\"_\")[-1])\ntrain_x = []\ntrain_y = []\nfor f in train_files:\n d = unpickle(f)\n t = d['data'].reshape(d['data'].shape[0], 3, 32 * 32)\n t = t.transpose(0, 2, 1)\n train_x.append(t)\n train_y.append(d['labels'])\ntrain_x = np.array(train_x)\ntrain_y = np.array(train_y)\ntrain_x = train_x.reshape(50000, 32, 32, 3)\ntrain_y = train_y.reshape(len(train_x)).astype('int32')\n\nd = unpickle(test_files[0])\ntest_x = d['data'].reshape(d['data'].shape[0], 3, 32 * 32)\ntest_x = test_x.transpose(0, 2, 1)\ntest_x = test_x.reshape(10000, 32, 32, 3)\ntest_y = np.array(d['labels']).astype('int32')\n\nn_classes = len(np.unique(train_y))\npatch_x = patchify(train_x)\n\n\ndef preprocess(patch_x):\n print(\"normalizing...\")\n random_state = np.random.RandomState(1999)\n n_patch = 5000\n idx = random_state.randint(0, len(patch_x), n_patch)\n i1 = random_state.randint(0, patch_x.shape[1], n_patch)\n i2 = random_state.randint(0, patch_x.shape[2], n_patch)\n train_x_subset = patch_x[idx, i1, i2]\n train_x_subset = train_x_subset.reshape(len(train_x_subset), -1)\n m = train_x_subset.mean(axis=0)\n train_x_subset -= m[None]\n s = train_x_subset.std(axis=0)\n s += 1E-3\n train_x_subset /= (s[None])\n print(\"computing zca...\")\n # ZCA on subset\n U, S, V = svd(train_x_subset)\n Z = np.dot(V.T * np.sqrt(1.0 / (S ** 2 / len(train_x_subset) + 
.1)), V)\n \"\"\"\n print(\"computing pca...\")\n U, S, V = svd(train_x_subset)\n # Keep top 10% of components\n Z = V[:30].T\n \"\"\"\n return Z, m, s\n\nrandom_state = np.random.RandomState(1999)\nZ, m, s = preprocess(patch_x)\n\nprint(\"computing kmeans...\")\nn_patch = 5000\nidx = random_state.randint(0, len(patch_x), n_patch)\ni1 = random_state.randint(0, patch_x.shape[1], n_patch)\ni2 = random_state.randint(0, patch_x.shape[2], n_patch)\ntrain_x_subset = patch_x[idx, i1, i2]\nshp = train_x_subset.shape\ntrain_x_subset = train_x_subset.reshape(shp[0], -1)\ntrain_x_subset = (train_x_subset - m[None]) / s[None]\ntrain_x_subset = np.dot(train_x_subset, Z)\nW = kmeans(train_x_subset, n_epochs=150, n_clusters=50,\n random_state=random_state).T\n\nepochs = 50\nminibatch_size = 500\nlearning_rate = .1\n\n# create logistic regression\nX = T.tensor4()\ny = T.ivector()\nshp = patch_x[:minibatch_size].shape\ntx = patch_x[:minibatch_size].reshape(shp[0], shp[1], shp[2], -1)\nty = train_y[:minibatch_size]\nX.tag.test_value = tx\ny.tag.test_value = ty\nb1 = shp[1] // 2\nb2 = shp[2] // 2\n\nW_sym = theano.shared(W)\nZ_sym = theano.shared(Z)\nnormed = (X - m[None]) / s[None]\nactivation = T.dot(T.dot(normed, Z_sym), W_sym)\n# relu\nactivation = activation * (activation > 1E-6)\n# Quadrant pooling\nfinal_activation = activation.mean(axis=(1, 2))\n# Quadrants == * 4\nsW = theano.shared(0.1 * (random_state.rand(W.shape[1], n_classes) - 0.5))\nsb = theano.shared(np.zeros(n_classes))\npre_s = T.dot(final_activation, sW) + sb\nout = T.nnet.softmax(pre_s)\ncost = -T.mean(T.log(out)[T.arange(y.shape[0]), y])\nparams = [sW, sb]\ngrads = T.grad(cost, params)\nupdates = [(param_i, param_i - learning_rate * grad_i)\n for param_i, grad_i in zip(params, grads)]\ntrain_function = theano.function([X, y], cost, updates=updates)\npredict_function = theano.function([X], out)\n\ntest_patch = patchify(test_x)\ntrain_patch = patch_x\nfor e in range(epochs):\n for n, (i, j) in enumerate(minibatch_indices(patch_x, minibatch_size)):\n shp = patch_x[i:j].shape\n img_patch = patch_x[i:j].reshape(shp[0], shp[1], shp[2], -1)\n img_labels = train_y[i:j]\n batch_cost = train_function(img_patch, img_labels)\n print(\"epoch %i, batch %i, cost %f\" % (e, n, batch_cost))\n\n test_pred = []\n for n, (i, j) in enumerate(minibatch_indices(test_patch, minibatch_size)):\n shp = test_patch[i:j].shape\n img_patch = test_patch[i:j].reshape(shp[0], shp[1], shp[2], -1)\n pred_x = np.argmax(predict_function(img_patch), axis=1)\n test_pred.append(pred_x)\n test_pred = np.array(test_pred).ravel()\n print(\"Test error %f\" % np.mean(test_pred == test_y))\n\n# Final predictions\ntrain_pred = []\nfor n, (i, j) in enumerate(minibatch_indices(train_patch, minibatch_size)):\n shp = train_patch[i:j].shape\n img_patch = train_patch[i:j].reshape(shp[0], shp[1], shp[2], -1)\n pred_x = np.argmax(predict_function(img_patch), axis=1)\n train_pred.append(pred_x)\ntrain_pred = np.array(train_pred).ravel()\nprint(\"Train error %f\" % np.mean(train_pred == train_y))\n\ntest_pred = []\nfor n, (i, j) in enumerate(minibatch_indices(test_patch, minibatch_size)):\n shp = test_patch[i:j].shape\n img_patch = test_patch[i:j].reshape(shp[0], shp[1], shp[2], -1)\n pred_x = np.argmax(predict_function(img_patch), axis=1)\n test_pred.append(pred_x)\ntest_pred = np.array(test_pred).ravel()\nprint(\"Test error %f\" % np.mean(test_pred == 
test_y))\n","sub_path":"hw3/color_kmeans_theano.py","file_name":"color_kmeans_theano.py","file_ext":"py","file_size_in_byte":10044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"632375633","text":"import eel, os, random, hashlib\n\n\neel.init('web')\n\n\n@eel.expose\ndef pick_file(folder):\n def get_hash_md5(file):\n m = hashlib.md5()\n m.update(file.encode('utf-8'))\n print(m.hexdigest())\n return m.hexdigest()\n if os.path.isdir(folder):\n path = os.listdir(folder)\n file = random.choice(path)\n print(file)\n return file, get_hash_md5(file)\n else:\n return 'Ошибочный путь 😩'\n\neel.start('file_access.html', size=(960, 400))\n\n","sub_path":"file_access_.py","file_name":"file_access_.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234155427","text":"from typing import Any, Dict, List, Optional\n\nimport torch\nimport h5py\nimport pickle\nimport json\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom data.preprocess.init_glove import Vocabulary\n\nclass VisDialDataset(Dataset):\n \"\"\"\n A full representation of VisDial v1.0 (train/val/test) dataset. According\n to the appropriate split, it returns dictionary of question, image,\n history, ground truth answer, answer options, dense annotations etc.\n \"\"\"\n\n def __init__(\n self,\n hparams,\n overfit: bool = False,\n split: str = \"\",\n old_split=None,\n ):\n super().__init__()\n self.hparams = hparams\n\n self.split = split\n self.vocabulary = Vocabulary(hparams.word_counts_json, min_count=hparams.vocab_min_count)\n\n # train, val, test\n text_features_hdfpath = hparams.text_features_h5 % (self.hparams.model_train_type, self.split)\n img_features_h5_path = hparams.img_features_h5 % (self.hparams.img_feature_type, self.split)\n\n\n self.hdf_reader = DataHdfReader(hparams, text_features_hdfpath, img_features_h5_path, self.hparams.fake_label_path, self.hparams.fake_label_img_ids_path, self.split, old_split)\n\n # Keep a list of image_ids as primary keys to access data.\n self.text_feat_image_ids = list(self.hdf_reader.text_feature_id_l)\n print(\"image ids\", len(self.text_feat_image_ids))\n if overfit:\n self.text_feat_image_ids = self.text_feat_image_ids[:5]\n self.float_variables = [\"img_feat\", \"gt_relevance\", \"fake_label_logit\"]\n\n def __len__(self):\n return len(self.text_feat_image_ids)\n\n def __getitem__(self, index):\n\n curr_features = self.hdf_reader[index]\n\n for f_key in curr_features.keys():\n if f_key in self.float_variables:\n curr_features[f_key] = torch.tensor(curr_features[f_key]).float()\n continue\n curr_features[f_key] = torch.tensor(curr_features[f_key]).long()\n\n\n return curr_features\n\n def collate_fn(self, batch):\n merged_batch = {key: [d[key] for d in batch] for key in batch[0]}\n max_np = max(merged_batch[\"num_proposals\"])\n # max_np = 100\n for key in merged_batch:\n if key in ['img_feat']:\n for batch_idx, features in enumerate(merged_batch[key]):\n if key == 'img_feat':\n pad_features = torch.zeros((max_np - len(features), features.size()[1])).float()\n merged_batch[key][batch_idx] = torch.cat((features, pad_features), dim=0)\n else:\n pad_features = torch.zeros(max_np, max_np)\n clo, row = features.size()\n pad_features[:clo, :row] = features\n merged_batch[key][batch_idx] = pad_features\n\n merged_batch[key] = torch.stack(merged_batch[key], 0)\n\n return merged_batch\n\n\nclass VisualDialogOldVersion(object):\n def 
__init__(self):\n pass\n\n def get_train_img_ids(self, train_jsonpath):\n with open(train_jsonpath, \"r\") as visdial_file:\n visdial_data = json.load(visdial_file)\n train_img_ids = [dialog_for_image[\"image_id\"] for dialog_for_image in visdial_data[\"data\"][\"dialogs\"]]\n\n return train_img_ids\n\n def get_val_img_ids(self, val_jsonpath):\n with open(val_jsonpath, \"r\") as visdial_file:\n visdial_data = json.load(visdial_file)\n val_img_ids = [dialog_for_image[\"image_id\"] for dialog_for_image in visdial_data[\"data\"][\"dialogs\"]]\n\n return val_img_ids\n\n\nclass DataHdfReader(object):\n \"\"\"\n A reader for HDF files containing pre-extracted image features. A typical HDF file is expected\n to have a column named \"image_id\", and another column named \"features\".\n\n Example of an HDF file:\n ```\n visdial_train_faster_rcnn_bottomup_features.h5\n |--- \"image_id\" [shape: (num_images, )]\n |--- \"features\" [shape: (num_images, num_proposals, feature_size)]\n +--- .attrs (\"split\", \"train\")\n ```\n Refer ``$PROJECT_ROOT/data/extract_bottomup.py`` script for more details about HDF structure.\n\n Parameters\n ----------\n features_hdfpath : str\n Path to an HDF file containing VisDial v1.0 train, val or test split image features.\n in_memory : bool\n Whether to load the whole HDF file in memory. Beware, these files are sometimes tens of GBs\n in size. Set this to true if you have sufficient RAM - trade-off between speed and memory.\n \"\"\"\n\n def __init__(self, hparams, text_features_h5_path: str, img_features_h5_path: str, fake_label_path: str, fake_label_img_id_path: str, split=None, old_split=None):\n\n self.text_features_h5_path = text_features_h5_path\n self.img_features_h5_path = img_features_h5_path\n self.fake_laebl_path = fake_label_path\n self.fake_laebl_image_id_path = fake_label_img_id_path\n self._split = split\n self.hparams = hparams\n\n ###### for fake label ############\n if self._split == 'train':\n with open(self.fake_laebl_image_id_path, 'rb') as id_file:\n self.image_ids = pickle.load(id_file)\n with open(self.fake_laebl_path, 'rb') as data_file:\n self.fake_label = pickle.load(data_file)\n\n # text\n with h5py.File(self.text_features_h5_path, \"r\") as text_features_h5:\n self.feature_keys = list(text_features_h5.keys())\n print(\"feature_keys\", self.feature_keys)\n self._split = split\n assert split == self._split\n print(\"data split :\", self._split)\n\n # visdial 0.9 or 1.0\n if self.hparams.dataset_version == '0.9':\n self.text_feature_id_l = self.get_old_img_ids(self.hparams.visdial_json % old_split)\n self.train_text_feature_id_set = set(self.get_old_img_ids(self.hparams.visdial_json % \"train\"))\n self.val_text_feature_id_set = set(self.get_old_img_ids(self.hparams.visdial_json % \"val\"))\n print(self.hparams.visdial_json % old_split)\n self.text_feature_h5_id_l = list(text_features_h5[\"img_ids\"])\n self.old_split = old_split\n\n else:\n self.text_feature_id_l = list(text_features_h5[\"img_ids\"])\n\n # image\n if hparams.img_feature_type == \"dan_faster_rcnn_x101\":\n # get imgid2id dicionary\n self.img_feature_id_l = list(pickle.load(open(hparams.imgid2idx_path % split, \"rb\")))\n with h5py.File(self.img_features_h5_path, \"r\") as features_hdf:\n self.pos_boxes = np.array(features_hdf.get(\"pos_boxes\"))\n else:\n with h5py.File(self.img_features_h5_path, \"r\") as img_features_h5:\n self.img_feature_id_l = list(img_features_h5[\"image_id\"])\n\n def __len__(self):\n return len(self.text_feature_id_l)\n def area(self, boxes):\n area = 
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n return area\n\n def boxlist_iou(self, boxlist1, boxlist2):\n\n \"\"\"Compute the intersection over union of two set of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n Arguments:\n box1: (BoxList) bounding boxes, sized [N,4].\n box2: (BoxList) bounding boxes, sized [M,4].\n Returns:\n (tensor) iou, sized [N,M].\n \"\"\"\n # N = boxlist1.shape[0]\n # M = boxlist2.shape[1]\n area1 = self.area(boxlist1)\n area2 = self.area(boxlist2)\n lt = torch.max(boxlist1[:, None, :2], boxlist2[:, :2]) # [N,M,2]\n rb = torch.min(boxlist1[:, None, 2:], boxlist2[:, 2:]) # [N,M,2]\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n def __getitem__(self, index: int):\n\n features = {}\n text_feature_index = index\n\n if self.hparams.dataset_version == '0.9':\n image_id = self.text_feature_id_l[index]\n text_feature_index = self.text_feature_h5_id_l.index(image_id)\n\n if self.old_split == \"train\":\n assert image_id in self.train_text_feature_id_set\n elif self.old_split == \"val\":\n assert image_id in self.val_text_feature_id_set\n\n # text\n with h5py.File(self.text_features_h5_path, \"r\") as text_features_hdf:\n for f_key in self.feature_keys:\n features[f_key] = text_features_hdf[f_key][text_feature_index]\n image_id = text_features_hdf[\"img_ids\"][text_feature_index]\n\n assert image_id == self.text_feature_id_l[index]\n\n ##### for fake_label ######\n if self._split == 'train' :\n fake_label_index = self.image_ids[image_id]\n fake_label_logit = self.fake_label[fake_label_index]['fake_label_logit']\n features['fake_label_logit'] = fake_label_logit\n\n img_feature_index = self.img_feature_id_l.index(image_id) # text / img index same??\n\n if self.hparams.img_feature_type == \"dan_faster_rcnn_x101\":\n with h5py.File(self.img_features_h5_path, \"r\") as features_hdf:\n image_features = features_hdf[\"image_features\"][self.pos_boxes[img_feature_index][0]: self.pos_boxes[img_feature_index][1], :]\n\n features[\"img_feat\"] = image_features\n features[\"num_proposals\"] = len(image_features)\n # features['adj'] = adj\n else:\n with h5py.File(self.img_features_h5_path, \"r\") as img_features_hdf:\n features[\"img_feat\"] = img_features_hdf[\"features\"][img_feature_index]\n assert image_id == img_features_hdf[\"image_id\"][img_feature_index]\n\n return features\n\n def keys(self) -> List[int]:\n return self.text_feature_id_l\n\n @property\n def split(self):\n return self._split\n\n def get_old_img_ids(self, visdial_jsonpath):\n with open(visdial_jsonpath, \"r\") as visdial_file:\n visdial_data = json.load(visdial_file)\n\n return [dialog_for_image[\"image_id\"] for dialog_for_image in visdial_data[\"data\"][\"dialogs\"]]","sub_path":"MM_upload/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"623327255","text":"import cv2\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nsmile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')\n\ndef detect(gray, frame):\n faces = face_cascade.detectMultiScale(gray,1.3,5)\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[x:x+w,y:y+h]\n roi_color = frame[x:x+w,y:y+h]\n smile = smile_cascade.detectMultiScale(roi_gray,1.7,22)\n for (sx,sy,sw,sh) in smile:\n 
cv2.rectangle(roi_color,(sx,sy),(sx+sw,sy+sh),(0,255,0),2) \n return frame\n\n# Doing some Face Recognition with the webcam\nvideo_capture = cv2.VideoCapture(0)\nwhile True:\n ret, frame = video_capture.read()\n if ret is True:\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n else:\n continue\n canvas = detect(gray, frame)\n cv2.imshow('Video', canvas)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"face_smile_detection.py","file_name":"face_smile_detection.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"390335716","text":"import numpy as np\nimport cv2\n\n\nclass BrailleFilter(object):\n\n def __init__(self, filter_type, dot_colour):\n # Print all array values setting\n np.set_printoptions(threshold=np.inf)\n self.mFilterType = filter_type\n self.mDotColour = dot_colour\n self.mStructureElement3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n self.mStructureElement6 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))\n self.mClaheKernel = cv2.createCLAHE(clipLimit=3, tileGridSize=(8, 8))\n\n def __applyLocalLightnessEqualization(self, input_image):\n # Convert image to LAB color model\n image_lab = cv2.cvtColor(input_image, cv2.COLOR_BGR2LAB)\n # Split the image into L, A, and B channels\n l_channel, a_channel, b_channel = cv2.split(image_lab)\n # Create CLAHE Kernel and apply to lightness channel\n adjusted_l_channel = self.mClaheKernel.apply(l_channel)\n # Merge the CLAHE enhanced L channel with the original A and B channel\n merged_channels = cv2.merge((adjusted_l_channel, a_channel, b_channel))\n # Convert image from LAB color model back to RGB color model\n lightness_equalized_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)\n # Convert image to grayscale\n lightness_equalized_image = cv2.cvtColor(lightness_equalized_image, cv2.COLOR_BGR2GRAY)\n return lightness_equalized_image\n\n def __applyGlobalHistogramEqualization(self, input_image):\n # Calculate histogram of image and fined corresponding CDF\n hist, bins = np.histogram(input_image.flatten(), 256, [0, 256])\n cdf = hist.cumsum()\n cdf_m = np.ma.masked_equal(cdf, 0)\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n # Apply CDF transform to grayscaled input image\n global_equalized_image = cdf[input_image]\n # Apply median filter to remove high frequency noise\n blurred_global_equalized_image = cv2.medianBlur(global_equalized_image, 7)\n return blurred_global_equalized_image\n\n def __applyThresholding(self, input_image):\n # White areas indicate the top of each braille dot\n if self.mDotColour == \"white\":\n ret, white_image = cv2.threshold(input_image, 246, 255, cv2.THRESH_BINARY)\n elif self.mDotColour == \"black\":\n ret, white_image = cv2.threshold(input_image, 15, 255, cv2.THRESH_BINARY)\n else:\n exit()\n eroded_white_image = cv2.erode(white_image, self.mStructureElement3, iterations=2)\n dilated_white_image = cv2.dilate(eroded_white_image, self.mStructureElement6, iterations=1)\n if self.mDotColour == \"black\":\n dilated_white_image = cv2.bitwise_not(dilated_white_image)\n\n # Find connected components and extract the mean area\n cc_output = cv2.connectedComponentsWithStats(dilated_white_image, 4, cv2.CV_8U)\n stats = cc_output[2]\n stats = np.delete(stats, 0, 0) # delete background component\n mean_area = np.mean(stats[:, cv2.CC_STAT_AREA])\n\n # Remove components that 
{"seq_id":"390335716","text":"import numpy as np\nimport cv2\n\n\nclass BrailleFilter(object):\n\n    def __init__(self, filter_type, dot_colour):\n        # Print all array values setting\n        np.set_printoptions(threshold=np.inf)\n        self.mFilterType = filter_type\n        self.mDotColour = dot_colour\n        self.mStructureElement3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n        self.mStructureElement6 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))\n        self.mClaheKernel = cv2.createCLAHE(clipLimit=3, tileGridSize=(8, 8))\n\n    def __applyLocalLightnessEqualization(self, input_image):\n        # Convert image to LAB color model\n        image_lab = cv2.cvtColor(input_image, cv2.COLOR_BGR2LAB)\n        # Split the image into L, A, and B channels\n        l_channel, a_channel, b_channel = cv2.split(image_lab)\n        # Create CLAHE Kernel and apply to lightness channel\n        adjusted_l_channel = self.mClaheKernel.apply(l_channel)\n        # Merge the CLAHE enhanced L channel with the original A and B channel\n        merged_channels = cv2.merge((adjusted_l_channel, a_channel, b_channel))\n        # Convert image from LAB color model back to RGB color model\n        lightness_equalized_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)\n        # Convert image to grayscale\n        lightness_equalized_image = cv2.cvtColor(lightness_equalized_image, cv2.COLOR_BGR2GRAY)\n        return lightness_equalized_image\n\n    def __applyGlobalHistogramEqualization(self, input_image):\n        # Calculate histogram of image and find the corresponding CDF\n        hist, bins = np.histogram(input_image.flatten(), 256, [0, 256])\n        cdf = hist.cumsum()\n        cdf_m = np.ma.masked_equal(cdf, 0)\n        cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n        cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n        # Apply CDF transform to grayscaled input image\n        global_equalized_image = cdf[input_image]\n        # Apply median filter to remove high frequency noise\n        blurred_global_equalized_image = cv2.medianBlur(global_equalized_image, 7)\n        return blurred_global_equalized_image\n\n    def __applyThresholding(self, input_image):\n        # White areas indicate the top of each braille dot\n        if self.mDotColour == \"white\":\n            ret, white_image = cv2.threshold(input_image, 246, 255, cv2.THRESH_BINARY)\n        elif self.mDotColour == \"black\":\n            ret, white_image = cv2.threshold(input_image, 15, 255, cv2.THRESH_BINARY)\n        else:\n            exit()\n        eroded_white_image = cv2.erode(white_image, self.mStructureElement3, iterations=2)\n        dilated_white_image = cv2.dilate(eroded_white_image, self.mStructureElement6, iterations=1)\n        if self.mDotColour == \"black\":\n            dilated_white_image = cv2.bitwise_not(dilated_white_image)\n\n        # Find connected components and extract the mean area\n        cc_output = cv2.connectedComponentsWithStats(dilated_white_image, 4, cv2.CV_8U)\n        stats = cc_output[2]\n        stats = np.delete(stats, 0, 0) # delete background component\n        mean_area = np.mean(stats[:, cv2.CC_STAT_AREA])\n\n        # Remove components whose area is < mean_area / 2\n        for stat in stats:\n            if stat[cv2.CC_STAT_AREA] < mean_area / 2:\n                x1 = stat[cv2.CC_STAT_LEFT]\n                x2 = x1 + stat[cv2.CC_STAT_WIDTH]\n                y1 = stat[cv2.CC_STAT_TOP]\n                y2 = y1 + stat[cv2.CC_STAT_HEIGHT]\n                cv2.rectangle(dilated_white_image, (x1, y1), (x2, y2), 0, cv2.FILLED)\n\n        cc_output = cv2.connectedComponentsWithStats(dilated_white_image, 4, cv2.CV_8U)\n        stats = cc_output[2]\n        centroids = cc_output[3]\n        braille_dot_stats = np.delete(stats, 0, 0) # delete background component\n        braille_dot_centres = np.delete(centroids, 0, 0) # delete background component\n        braille_dot_locations_list = []\n\n        for stat, dot in zip(braille_dot_stats, braille_dot_centres):\n            x1 = stat[cv2.CC_STAT_LEFT]\n            x2 = x1 + stat[cv2.CC_STAT_WIDTH]\n            y1 = stat[cv2.CC_STAT_TOP]\n            y2 = y1 + stat[cv2.CC_STAT_HEIGHT]\n            cv2.rectangle(dilated_white_image, (x1, y1), (x2, y2), 0, cv2.FILLED)\n            cv2.circle(dilated_white_image, (int(dot[0]), int(dot[1])), 3, 255, cv2.FILLED)\n            braille_dot_locations_list.append([int(dot[0]), int(dot[1])])\n\n        # sort dot locations based on row\n        braille_dot_locations = np.asarray(braille_dot_locations_list)\n        if len(braille_dot_locations) > 1:\n            braille_dot_locations = braille_dot_locations[braille_dot_locations[:, 1].argsort(kind='mergesort')]\n\n        return dilated_white_image, braille_dot_locations\n\n    def extractBrailleDots(self, input_image):\n        if self.mFilterType == \"Single-Sided\":\n            lightness_equalized_image = self.__applyLocalLightnessEqualization(input_image)\n            global_equalized_image = self.__applyGlobalHistogramEqualization(lightness_equalized_image)\n            output_image, braille_dot_locations = self.__applyThresholding(global_equalized_image)\n            return output_image, braille_dot_locations\n","sub_path":"Braille-Algorithms/ImageFilter/BrailleFilter.py","file_name":"BrailleFilter.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
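A minimal usage sketch for the BrailleFilter class above; the input path and the constructor arguments are assumptions for illustration, not part of the original sample.

import cv2
from BrailleFilter import BrailleFilter  # assumed import path

braille_filter = BrailleFilter(filter_type="Single-Sided", dot_colour="white")
page = cv2.imread("braille_page.jpg")  # hypothetical scanned braille page
dots_image, dot_locations = braille_filter.extractBrailleDots(page)
print("Detected %d braille dots" % len(dot_locations))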
{"seq_id":"204527444","text":"import numpy as np\nimport pandas as pd\n\ndef load_clusterMap(path):\n    '''load district hash pair information\n\n    Notes:\n\n    Args:\n        path: the path to the cluster_map file\n\n    Return:\n        hashToNumber: a dictionary, hash(key) to number(value)\n        numberToHash: a dictionary, number(key) to hash(value)\n\n    '''\n    hashToNumber = {}\n    numberToHash = {}\n    with open(path, 'r') as f:\n        lines = f.readlines()\n        # dataLines = data.split('\\n')\n        for line in lines:\n            line = line.strip()\n            # print line\n            hashToNumber[line.split('\\t')[0]] = line.split('\\t')[1]\n            numberToHash[line.split('\\t')[1]] = line.split('\\t')[0]\n\n    return hashToNumber, numberToHash\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"307596572","text":"import tensorflow as tf\nfrom easydict import EasyDict as edict\n\nfrom models.generators.latent_to_image import conditional_random_to_image, random_to_image\n\n\nclass TestGenerators(tf.test.TestCase):\n\n    def test_random_to_image_generator_output_shape(self):\n        input_params = edict({\n            'hidden_size': 100\n        })\n        g = random_to_image.RandomToImageGenerator(input_params)\n        z = tf.random.normal(shape=[1, 100])\n        output_img = g(z)\n        expected_shape = (1, 28, 28, 1)\n        self.assertEqual(output_img.shape, expected_shape)\n\n    def test_conditional_random_to_image_generator_output_shape(self):\n        input_params = edict({\n            'hidden_size': 100,\n            'num_classes': 10\n        })\n        g = conditional_random_to_image.RandomToImageConditionalGenerator(input_params)\n        z = tf.random.normal(shape=(1, 100))\n        class_id = tf.zeros(shape=(1,))\n        output_img = g([z, class_id])\n        expected_shape = (1, 28, 28, 1)\n        self.assertEqual(output_img.shape, expected_shape)\n","sub_path":"tests/test_generators.py","file_name":"test_generators.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"92335146","text":"import os\nimport numpy as np\nimport cv2\nfrom torch.utils.data import Dataset\nimport logging\nimport torchvision.transforms as transforms\nfrom skimage import transform\nimport torch\nimport random\n\nCROP_FLAG=False\nCROP_SIZE=500\nSCALES=[0.8, 1.2]\nROTATION=15\n\nall_transform=transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize([0.485,0.456,0.406], [0.229,0.224,0.225])\n])\n\ndef get_list(dataset_dir, val_i):\n    train_list=[]\n    val_list=[]\n    with open(dataset_dir, 'r') as f:\n        lines=f.readlines()\n        for l in lines:\n            strs = l.strip().split(' ')\n            val_i_true=int(strs[3])\n            if val_i_true==val_i:\n                val_list.append([strs[0], strs[1], strs[2]])\n            else:\n                train_list.append([strs[0], strs[1], strs[2]])\n    return train_list, val_list\n\ndef rand_crop(data, label, contour):\n    height1 = random.randint(0, data.shape[0] - CROP_SIZE)\n    width1 = random.randint(0, data.shape[1] - CROP_SIZE)\n    height2 = height1 + CROP_SIZE\n    width2 = width1 + CROP_SIZE\n    \n    data=data[height1:height2, width1:width2]\n    label=label[height1:height2, width1:width2]\n    contour=contour[height1:height2, width1:width2]\n    \n    return data, label, contour\n\ndef rand_resize(data, label):\n    scale_act = SCALES[0] + (SCALES[1]-SCALES[0])*np.random.random()\n    # data = cv2.resize(data, dsize=(0, 0), fx=scale_act, fy=scale_act)\n    # label = cv2.resize(label, dsize=(0, 0), fx=scale_act, fy=scale_act)\n    data = transform.resize(data,[round(data.shape[0]*scale_act),round(data.shape[1]*scale_act)], mode=\"constant\", clip=False, preserve_range=True)\n    label = transform.resize(label,[round(label.shape[0]*scale_act),round(label.shape[1]*scale_act)], mode=\"constant\", clip=False, preserve_range=True)\n    return np.round(data), np.array(label>0, np.uint8)\n\ndef random_flip(data, label):\n    if np.random.random()>0.5:\n        data=cv2.flip(data, 1)\n        label=cv2.flip(label, 1)\n    if np.random.random()>0.5:\n        data=cv2.flip(data, 0)\n        label=cv2.flip(label, 0)\n    return data, label\n\ndef random_rotation(crop_img, crop_seg, rich_crop_max_rotation, mean_value):\n    \"\"\"\n    Randomly rotate an image and its segmentation label together.\n\n    Args:\n        crop_img(numpy.ndarray): input image\n        crop_seg(numpy.ndarray): segmentation label\n        rich_crop_max_rotation(int): maximum rotation angle, 0-90\n        mean_value(list): fill value for the image border areas exposed by the rotation\n\n    Returns:\n        the rotated image and segmentation label\n\n    \"\"\"\n    ignore_index = 0\n    if rich_crop_max_rotation > 0:\n        (h, w) = crop_img.shape[:2]\n        do_rotation = np.random.uniform(-rich_crop_max_rotation,\n                                        rich_crop_max_rotation)\n        pc = (w // 2, h // 2)\n        r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0)\n        cos = np.abs(r[0, 0])\n        sin = np.abs(r[0, 1])\n\n        nw = int((h * sin) + (w * cos))\n        nh = int((h * cos) + (w * sin))\n\n        (cx, cy) = pc\n        r[0, 2] += (nw / 2) - cx\n        r[1, 2] += (nh / 2) - cy\n        dsize = (nw, nh)\n        crop_img = cv2.warpAffine(\n            crop_img,\n            r,\n            dsize=dsize,\n            flags=cv2.INTER_LINEAR,\n            borderMode=cv2.BORDER_CONSTANT,\n            borderValue=mean_value)\n        crop_seg = cv2.warpAffine(\n            crop_seg,\n            r,\n            dsize=dsize,\n            flags=cv2.INTER_NEAREST,\n            borderMode=cv2.BORDER_CONSTANT,\n            borderValue=ignore_index)\n    return crop_img, crop_seg\n
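# Illustration only (not part of the original file): how the augmentations above
# chain together on a dummy image/label pair. CROP_FLAG is False by default, so
# rand_crop is normally skipped during training.
def _augmentation_demo():
    img = np.zeros((600, 600), np.uint8)
    gt = np.zeros((600, 600), np.uint8)
    img, gt = random_flip(img, gt)
    img, gt = rand_resize(img, gt)
    img, gt = random_rotation(crop_img=img, crop_seg=gt, rich_crop_max_rotation=ROTATION, mean_value=93)
    return img, gt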
\ndef preprocess_img_gt(img, gt, gt_contour, train_flag):\n    img[img>255]=255\n    # img = np.array(img, dtype=np.float)\n    img = np.array(img, dtype=np.uint8)\n    gt = np.array(gt, dtype=np.uint8)\n    gt_contour = np.array(gt_contour, dtype=np.uint8)\n    if train_flag:\n        # img, gt=random_flip(img, gt)\n        # img, gt=rand_resize(img, gt)\n        # img, gt=random_rotation(crop_img=img, crop_seg=gt, rich_crop_max_rotation=ROTATION, mean_value=93)\n        if CROP_FLAG:\n            img, gt, gt_contour = rand_crop(img, gt, gt_contour)\n\n    img = np.repeat(img[:,:,None],3,axis=2)\n    img = all_transform(img)\n    # img=img.repeat([3,1,1])\n\n    gt = torch.Tensor(gt)\n    gt = torch.unsqueeze(gt, 0)\n    gt_contour = torch.Tensor(gt_contour)\n    gt_contour = torch.unsqueeze(gt_contour, 0)\n    return img, gt, gt_contour\n\ndef make_dataset(root_dir, dataset_dir, val_i):\n    train_list, val_list = get_list(dataset_dir, val_i)\n\n    train_dataset = BasicDataset(root_dir, train_list, True)\n    val_dataset = BasicDataset(root_dir, val_list, False)\n\n    return train_dataset, val_dataset, train_list, val_list\n\nclass BasicDataset(Dataset):\n    def __init__(self, root_dir, imgs_list, train_flag):\n        self.root_dir = root_dir\n        self.imgs_list = imgs_list\n        self.train_flag = train_flag\n\n        logging.info(f'Creating dataset with {len(self.imgs_list)} examples')\n\n    def __len__(self):\n        return len(self.imgs_list)\n\n    def __getitem__(self, i):\n        img = cv2.imread(os.path.join(self.root_dir, self.imgs_list[i][0]), -1)\n        gt = cv2.imread(os.path.join(self.root_dir, self.imgs_list[i][1]), -1)\n        gt_contour = cv2.imread(os.path.join(self.root_dir, self.imgs_list[i][2]), -1)\n\n        img, gt, gt_contour = preprocess_img_gt(img, gt, gt_contour, self.train_flag)\n\n        return {'image': img, 'mask': gt, 'mask_contour': gt_contour}\n\n\n############# crop and tile on test img when train img is randomly cropped #########################\ndef crop_pos(input_size, output_size):\n    num_pos = input_size // output_size + 1\n    overlap = float(output_size*num_pos-input_size)/float(num_pos-1)\n    all_pos = []\n    for i in range(num_pos):\n        start = i * (output_size-overlap)\n        end = start + output_size\n        all_pos.append((int(start), int(end)))\n    return all_pos\n\ndef crop_tile(model, test_img, crop_size=CROP_SIZE):\n    # crop test_img according to crop_size and tile the result\n    # test_img [b, c, h, w]\n    if isinstance(crop_size, int):\n        crop_size=[crop_size, crop_size]\n    \n    input_h=test_img.shape[2]\n    input_w=test_img.shape[3]\n    crop_h=crop_size[0]\n    crop_w=crop_size[1]\n    h_pos=crop_pos(input_h, crop_h)\n    w_pos=crop_pos(input_w, crop_w)\n\n    output_prob = []\n    for index_i in range(test_img.shape[0]):\n        imgi=test_img[index_i]\n        img_patchs=[]\n        img_count=torch.zeros([input_h, input_w]).cuda()\n        for h_posi in h_pos:\n            for w_posi in w_pos:\n                img_patchs.append(imgi[:, h_posi[0]:h_posi[1], w_posi[0]:w_posi[1]])\n                img_count[h_posi[0]:h_posi[1], w_posi[0]:w_posi[1]] += 1\n        \n        img_patchs = torch.stack(img_patchs, dim=0) #[n, c, output_h, output_w]\n        prob_patchs = model(img_patchs) #[n, output_h, output_w]\n        \n        patch_i=0\n        prob_sum=torch.zeros([input_h, input_w]).cuda()\n        for h_posi in h_pos:\n            for w_posi in w_pos:\n                prob_sum[h_posi[0]:h_posi[1], w_posi[0]:w_posi[1]] += prob_patchs[patch_i,0]\n                patch_i+=1\n\n        output_prob.append(prob_sum/img_count)\n    \n    output = torch.stack(output_prob, dim=0) #[bs, input_h, input_w]\n    return torch.unsqueeze(output, dim=1)\n\n\n\n","sub_path":"cell_seg/run08_ten/utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
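A minimal usage sketch for the dataset module above. The fold file layout (image, mask, and contour paths plus a validation-fold index per line) is inferred from get_list; the paths below are placeholders only.

from torch.utils.data import DataLoader
# make_dataset comes from the dataset module shown above

train_ds, val_ds, train_list, val_list = make_dataset(
    root_dir='./data', dataset_dir='./data/folds.txt', val_i=0)
train_loader = DataLoader(train_ds, batch_size=4, shuffle=True)
batch = next(iter(train_loader))  # dict with 'image', 'mask' and 'mask_contour' tensors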
{"seq_id":"588503297","text":"from __future__ import division\r\n\r\n\r\ndef reverse_words(sentence):\r\n    x = list(sentence)\r\n    if len(x) == 1:\r\n        return(x)\r\n    else:\r\n        for i in range(len(x)):\r\n            if x[i] == \" \":\r\n                end = i - 1\r\n                break\r\n        return(process_text(0, end, x))\r\n\r\ndef process_text(start, end, x):\r\n    change_position = (end - start) >> 1\r\n    for i in range(change_position + 1):\r\n        x[start + i], x[end - i] = x[end - i], x[start + i]\r\n    if end + 2 > len(x) - 1:\r\n        print(''.join(x))\r\n        return\r\n    for i in range(end + 2, len(x)):\r\n        if x[i] == \" \":\r\n            new_end = i - 1\r\n            break\r\n        if i == len(x) - 1:\r\n            new_end = len(x) - 1\r\n    process_text(end + 2, new_end, x)\r\n    \r\nx = 'the princess banana of sendai'\r\nreverse_words(x)","sub_path":"Reverse Words.py","file_name":"Reverse Words.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"565783119","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File    :   test.py\n@Time    :   2020/12/01 19:48:05\n@Author  :   lzh\n@Version :   1.0\n@Contact :   robinlin163@163.com\n@Desc    :   BiLSTM model test results\n'''\n# %%\nfrom numpy.lib.function_base import average\nimport torch\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom sklearn.metrics import f1_score\nfrom utils import Voc, TextDataset, pad_collate\nfrom models.bilstm_att_pool import BilstmAspectAttPool\n\n\nclass Configs0(object):\n\n    def __init__(self):\n        self.hid_dim = 256\n        self.voc_num = 7075\n        self.aspect_num = 20\n        self.emb_dim = 128\n        self.fc1_dim = 128\n        self.fc2_dim = 4  # class num\n        self.dropout = 0.5\n        self.num_layers = 1\n        self.pool_kernal = 4\n        self.dim_after_pool = int(np.ceil((self.hid_dim * 2 - self.pool_kernal) / self.pool_kernal) + 1)\n        self.aspect_dim = 128\n\n\nclass Configs1(object):\n\n    def __init__(self):\n        self.hid_dim = 256\n        self.voc_num = 7075\n        self.aspect_num = 20\n        self.emb_dim = 128\n        self.fc1_dim = 64\n        self.fc2_dim = 4  # class num\n        self.dropout = 0.5\n        self.num_layers = 2\n        self.pool_kernal = 4\n        self.dim_after_pool = int(np.ceil((self.hid_dim * 2 - self.pool_kernal) / self.pool_kernal) + 1)\n        self.aspect_dim = 64\n\n\nfilename = \"./data/char.valid.csv\"\n# %%\nconfigs = Configs1()\nmodel = BilstmAspectAttPool(configs)\nmodel.load_state_dict(torch.load(\"./model-zoo/bilstm_aspect_att_pool2.pt\"))\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = model.to(device)\ntestset = TextDataset(filename, \"./data/voc.json\")\ntest_loader = DataLoader(testset, batch_size=4, shuffle=False, collate_fn=pad_collate)\n\n# %%\noutput_list = []\ny_list = []\n\n# %%\nfor batch in tqdm(test_loader):\n    seq, y, seq_len = batch\n    seq = seq.to(device)\n    \n    output = model(seq, seq_len)\n    # print(output.size())\n    output = output.to(torch.device(\"cpu\"))\n    output = output.argmax(-1)\n    \n    output_list.append(output)\n    y_list.append(y)\n# %%\noutput = torch.cat(output_list).numpy()\ny = torch.cat(y_list).numpy()\ndf = pd.read_csv(filename)\ncolumns = df.columns[-20:]\nf1_list = []\nfor i in range(20):\n    score = f1_score(y_true=y[:, i], y_pred=output[:, i], average=\"macro\")\n    f1_list.append(score)\n    print(f\"{columns[i]}: {score}\")\nprint(f\"total: {np.mean(f1_list)}\")\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
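A small self-contained illustration of the macro-averaged F1 used in the test script above: macro F1 is the unweighted mean of the per-class F1 scores, which is why the script computes it separately for each of the 20 aspect columns. The toy labels below are arbitrary.

import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([0, 0, 1, 2, 2, 2])
y_pred = np.array([0, 1, 1, 2, 2, 0])

per_class = f1_score(y_true, y_pred, average=None)  # one F1 score per class
macro = f1_score(y_true, y_pred, average='macro')   # unweighted mean of the per-class scores
assert np.isclose(macro, per_class.mean())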
{"seq_id":"305690978","text":"import logging\nimport shlex\nimport time\nimport traceback\nfrom datetime import timedelta\nfrom io import BytesIO\nfrom textwrap import dedent\n\nimport asyncio\nimport pytimeparse\nfrom musicbot.commands import command\nfrom musicbot.constants import DISCORD_MSG_CHAR_LIMIT\nfrom musicbot.exceptions import (CommandError, PermissionsError, RetryPlay,\n                                 WrongEntryTypeError)\nfrom musicbot.structures import Response\nfrom musicbot.utils import fixg, sane_round_int\n\nlog = logging.getLogger(__name__)\n\n\n@command(\"play\")\nasync def cmd_play(self, player, channel, author, permissions, leftover_args, song_url):\n    \"\"\"\n    Usage:\n        {command_prefix}play song_link\n        {command_prefix}play text to search for\n\n    Adds the song to the playlist. If a link is not provided, the first\n    result from a youtube search is added to the queue.\n    \"\"\"\n\n    song_url = song_url.strip('<>')\n\n    if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:\n        raise PermissionsError(\n            \"You have reached your enqueued song limit (%s)\" % permissions.max_songs, expire_in=30\n        )\n\n    if leftover_args:\n        song_url = ' '.join([song_url, *leftover_args])\n\n    if song_url.startswith(\"prepend:\"):\n        # slice off the literal prefix; str.lstrip strips a set of characters, not a prefix\n        song_url = song_url[len(\"prepend:\"):]\n        prepend = True\n    else:\n        prepend = False\n\n    try:\n        info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)\n    except Exception as e:\n        raise CommandError(e, expire_in=30)\n\n    if not info:\n        raise CommandError(\"That video cannot be played.\", expire_in=30)\n\n    # abstract the search handling away from the user\n    # our ytdl options allow us to use search strings as input urls\n    if info.get('url', '').startswith('ytsearch'):\n        # log.info(\"[Command:play] Searching for \\\"%s\\\"\" % song_url)\n        info = await self.downloader.extract_info(\n            player.playlist.loop,\n            song_url,\n            download=False,\n            process=True,  # ASYNC LAMBDAS WHEN\n            on_error=lambda e: asyncio.ensure_future(\n                self.safe_send_message(channel, \"```\\n%s\\n```\" % e, expire_in=120), loop=self.loop),\n            retry_on_error=True\n        )\n\n        if not info:\n            raise CommandError(\n                \"Error extracting info from search string, youtubedl returned no data. 
\"\n                \"You may need to restart the bot if this continues to happen.\", expire_in=30\n            )\n\n        if not all(info.get('entries', [])):\n            # empty list, no data\n            return\n\n        song_url = info['entries'][0]['webpage_url']\n        info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)\n        # Now I could just do: return await self.cmd_play(player, channel, author, song_url)\n        # But this is probably fine\n\n    # TODO: Possibly add another check here to see about things like the bandcamp issue\n    # TODO: Where ytdl gets the generic extractor version with no processing, but finds two different urls\n\n    if 'entries' in info:\n        # I have to do some extra checks anyways because you can request an arbitrary number of search results\n        if not permissions.allow_playlists and ':search' in info['extractor'] and len(info['entries']) > 1:\n            raise PermissionsError(\"You are not allowed to request playlists\", expire_in=30)\n\n        # The only reason we would use this over `len(info['entries'])` is if we add `if _` to this one\n        num_songs = sum(1 for _ in info['entries'])\n\n        if permissions.max_playlist_length and num_songs > permissions.max_playlist_length:\n            raise PermissionsError(\n                \"Playlist has too many entries (%s > %s)\" % (num_songs, permissions.max_playlist_length),\n                expire_in=30\n            )\n\n        # This is a little bit weird when it says (x + 0 > y), I might add the other check back in\n        if permissions.max_songs and player.playlist.count_for_user(author) + num_songs > permissions.max_songs:\n            raise PermissionsError(\n                \"Playlist entries + your already queued songs reached limit (%s + %s > %s)\" % (\n                    num_songs, player.playlist.count_for_user(author), permissions.max_songs),\n                expire_in=30\n            )\n\n        if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:\n            try:\n                return await play_playlist_async(self, player, channel, author, permissions, song_url, info['extractor'])\n            except CommandError:\n                raise\n            except Exception as e:\n                traceback.print_exc()\n                self.sentry.captureException()\n                raise CommandError(\"Error queuing playlist:\\n%s\" % e, expire_in=30)\n\n        t0 = time.time()\n\n        # My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can\n        # monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT\n        # I don't think we can hook into it anyways, so this will have to do.\n        # It would probably be a thread to check a few playlists and get the speed from that\n        # Different playlists might download at different speeds though\n        wait_per_song = 1.2\n\n        procmesg = await self.safe_send_message(\n            channel,\n            'Gathering playlist information for {} songs{}'.format(\n                num_songs,\n                ', ETA: {} seconds'.format(fixg(\n                    num_songs * wait_per_song)) if num_songs >= 10 else '.'))\n\n        # TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified\n        # Also have a \"verify_entry\" hook with the entry as an arg and returns the entry if its ok\n\n        entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)\n\n        tnow = time.time()\n        ttime = tnow - t0\n        listlen = len(entry_list)\n        drop_count = 0\n\n        if permissions.max_song_length:\n            for e in entry_list.copy():\n                if e.duration > permissions.max_song_length:\n                    player.playlist.entries.remove(e)\n                    entry_list.remove(e)\n                    drop_count += 1\n                    # I'm pretty sure there's no situation where this would ever break\n                    # Unless the first entry starts being played, which would make this a race condition\n
            if drop_count:\n                log.info(\"Dropped %s songs\" % drop_count)\n\n        try:\n            log.info(\"Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)\".format(\n                listlen,\n                fixg(ttime),\n                ttime / listlen,\n                ttime / listlen - wait_per_song,\n                fixg(wait_per_song * num_songs))\n            )\n        except ZeroDivisionError:\n            pass\n\n        await self.safe_delete_message(procmesg)\n\n        if not listlen - drop_count:\n            raise CommandError(\n                \"No songs were added, all songs were over max duration (%ss)\" % permissions.max_song_length,\n                expire_in=30\n            )\n\n        reply_text = \"Enqueued **%s** songs to be played. Position in queue: %s\"\n        btext = str(listlen - drop_count)\n\n    else:\n        if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:\n            raise PermissionsError(\n                \"Song duration exceeds limit (%s > %s)\" % (info['duration'], permissions.max_song_length),\n                expire_in=30\n            )\n\n        try:\n            entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author, prepend=prepend)\n        except RetryPlay:\n            new_url = song_url.replace(\"/\", \" \")\n            return await cmd_play(self, player, channel, author, permissions, leftover_args, new_url)\n        except WrongEntryTypeError as e:\n            if e.use_url == song_url:\n                log.info(\"[Warning] Determined incorrect entry type, but suggested url is the same. Help.\")\n\n            return await cmd_play(self, player, channel, author, permissions, leftover_args, e.use_url)\n\n        reply_text = \"Enqueued **%s** to be played. Position in queue: %s\"\n        btext = entry.title\n\n    if position == 1 and player.is_stopped:\n        position = 'Up next!'\n        reply_text %= (btext, position)\n\n    else:\n        try:\n            time_until = await player.playlist.estimate_time_until(position, player)\n            reply_text += ' - estimated time until playing: %s'\n        except Exception:\n            traceback.print_exc()\n            self.sentry.captureException()\n            time_until = ''\n\n        reply_text %= (btext, position, time_until)\n\n    return Response(reply_text, delete_after=30)\n
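# The @command decorator used throughout this module comes from musicbot.commands,
# whose implementation is not part of this sample. The sketch below shows one
# plausible shape for such a registry decorator (an assumption, not the project's
# actual code); it is named command_sketch so it does not shadow the real import.
COMMAND_SKETCH_REGISTRY = {}

def command_sketch(name):
    def register(func):
        COMMAND_SKETCH_REGISTRY[name] = func  # e.g. maps "play" to its handler coroutine
        return func
    return register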
\n\nasync def play_playlist_async(self, player, channel, author, permissions, playlist_url, extractor_type):\n    \"\"\"\n    Secret handler to use the async wizardry to make playlist queuing non-\"blocking\"\n    \"\"\"\n\n    info = await self.downloader.extract_info(player.playlist.loop, playlist_url, download=False, process=False)\n\n    if not info:\n        raise CommandError(\"That playlist cannot be played.\")\n\n    num_songs = sum(1 for _ in info['entries'])\n    t0 = time.time()\n\n    busymsg = await self.safe_send_message(\n        channel, \"Processing %s songs...\" % num_songs)  # TODO: From playlist_title\n\n    entries_added = []\n    if extractor_type == 'youtube:playlist':\n        try:\n            entries_added = await player.playlist.async_process_youtube_playlist(\n                playlist_url, channel=channel, author=author)\n            # TODO: Add hook to be called after each song\n            # TODO: Add permissions\n\n        except Exception:\n            traceback.print_exc()\n            self.sentry.captureException()\n            raise CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)\n\n    elif extractor_type.lower() in ['soundcloud:set', 'bandcamp:album']:\n        try:\n            entries_added = await player.playlist.async_process_sc_bc_playlist(\n                playlist_url, channel=channel, author=author)\n            # TODO: Add hook to be called after each song\n            # TODO: Add permissions\n\n        except Exception:\n            traceback.print_exc()\n            self.sentry.captureException()\n\n            raise CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)\n\n    songs_processed = len(entries_added)\n    drop_count = 0\n    skipped = False\n\n    if permissions.max_song_length:\n        for e in entries_added.copy():\n            if e.duration > permissions.max_song_length:\n                try:\n                    player.playlist.entries.remove(e)\n                    entries_added.remove(e)\n                    drop_count += 1\n                except ValueError:\n                    pass\n\n        if drop_count:\n            log.info(\"Dropped %s songs\" % drop_count)\n\n        if player.current_entry and player.current_entry.duration > permissions.max_song_length:\n            await self.safe_delete_message(self.server_specific_data[channel.server]['last_np_msg'])\n            self.server_specific_data[channel.server]['last_np_msg'] = None\n            skipped = True\n            player.skip()\n            entries_added.pop()\n\n    await self.safe_delete_message(busymsg)\n\n    songs_added = len(entries_added)\n    tnow = time.time()\n    ttime = tnow - t0\n    wait_per_song = 1.2\n    # TODO: actually calculate wait per song in the process function and return that too\n\n    # This is technically inaccurate since bad songs are ignored but still take up time\n    log.info(\"Processed {}/{} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)\".format(\n        songs_processed,\n        num_songs,\n        fixg(ttime),\n        ttime / num_songs,\n        ttime / num_songs - wait_per_song,\n        fixg(wait_per_song * num_songs))\n    )\n\n    if not songs_added:\n        basetext = \"No songs were added, all songs were over max duration (%ss)\" % permissions.max_song_length\n        if skipped:\n            basetext += \"\\nAdditionally, the current song was skipped for being too long.\"\n\n        raise CommandError(basetext, expire_in=30)\n\n    return Response(\"Enqueued {} songs to be played in {} seconds\".format(\n        songs_added, fixg(ttime, 1)), delete_after=30)\n\n\n@command(\"search\")\nasync def cmd_search(self, player, channel, author, permissions, leftover_args):\n    \"\"\"\n    Usage:\n        {command_prefix}search [service] [number] query\n\n    Searches a service for a video and adds it to the queue.\n    - service: any one of the following services:\n        - youtube (yt) (default if unspecified)\n        - soundcloud (sc)\n        - yahoo (yh)\n    - number: return a number of video results and waits for user to choose one\n      - defaults to 3 if unspecified\n      - note: If your search query starts with a number,\n              you must put your query in quotes\n            - ex: {command_prefix}search 2 \"I ran seagulls\"\n    \"\"\"\n\n    if permissions.max_songs and player.playlist.count_for_user(author) > permissions.max_songs:\n        raise PermissionsError(\n            \"You have reached your enqueued song limit (%s)\" % permissions.max_songs,\n            expire_in=30\n        )\n\n    def argcheck():\n        if not leftover_args:\n            raise CommandError(\n                \"Please specify a search query.\\n%s\" % dedent(\n                    cmd_search.__doc__.format(command_prefix=self.config.command_prefix)),\n                expire_in=60\n            )\n\n    argcheck()\n\n    try:\n        leftover_args = shlex.split(' '.join(leftover_args))\n    except ValueError:\n        raise CommandError(\"Please quote your search query properly.\", expire_in=30)\n\n    service = 'youtube'\n    items_requested = 3\n    max_items = 10  # this can be whatever, but since ytdl uses about 1000, a small number might be better\n    services = {\n        'youtube': 'ytsearch',\n        'soundcloud': 'scsearch',\n        'yahoo': 'yvsearch',\n        'yt': 'ytsearch',\n        'sc': 'scsearch',\n        'yh': 'yvsearch'\n    }\n\n    if leftover_args[0] in services:\n        service = leftover_args.pop(0)\n        argcheck()\n\n    if leftover_args[0].isdigit():\n        items_requested = int(leftover_args.pop(0))\n        argcheck()\n\n        if items_requested > max_items:\n            raise CommandError(\"You cannot search for more than %s videos\" % max_items)\n\n    # Look jake, if you see this and go \"what the 
fuck are you doing\"\n # and have a better idea on how to do this, i'd be delighted to know.\n # I don't want to just do ' '.join(leftover_args).strip(\"\\\"'\")\n # Because that eats both quotes if they're there\n # where I only want to eat the outermost ones\n if leftover_args[0][0] in '\\'\"':\n lchar = leftover_args[0][0]\n leftover_args[0] = leftover_args[0].lstrip(lchar)\n leftover_args[-1] = leftover_args[-1].rstrip(lchar)\n\n search_query = '%s%s:%s' % (services[service], items_requested, ' '.join(leftover_args))\n\n search_msg = await self.send_message(channel, \"Searching for videos...\")\n\n try:\n info = await self.downloader.extract_info(player.playlist.loop, search_query, download=False, process=True)\n\n except Exception as e:\n await self.safe_edit_message(search_msg, str(e), send_if_fail=True)\n return\n else:\n await self.safe_delete_message(search_msg)\n\n if not info:\n return Response(\"No videos found.\", delete_after=30)\n\n def check(m):\n return (\n m.content.lower()[0] in 'yn' or\n # hardcoded function name weeee\n m.content.lower().startswith('{}{}'.format(self.config.command_prefix, 'search')) or\n m.content.lower().startswith('exit'))\n\n for e in info['entries']:\n result_message = await self.safe_send_message(channel, \"Result %s/%s: %s\" % (\n info['entries'].index(e) + 1, len(info['entries']), e['webpage_url']))\n\n confirm_message = await self.safe_send_message(channel, \"Is this ok? Type `y`, `n` or `exit`\")\n response_message = await self.wait_for_message(30, author=author, channel=channel, check=check)\n\n if not response_message:\n await self.safe_delete_message(result_message)\n await self.safe_delete_message(confirm_message)\n return Response(\"Ok nevermind.\", delete_after=30)\n\n # They started a new search query so lets clean up and bugger off\n elif response_message.content.startswith(self.config.command_prefix) or \\\n response_message.content.lower().startswith('exit'):\n\n await self.safe_delete_message(result_message)\n await self.safe_delete_message(confirm_message)\n return\n\n if response_message.content.lower().startswith('y'):\n await self.safe_delete_message(result_message)\n await self.safe_delete_message(confirm_message)\n await self.safe_delete_message(response_message)\n\n await cmd_play(self, player, channel, author, permissions, [], e['webpage_url'])\n\n return Response(\"Alright, coming right up!\", delete_after=30)\n else:\n await self.safe_delete_message(result_message)\n await self.safe_delete_message(confirm_message)\n await self.safe_delete_message(response_message)\n\n return Response(\"Oh well :frowning:\", delete_after=30)\n\n\n@command(\"np\")\nasync def cmd_np(self, player, channel, server, message):\n \"\"\"\n Usage:\n {command_prefix}np\n\n Displays the current song in chat.\n \"\"\"\n\n if player.current_entry:\n if self.server_specific_data[server]['last_np_msg']:\n await self.safe_delete_message(self.server_specific_data[server]['last_np_msg'])\n self.server_specific_data[server]['last_np_msg'] = None\n\n song_progress = str(timedelta(seconds=player.progress)).lstrip('0').lstrip(':')\n song_total = str(timedelta(seconds=player.current_entry.duration)).lstrip('0').lstrip(':')\n prog_str = '`[%s/%s]`' % (song_progress, song_total)\n\n if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):\n np_text = \"Now Playing: **%s** added by **%s** %s\\n\" % (\n player.current_entry.title, player.current_entry.meta['author'].name, prog_str)\n else:\n np_text = \"Now Playing: 
**%s** %s\\n\" % (player.current_entry.title, prog_str)\n\n self.server_specific_data[server]['last_np_msg'] = await self.safe_send_message(channel, np_text)\n await self._manual_delete_check(message)\n else:\n return Response(\n 'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix),\n delete_after=30\n )\n\n\n@command(\"pause\")\nasync def cmd_pause(self, player):\n \"\"\"\n Usage:\n {command_prefix}pause\n\n Pauses playback of the current song.\n \"\"\"\n\n if player.is_playing:\n player.pause()\n\n else:\n raise CommandError('Player is not playing.', expire_in=30)\n\n\n@command(\"resume\")\nasync def cmd_resume(self, player):\n \"\"\"\n Usage:\n {command_prefix}resume\n\n Resumes playback of a paused song.\n \"\"\"\n\n if player.is_paused:\n player.resume()\n\n else:\n raise CommandError('Player is not paused.', expire_in=30)\n\n\n@command(\"shuffle\")\nasync def cmd_shuffle(self, channel, player, leftover_args, seed=None):\n \"\"\"\n Usage:\n {command_prefix}shuffle [seed]\n\n Shuffles the playlist.\n \"\"\"\n\n if leftover_args:\n seed = ' '.join([seed, *leftover_args])\n\n player.playlist.shuffle(seed)\n\n return Response(\"Shuffled playlist!\", delete_after=15)\n\n\n@command(\"clear\")\nasync def cmd_clear(self, player, author):\n \"\"\"\n Usage:\n {command_prefix}clear\n\n Clears the playlist.\n \"\"\"\n\n player.playlist.clear()\n return Response(':put_litter_in_its_place:', delete_after=20)\n\n\n@command(\"skip\")\nasync def cmd_skip(self, player, channel, author, message, permissions, voice_channel):\n \"\"\"\n Usage:\n {command_prefix}skip\n\n Skips the current song when enough votes are cast, or by the bot owner.\n \"\"\"\n\n if player.is_stopped:\n raise CommandError(\"Can't skip! The player is not playing!\", expire_in=20)\n\n if not player.current_entry:\n if player.playlist.peek():\n if player.playlist.peek()._is_downloading:\n return Response(\"The next song (%s) is downloading, please wait.\" % player.playlist.peek().title)\n\n elif player.playlist.peek().is_downloaded:\n log.info(\"The next song will be played shortly. Please wait.\")\n else:\n log.info(\"Something odd is happening. \"\n \"You might want to restart the bot if it doesn't start working.\")\n else:\n log.info(\"Something strange is happening. \"\n \"You might want to restart the bot if it doesn't start working.\")\n\n if permissions.instaskip or author == player.current_entry.meta.get(\"author\", None):\n player.skip()\n await self._manual_delete_check(message)\n return\n\n # TODO: ignore person if they're deaf or take them out of the list or something?\n # Currently is recounted if they vote, deafen, then vote\n\n num_voice = sum(1 for m in voice_channel.voice_members if not (\n m.deaf or m.self_deaf or m.id in [self.user.id]))\n\n num_skips = player.skip_state.add_skipper(author.id, message)\n\n skips_remaining = min(\n self.config.skips_required,\n sane_round_int(num_voice * self.config.skip_ratio_required)\n ) - num_skips\n\n if skips_remaining <= 0:\n player.skip()\n return Response(\n 'your skip for **{title}** was acknowledged.'\n '\\nThe vote to skip has been passed.{extra}'.format(\n title=player.current_entry.title,\n extra=' Next song coming up!' 
if player.playlist.peek() else ''\n ),\n reply=True,\n delete_after=20\n )\n\n else:\n # TODO: When a song gets skipped, delete the old x needed to skip messages\n return Response(\n 'your skip for **{title}** was acknowledged.'\n '\\n**{remaining}** more {votes} required to vote to skip this song.'.format(\n title=player.current_entry.title,\n remaining=skips_remaining,\n votes='person is' if skips_remaining == 1 else 'people are'\n ),\n reply=True,\n delete_after=20\n )\n\n\n@command(\"volume\")\nasync def cmd_volume(self, message, player, new_volume=None):\n \"\"\"\n Usage:\n {command_prefix}volume (+/-)[volume]\n\n Sets the playback volume. Accepted values are from 1 to 100.\n Putting + or - before the volume will make the volume change relative to the current volume.\n \"\"\"\n\n if not new_volume:\n return Response('Current volume: `%s%%`' % int(player.volume * 100), reply=True, delete_after=20)\n\n relative = False\n if new_volume[0] in '+-':\n relative = True\n\n try:\n new_volume = int(new_volume)\n\n except ValueError:\n raise CommandError('{} is not a valid number'.format(new_volume), expire_in=20)\n\n if relative:\n vol_change = new_volume\n new_volume += (player.volume * 100)\n\n old_volume = int(player.volume * 100)\n\n if 0 < new_volume <= 100:\n player.volume = new_volume / 100.0\n\n return Response('updated volume from %d to %d' % (old_volume, new_volume), reply=True, delete_after=20)\n\n else:\n if relative:\n raise CommandError(\n 'Unreasonable volume change provided: {}{:+} -> {}%. Provide a change between {} and {:+}.'.format(\n old_volume,\n vol_change,\n old_volume + vol_change, 1 - old_volume, 100 - old_volume\n ), expire_in=20)\n else:\n raise CommandError(\n 'Unreasonable volume provided: {}%. Provide a value between 1 and 100.'.format(new_volume),\n expire_in=20\n )\n\n\n@command(\"queue\")\nasync def cmd_queue(self, channel, player, sendas=None):\n \"\"\"\n Usage:\n {command_prefix}queue\n\n Prints the current song queue.\n \"\"\"\n\n if sendas:\n sendall = (sendas.lower() in ['file', 'full', 'all'])\n else:\n sendall = False\n\n lines = []\n unlisted = 0\n andmoretext = '* ... and %s more*' % ('x' * len(player.playlist.entries))\n\n if player.current_entry:\n song_progress = str(timedelta(seconds=player.progress)).lstrip('0').lstrip(':')\n song_total = str(timedelta(seconds=player.current_entry.duration)).lstrip('0').lstrip(':')\n prog_str = '`[%s/%s]`' % (song_progress, song_total)\n\n if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):\n lines.append(\"Now Playing: **%s** added by **%s** %s\\n\" % (\n player.current_entry.title, player.current_entry.meta['author'].name, prog_str))\n else:\n lines.append(\"Now Playing: **%s** %s\\n\" % (player.current_entry.title, prog_str))\n\n for i, item in enumerate(player.playlist, 1):\n if item.meta.get('channel', False) and item.meta.get('author', False):\n nextline = '`{}.` **{}** added by **{}**'.format(i, item.title, item.meta['author'].name).strip()\n else:\n nextline = '`{}.` **{}**'.format(i, item.title).strip()\n\n currentlinesum = sum(len(x) + 1 for x in lines) # +1 is for newline char\n\n if (currentlinesum + len(nextline) + len(andmoretext) > DISCORD_MSG_CHAR_LIMIT) and not sendall:\n if currentlinesum + len(andmoretext):\n unlisted += 1\n continue\n\n lines.append(nextline)\n\n if unlisted:\n lines.append('\\n*... and %s more*' % unlisted)\n\n if not lines:\n lines.append(\n 'There are no songs queued! 
Queue something with {}play.'.format(self.config.command_prefix))\n\n message = '\\n'.join(lines)\n\n if sendall:\n with BytesIO() as data:\n data.writelines(x.encode('utf8') + b'\\n' for x in lines)\n data.seek(0)\n return await self.send_file(\n channel,\n data,\n filename='musicbot-full-queue.txt'\n )\n\n return Response(message, delete_after=30)\n\n\n@command(\"seek\")\nasync def cmd_seek(self, message, player, leftover_args, seek=None):\n \"\"\"\n Usage:\n {command_prefix}seek [seconds]\n\n Seeks the player to a specific time in seconds.\n \"\"\"\n\n if player.is_stopped:\n raise CommandError(\"Can't seek! The player is not playing!\", expire_in=20)\n\n if not seek:\n return Response('A time is required to seek.', reply=True, delete_after=20)\n\n try:\n original_seek = seek\n\n seek = ' '.join([seek, *leftover_args])\n seek = pytimeparse.parse(seek)\n\n if not seek:\n seek = int(original_seek)\n\n if seek < 0:\n raise ValueError()\n except (TypeError, ValueError):\n return Response('The time you have given is invalid.', reply=True, delete_after=20)\n\n try:\n player.seek(seek)\n except ValueError as e:\n return Response(str(e), delete_after=20)\n\n return Response('Seeked video to %s!' % (\n str(timedelta(seconds=seek)).lstrip('0').lstrip(':')\n ), delete_after=20)\n","sub_path":"musicbot/commands/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":27643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"576715734","text":"import numpy as np\nN= int(input())\na_list=[]\nb_list=[]\nfor _ in range(N):\n a,b = map(int,input().split())\n a_list.append(a)\n b_list.append(b)\n\nx = round(np.median(a_list))\ny = round(np.median(b_list))\n\nans = 0\nfor i in range(N):\n a = a_list[i]\n b = b_list[i]\n ans+= abs(a-x) + b-a + abs(b-y)\n\nprint(int(ans))","sub_path":"AtCoder/other/精選100/008.py","file_name":"008.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115978665","text":"\nfrom skimage.io import imread,imshow\nfrom skimage import exposure, img_as_ubyte\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n\n #read each image in as grayscale\n gray_day = img_as_ubyte(imread('day.jpg', True))\n gray_night = img_as_ubyte(imread('night.jpg', True))\n\n #display the images side by side\n display = np.hstack((gray_day,gray_night)) \n imshow(display)\n\n #call function to get coefficient\n BC = BhattacharyyaCoefficient(gray_day,gray_night)\n print(\"the Bhattacharyya Coefficient is: {: .4f}\".format(BC))\n\n\ndef BhattacharyyaCoefficient(gray_day,gray_night):\n \n #calculate normalized histograms for both images\n gray_day_hist,bins = exposure.histogram(gray_day,nbins=256,normalize = True)\n gray_night_hist,bins = exposure.histogram(gray_night,nbins=256,normalize = True)\n\n #calculate coefficient (the sum, of the square root, of the product, of the values of each normalized histogram at corresponding indexes)\n B_C = 0\n for i in range(256):\n B_C += np.sqrt(gray_night_hist[i] * gray_day_hist[i])\n\n return B_C\n\nmain()\n","sub_path":"Bhattacharyya Coefficient/BC.py","file_name":"BC.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"425162544","text":"#! 
python \n\"\"\"\nAuthor: Xinran Hu\n\nGraph ADT unit test file\n\"\"\"\nimport unittest\nfrom GraphADT import *\n\nclass TestGraph(unittest.TestCase):\n    def test_init(self):\n        g = GraphADT()\n        g.create_graph(4)\n        self.assertEqual(0, g.size)\n\n    def test_printMatrix(self):\n        g = GraphADT()\n        g.create_graph(4)\n        g.add_vertex()\n        g.add_vertex()\n        g.add_vertex()\n        g.add_edge(0, 1)\n        g.add_edge(1, 0)\n        g.add_edge(1, 2)\n        g.printMatrix()\n\n    def test_add_vertex(self):\n        g = GraphADT()\n        g.create_graph(4)\n        g.add_vertex()\n        self.assertEqual(1, g.size)\n        g.add_vertex()\n        self.assertEqual(2, g.size)\n        g.add_vertex()\n        self.assertEqual(3, g.size)\n\n    def test_remove_vertex(self):\n        g = GraphADT()\n        g.create_graph(4)\n        g.add_vertex()\n        g.add_vertex()\n        g.add_vertex()\n        self.assertEqual(3, g.size)\n        g.remove_vertex()\n        self.assertEqual(2, g.size)\n        g.remove_vertex()\n        self.assertEqual(1, g.size)\n        g.remove_vertex()\n        self.assertEqual(0, g.size)\n\n\n    def test_add_edge(self):\n        g = GraphADT()\n        g.create_graph(4)\n        g.add_vertex()\n        g.add_vertex()\n        g.add_vertex()\n        g.add_edge(0, 1)\n        self.assertEqual(True, g.adj[0][1])\n        g.add_edge(1, 2)\n        self.assertEqual(True, g.adj[1][2])\n        g.add_edge(2, 0)\n        self.assertEqual(True, g.adj[2][0])\n\n    def test_remove_edge(self):\n        g = GraphADT()\n        g.create_graph(4)\n        g.add_vertex()\n        g.add_vertex()\n        g.add_vertex()\n        g.add_edge(0, 1)\n        g.add_edge(1, 2)\n        g.add_edge(2, 0)\n        \n        g.remove_edge(0, 1)\n        self.assertEqual(False, g.adj[0][1])\n        g.remove_edge(1, 2)\n        self.assertEqual(False, g.adj[1][2])\n        g.remove_edge(2, 0)\n        self.assertEqual(False, g.adj[2][0])\n\n    def test_dfs(self):\n        g = GraphADT()\n        g.create_graph(4)\n        g.add_vertex()\n        g.add_vertex()\n        g.add_vertex()\n        g.add_vertex()\n        g.add_edge(0, 2) \n        g.add_edge(0, 1)\n        g.add_edge(1, 0)\n        g.add_edge(2, 3)\n        g.add_edge(2, 0)\n        g.add_edge(3, 1)\n        v = g.dfs(2)\n        self.assertEqual([2, 0, 1, 3], v)\n\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"610/assignment4/TestGraph.py","file_name":"TestGraph.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
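GraphADT itself is not included in this dataset, but the tests above pin down its interface: an adjacency-matrix graph with create_graph, add/remove_vertex, add/remove_edge, and a dfs that visits lower-numbered neighbours first. A minimal sketch consistent with those tests (an assumption, not the graded solution):

class GraphADTSketch:
    def create_graph(self, capacity):
        self.size = 0
        self.adj = [[False] * capacity for _ in range(capacity)]

    def add_vertex(self):
        self.size += 1

    def remove_vertex(self):
        self.size -= 1

    def add_edge(self, u, v):
        self.adj[u][v] = True

    def remove_edge(self, u, v):
        self.adj[u][v] = False

    def dfs(self, start):
        # iterative DFS; pushing higher-numbered neighbours first makes
        # lower-numbered ones come off the stack first
        visited, stack = [], [start]
        while stack:
            v = stack.pop()
            if v not in visited:
                visited.append(v)
                stack.extend(u for u in reversed(range(self.size)) if self.adj[v][u])
        return visited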
{"seq_id":"151877104","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\npath = '/home/zbloss/Github/PythonDS-MLBootcamp/Python-Data-Science-and-Machine-Learning-Bootcamp/Machine Learning Sections/Recommender-Systems/'\n\ncolumns_names = ['user_id', 'item_id', 'rating', 'timestamp']\n# use sep='\\t' because the file is tab-delimited\ndf = pd.read_csv(path + 'u.data', sep='\\t', names=columns_names)\nmt = pd.read_csv(path + 'Movie_Id_Titles')\n\ndf = pd.merge(df, mt, on='item_id')\ndf.head()\n# grabbing the number of unique users and items\nn_users = df.user_id.nunique()\nn_items = df.item_id.nunique()\n\nn_items\nn_users\n\ntrain_data, test_data = train_test_split(df, test_size=0.25)\n\n#Create two user-item matrices, one for training and another for testing\ntrain_data_matrix = np.zeros((n_users, n_items))\n\nfor line in train_data.itertuples():\n    train_data_matrix[line[1]-1, line[2]-1] = line[3]\n\ntest_data_matrix = np.zeros((n_users, n_items))\nfor line in test_data.itertuples():\n    test_data_matrix[line[1]-1, line[2]-1] = line[3]\n\n# Here we calculate pairwise cosine distances (1 - cosine similarity)\nfrom sklearn.metrics.pairwise import pairwise_distances\nuser_similarity = pairwise_distances(train_data_matrix, metric='cosine')\nitem_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')\n\n\ndef predict(ratings, similarity, type='user'):\n    if type == 'user':\n        mean_user_rating = ratings.mean(axis=1)\n        #You use np.newaxis so that mean_user_rating has same format as ratings\n        ratings_diff = (ratings - mean_user_rating[:, np.newaxis])\n        pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T\n    elif type == 'item':\n        pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])\n    return pred\n\nitem_prediction = predict(train_data_matrix, item_similarity, type='item')\nuser_prediction = predict(train_data_matrix, user_similarity, type='user')\n\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\ndef rmse(prediction, ground_truth):\n    prediction = prediction[ground_truth.nonzero()].flatten()\n    ground_truth = ground_truth[ground_truth.nonzero()].flatten()\n    return sqrt(mean_squared_error(prediction, ground_truth))\n\n\nrmse(user_prediction, test_data_matrix)\nrmse(item_prediction, test_data_matrix)\n","sub_path":"Python-Data-Science-and-Machine-Learning-Bootcamp/Machine Learning Sections/Recommender-Systems/AdvancedRS.py","file_name":"AdvancedRS.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
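A short, self-contained illustration of the user-based prediction formula in AdvancedRS.py above: each prediction is the user's mean rating plus a similarity-weighted average of the mean-centred ratings. Toy numbers only.

import numpy as np

ratings = np.array([[4.0, 0.0, 5.0],
                    [5.0, 3.0, 0.0]])
similarity = np.array([[1.0, 0.5],
                       [0.5, 1.0]])

mean_user_rating = ratings.mean(axis=1)
ratings_diff = ratings - mean_user_rating[:, np.newaxis]
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
print(pred)  # a (2, 3) matrix of predicted ratings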
{"seq_id":"643013344","text":"import json\n# requires Python SDK version 1.3 or higher\nfrom algosdk.v2client import indexer\n\n# instantiate indexer client\n\n# myindexer_token = 'B3SU4KcVKi94Jap2VXkK83xx38bsv95K5UZm2lab'\n# myindexer_address = 'https://testnet-algorand.api.purestake.io/idx2/'\n# myindexer_header = {'X-Api-key': myindexer_token}\n\nmyindexer_address = 'http://localhost:8981'\nindexer_token=\"\"\nmyindexer = indexer.IndexerClient(\n    indexer_token, myindexer_address)\n\n# myindexer_address = \"https://testnet-algorand.api.purestake.io/ps2\"\n# myindexer_token = \"\"\n# headers = {\n#     \"X-API-Key\": \"B3SU4KcVKi94Jap2VXkK83xx38bsv95K5UZm2lab\",\n# }\n# myindexer = indexer.IndexerClient(myindexer_token, myindexer_address, headers=headers)\n# myindexer = indexer.IndexerClient(\n#     indexer_token=\"\", indexer_address=myindexer_address)\n\nnexttoken = \"\"\nnum_accounts = 1\n# loop using next_page to paginate until there are no more accounts\n# in the response\n# (max is 100 default\n# unless limit is used for max 1000 per request on accounts)\nwhile (num_accounts > 0):\n    response = myindexer.accounts(\n        application_id=12867764, limit=2, round_num=12227042, next_page=nexttoken)\n    accounts = response['accounts']\n    num_accounts = len(accounts)\n    if (num_accounts > 0):\n        nexttoken = response['next-token']\n    # Pretty Printing JSON string\n    print(\"Account Info for Application ID: \" + json.dumps(response, indent=2, sort_keys=True))\n","sub_path":"algorandsamples/v2/mypythondemo/indexer/accounts_application_id_paging.py","file_name":"accounts_application_id_paging.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"154061294","text":"import globalPluginHandler\nimport ui\nimport subprocess\n\nclass GlobalPlugin(globalPluginHandler.GlobalPlugin):\n\n    def get_list_of_console_processes(self):\n        call = 'TASKLIST', '/FO', 'CSV'\n        output = subprocess.check_output(call)\n        output = output.replace(b'\"', b'').split(b'\\r\\n')\n        keys = output[0].split(b',')\n        proc_list = [i.split(b',') for i in output[1:] if i]\n        console_list = []\n        for app in proc_list:\n            if b\"Console\" in app[2]:\n                if not any(app[0].decode(\"utf-8\") in sublist for sublist in console_list):\n                    try:\n                        memory_usage = int(app[4].decode(\"utf-8\"))\n                    except ValueError:\n                        # values like \"12,540 K\" carry a unit and thousands separators\n                        memory_usage = int(app[4].decode(\"utf-8\").split(\" \")[0].replace(\",\", \"\"))\n                    console_list.append([app[0].decode(\"utf-8\"), memory_usage])\n\n        return console_list\n\n    def script_topFive(self, gesture):\n        processes = self.get_list_of_console_processes()\n        processes.sort(key = lambda x: x[1], reverse=True)\n        message = \"\"\n        for process in processes[0:5]:\n            message += process[0] + \", \"\n\n        ui.message(\"The 5 processes with the highest memory usage are: \" + message)\n\n    def script_allProcesses(self, gesture):\n        processes = self.get_list_of_console_processes()\n        message = \"\"\n        for process in processes:\n            message += process[0] + \", \"\n\n        ui.message(\"All running processes: \" + message)\n\n\n    def script_runningPrograms(self, gesture):\n        call = 'powershell \"gps | where {$_.MainWindowTitle } | select Description'\n        output = subprocess.check_output(call)\n        output = output.split(b\"\\r\\n\")\n        appList = []\n        for app in output[3:]:\n            appList.append(app.decode(\"utf-8\").strip())\n        \n        message = \"\"\n        for app in appList:\n            if app != \"\":\n                message += app + \", \"\n        \n        ui.message(\"All running programs: \" + message)\n\n\n    __gestures={\n        \"kb:control+NVDA+1\":\"topFive\",\n        \"kb:Control+NVDA+2\":\"allProcesses\",\n        \"kb:Control+NVDA+3\":\"runningPrograms\"\n    }","sub_path":"running processes.py","file_name":"running processes.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"243933914","text":"def search4vowels(word):\r\n    '''Return the set of vowels found in the word'''\r\n    vowels = set('aeiou')\r\n    return vowels.intersection(set(word))\r\n\r\nsearch4vowels('hitch-hiker')\r\n\r\nsearch4vowels('galaxy')\r\n\r\nsearch4vowels('life, the universe and everything')\r\n\r\nsearch4vowels('sky')\r\n","sub_path":"vsearch.py","file_name":"vsearch.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"216580631","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\n@version:\n@Author: steven\n@Date: 2020-05-27 22:20:22\n@LastEditors: steven\n@LastEditTime: 2020-05-28 22:19:51\n@Description:\n'''\nrows = [\n    {'address': '5412 N CLARK', 'date': '07/01/2018'},\n    {'address': '5232 N CLARK', 'date': '07/04/2018'},\n    {'address': '5542 E 58ARK', 'date': '07/02/2018'},\n    {'address': '5152 N CLARK', 'date': '07/03/2018'},\n    {'address': '7412 N CLARK', 'date': '07/02/2018'},\n    {'address': '6789 w CLARK', 'date': '07/03/2018'},\n    {'address': '9008 N CLARK', 'date': '07/01/2018'},\n    {'address': '2227 W CLARK', 'date': '07/04/2018'}\n]\n\nfrom operator import itemgetter\nfrom itertools import groupby\n\n# Sort first: groupby() only groups consecutive items.\nrows.sort(key=itemgetter('date'))\n\nfor date, items in groupby(rows, key=itemgetter('date')):\n    print(date)\n    for i in items:\n        print('    ', i)\n\nfrom collections import defaultdict\n\nrows_by_date = defaultdict(list)\n\nfor row in rows:\n    rows_by_date[row['date']].append(row)\n\nfor r in rows_by_date['07/04/2018']:\n    print(r)\n","sub_path":"第2章/2-3/fenzu.py","file_name":"fenzu.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"210967163","text":"import ctypes, os, openpyxl, math, datetime\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nfrom random import random, 
sample\n\nclass PairComparisonUI:\n MODE_SELECT_UI = 0\n SIDE_BY_SIDE_MODE = 1\n SWAP_IMAGE_MODE = 2\n IMAGE_EXTENSIONS = {\".jpg\", \".jpeg\", \".png\"} # list of acceptable image extensions (add if necessary)\n\n def __init__(self, master):\n self.master = master\n self.mode = None\n self.image_list = []\n\n self.load_images()\n self.create_mode_select_ui()\n\n # loads images from the images folder into image_list. If the images folder does not exist, it is created here\n def load_images(self):\n # creates images folder if one does not exist, then moves the CWD to that folder\n if not os.path.exists(os.path.join(os.getcwd(), \"output\")):\n os.makedirs(os.path.join(os.getcwd(), \"output\"))\n os.chdir(os.path.join(os.getcwd(), \"output\"))\n\n self.image_list.clear()\n\n # loads images with acceptable extensions into image_list and sorts them alphabetically\n for file in os.listdir(os.getcwd()):\n if any(file.endswith(ext) for ext in self.IMAGE_EXTENSIONS):\n self.image_list.append(file)\n self.image_list.sort()\n\n # creates the initial UI where the user can choose which mode they would like to use to compare images\n def create_mode_select_ui(self):\n self.mode = self.MODE_SELECT_UI\n self.clear_ui()\n\n # creates a full-screen, non-resizable window\n self.master.title(\"Image Comparison\")\n self.master.geometry('{}x{}'.format(ctypes.windll.user32.GetSystemMetrics(0), ctypes.windll.user32.GetSystemMetrics(1)))\n self.master.resizable(width=FALSE, height=FALSE)\n self.master.state(\"zoomed\")\n\n # creates the menu bar\n self.menu_bar = Menu(self.master)\n self.master.config(menu=self.menu_bar)\n\n self.file_menu = Menu(self.menu_bar, tearoff=0)\n self.file_menu.add_command(label=\"Exit\", command=self.close_window)\n\n self.options_menu = Menu(self.menu_bar, tearoff=0)\n self.options_menu.add_command(label=\"Change Mode\", command=self.change_mode)\n self.options_menu.add_command(label=\"Start Over\", command=self.start_over)\n\n self.help_menu = Menu(self.menu_bar, tearoff=0)\n self.help_menu.add_command(label=\"Instructions\", command=self.show_instructions)\n\n self.menu_bar.add_cascade(label=\"File\", menu=self.file_menu)\n self.menu_bar.add_cascade(label=\"Options\", menu=self.options_menu)\n self.menu_bar.add_cascade(label=\"Help\", menu=self.help_menu)\n\n self.master.grid_rowconfigure(0, weight=0)\n self.master.grid_rowconfigure(1, weight=1)\n self.master.grid_columnconfigure(0, weight=1)\n\n self.top_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.top_frame.grid(row=0, sticky=\"ew\")\n self.top_frame.grid_columnconfigure(0, weight=1)\n\n self.main_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.main_frame.grid(row=1, sticky=\"ns\")\n self.main_frame.grid_rowconfigure(0, weight=1)\n self.main_frame.grid_columnconfigure(0, weight=1)\n self.main_frame.grid_columnconfigure(1, weight=1)\n\n self.instructions = Label(self.top_frame, text=\"Select how you would like to compare images\", font=(\"helvetica\", 14), fg=\"light gray\", bg=\"#7C7C7C\")\n self.instructions.grid()\n\n # creates buttons for the user to pick the comparison mode of choice\n self.side_by_side = Button(self.main_frame, width=int(self.master.winfo_screenwidth() / 2), text=\"Side-by-side\", font=(\"georgia\", 20), fg=\"black\", bg=\"light gray\", command=self.enter_side_by_side_mode)\n self.side_by_side.grid(row=0, column=0, sticky=\"ns\", padx=(20, 10), pady=(0, 20))\n\n self.swap_image = Button(self.main_frame, width=int(self.master.winfo_screenwidth() / 2), text=\"Swap Images\", 
font=(\"georgia\", 20), fg=\"black\", bg=\"light gray\", command=self.enter_swap_image_mode)\n self.swap_image.grid(row=0, column=1, sticky=\"ns\", padx=(10, 20), pady=(0, 20))\n\n # takes the user into side-by-side comparison. This is called when the user makes an image selection in side-by-side mode, in order to ready the next image. It is also called when the user changes modes to side-by-side mode. The parameter comparison_in_progress is False when this function is called from the initial mode select screen, but True otherwise since a comparison is in progress. When it is False, which should only happen once per pair comparison, additional segments of code are carried out.\n def enter_side_by_side_mode(self, comparison_in_progress=False):\n # displays error dialogs if image_list is empty or if image naming convention was not followed\n if len(self.image_list) == 0:\n self.display_empty_images_folder_dialog()\n elif not (self.images_named_properly() or comparison_in_progress):\n self.display_images_named_improperly_dialog()\n else:\n # creates the side-by-side comparison UI. This code only runs if the user has just changed modes from a different UI\n if(self.mode != self.SIDE_BY_SIDE_MODE):\n self.mode = self.SIDE_BY_SIDE_MODE\n self.clear_ui()\n\n self.master.grid_rowconfigure(0, weight=1)\n self.master.grid_rowconfigure(1, weight=0)\n self.master.grid_columnconfigure(0, weight=1)\n\n self.main_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.main_frame.grid(row=0, sticky=\"nsew\")\n self.main_frame.grid_rowconfigure(0, weight=1)\n self.main_frame.grid_rowconfigure(2, weight=1)\n self.main_frame.grid_columnconfigure(0, weight=1)\n self.main_frame.grid_columnconfigure(1, weight=1)\n\n self.bottom_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.bottom_frame.grid(row=1, sticky=\"ew\")\n self.bottom_frame.grid_columnconfigure(0, weight=1)\n self.bottom_frame.grid_columnconfigure(1, weight=1)\n\n # creates the buttons that correspond with each image\n self.left_image_button = Button(self.bottom_frame, width=int(self.master.winfo_screenwidth() / 2), text=\"Image 1\", font=(\"georgia\", 20), fg=\"black\", bg=\"light gray\", command=lambda: self.remove_images())\n self.left_image_button.grid(row=0, column=0, padx=(20, 10), pady=20)\n\n self.right_image_button = Button(self.bottom_frame, width=int(self.master.winfo_screenwidth() / 2), text=\"Image 2\", font=(\"georgia\", 20), fg=\"black\", bg=\"light gray\", command=lambda: self.remove_images())\n self.right_image_button.grid(row=0, column=1, padx=(10, 20), pady=20)\n else:\n # if the user was already in side-by-side comparison mode, then only the previous set of images needs to be replaced\n self.left_image_panel.destroy()\n self.right_image_panel.destroy()\n\n # loads the next pair of images for comparison\n self.left_image = ImageTk.PhotoImage(Image.open(self.image_list[0]))\n self.left_image_panel = Label(self.main_frame, image=self.left_image)\n self.left_image_panel.grid(row=1, column=0, padx=(20, 10), pady=(20, 0))\n\n self.right_image = ImageTk.PhotoImage(Image.open(self.image_list[1]))\n self.right_image_panel = Label(self.main_frame, image=self.right_image)\n self.right_image_panel.grid(row=1, column=1, padx=(10, 20), pady=(20, 0))\n\n # takes the user into swap-image comparison. This is called when the user makes an image selection in swap-image mode, in order to ready the next image. It is also called when the user changes modes to swap-image mode. 
The parameter comparison_in_progress is False when this function is called from the initial mode select screen, but True otherwise since a comparison is in progress. When it is False, which should only happen once per pair comparison, additional segments of code are carried out.\n def enter_swap_image_mode(self, comparison_in_progress=False):\n # displays error dialogs if image_list is empty or if image naming convention was not followed\n if len(self.image_list) == 0:\n self.display_empty_images_folder_dialog()\n elif not (self.images_named_properly() or comparison_in_progress):\n self.display_images_named_improperly_dialog()\n else:\n # creates the swap-image comparison UI. This code only runs if the user has just changed modes from a different UI\n if(self.mode != self.SWAP_IMAGE_MODE):\n self.mode = self.SWAP_IMAGE_MODE\n self.clear_ui()\n\n # when this is False, the first image in the image pair is shown. Each time the swap image button is clicked, this boolean swaps between True and False\n self.show_other_image = False\n\n self.master.grid_rowconfigure(0, weight=1)\n self.master.grid_rowconfigure(1, weight=0)\n self.master.grid_columnconfigure(0, weight=1)\n\n self.main_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.main_frame.grid(row=0, rowspan=2, column=0, columnspan=2, sticky=\"nsew\")\n self.main_frame.grid_rowconfigure(0, weight=1)\n self.main_frame.grid_columnconfigure(0, weight=1)\n\n self.bottom_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.bottom_frame.grid(row=1, column=0, columnspan=2, sticky=\"ew\")\n self.bottom_frame.grid_columnconfigure(0, weight=1)\n self.bottom_frame.grid_columnconfigure(2, weight=1)\n\n self.right_frame = Frame(master=self.master, bg=\"#7C7C7C\")\n self.right_frame.grid(row=0, rowspan=2, column=1, sticky=\"ns\")\n self.right_frame.grid_rowconfigure(0, weight=1)\n self.right_frame.grid_rowconfigure(2, weight=1)\n\n # when clicked, store that the user chose the image currently being shown\n self.choose_image_button = Button(self.bottom_frame, text=\"Choose this image\", font=(\"georgia\", 20), fg=\"black\", bg=\"light gray\", command=lambda: self.remove_images())\n self.choose_image_button.grid(row=0, column=1, padx=20, pady=20)\n\n # when clicked, swap which image of the current image pair is being shown\n self.swap_image_button = Button(self.right_frame, text=\"Swap\", font=(\"georgia\", 20), fg=\"black\", bg=\"light gray\", command=self.swap_images)\n self.swap_image_button.grid(row=1, column=0, padx=20, pady=20)\n else:\n # if the user was already in swap-image comparison mode, then only the previous shown image needs to be replaced\n self.image_panel.destroy()\n\n # loads the next shown image\n self.shown_image = ImageTk.PhotoImage(Image.open(self.image_list[1 if self.show_other_image else 0]))\n self.image_panel = Label(self.main_frame, image=self.shown_image)\n self.image_panel.grid(row=0, column=0, padx=(20, 0), pady=(20, 0))\n\n # clears all widgets in the tkinter UI\n def clear_ui(self):\n for item in self.master.grid_slaves():\n item.destroy()\n\n # closes the program window\n def close_window(self):\n self.master.destroy()\n\n # displays a pop-up with instructions on how to load and name images\n def show_instructions(self):\n ctypes.windll.user32.MessageBoxW(None, \"Place all images for comparison in the 'output' folder before running the program. Only images using JPEG or PNG file formats may be recognized. Each image must be paired with another image for comparison. 
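# --- the hex flags passed to MessageBoxW in this class are standard Win32
# constants; spelled out for readability (same values, a sketch only):
MB_TOPMOST         = 0x00040000
MB_ICONINFORMATION = 0x00000040
MB_ICONWARNING     = 0x00000030
# e.g. ctypes.windll.user32.MessageBoxW(None, "message", "Title",
#                                       MB_TOPMOST | MB_ICONINFORMATION)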
Each image pair must share the same root file name (case sensitive), followed by '_A' for the first image and '_B' for the second image (also case sensitive). For example, an image named 'cat_A.png' must be paired with an image named 'cat_B.png'.\", \"Instructions\", 0x00040000 | 0x00000040)\n\n # displays a error indicating that the images folder is empty\n def display_empty_images_folder_dialog(self):\n ctypes.windll.user32.MessageBoxW(None, \"There are no images in the 'output' folder!\", \"Error\", 0x00040000 | 0x00000030)\n\n # displays an error indicating that one or more images are named improperly\n def display_images_named_improperly_dialog(self):\n ctypes.windll.user32.MessageBoxW(None, \"One or more images in the 'output' folder are named improperly!\", \"Error\", 0x00040000 | 0x00000030)\n\n # returns whether all images are named properly\n def images_named_properly(self):\n for x in range(0, len(self.image_list), 2):\n if x + 1 >= len(self.image_list):\n #makes sure each image has a pair\n return False\n else:\n image_A = os.path.splitext(self.image_list[x])[0]\n image_B = os.path.splitext(self.image_list[x + 1])[0]\n\n #checks to make sure each image pair ends in \"_A\" and \"_B\"\n if not (image_A[:-2] == image_B[:-2] and image_A[-2:] == \"_A\" and image_B[-2:] == \"_B\"):\n return False\n return True\n\n # swaps the image being shown in swap-image mode\n def swap_images(self):\n self.show_other_image = not self.show_other_image\n self.enter_swap_image_mode(True)\n\n # pop images from list\n def remove_images(self):\n # the image pair is removed from image_list\n self.image_list.pop(0)\n self.image_list.pop(0)\n\n # saves all the user's choices to an excel file and returns the user to the mode select screen if there are no images left to compare. Otherwise, the user is shown the next pair of images in whatever mode they were in\n if len(self.image_list) == 0:\n ctypes.windll.user32.MessageBoxW(None, \"Pair comparison complete. 
Returning to mode select screen.\", \"Notice\", 0x00040000)\n self.start_over()\n elif self.mode == self.SIDE_BY_SIDE_MODE:\n self.enter_side_by_side_mode(True)\n elif self.mode == self.SWAP_IMAGE_MODE:\n self.enter_swap_image_mode(True)\n\n # if the user is in a comparison mode, change to the other comparison mode\n def change_mode(self):\n if self.mode == self.SIDE_BY_SIDE_MODE:\n self.enter_swap_image_mode(True)\n elif self.mode == self.SWAP_IMAGE_MODE:\n self.enter_side_by_side_mode(True)\n\n # restarts pair comparison\n def start_over(self):\n os.chdir(os.path.dirname(os.getcwd()))\n self.load_images()\n self.create_mode_select_ui()\n\nroot = Tk()\nui = PairComparisonUI(root)\n\nroot.mainloop()\n","sub_path":"opencv_test/compare_images.py","file_name":"compare_images.py","file_ext":"py","file_size_in_byte":14821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67817768","text":"#!/usr/bin/python3\n\nimport datetime\nimport logging\n\nfrom utils import files\nfrom utils import logs\nfrom utils import misc\nfrom utils import sort\n\nlog = logs.setup_logging('Sorter')\nlog.setLevel(logging.INFO)\n\norigin_path = \"incoming\"\ndestination_path = \"SortedPhotos\"\nincoming_list = list()\n\nsorted_incoming = files.find_photos(origin_path, True)\n\nlog.debug(\"Sorted incoming list:\")\nfor i in sorted_incoming:\n log.debug(f\" {i}\")\n\n# Start going through the list of incoming files, one month at a time\nend_ts = misc.find_end_of_month_timestamp(sorted_incoming[0]['timestamp'])\nlog.debug(f\"Got end of first month timestamp={end_ts}\")\n\nbatch = list()\nlog.debug(f\"Before loop, end_ts={end_ts}\")\n\nfor i in sorted_incoming[:]:\n if i['timestamp'] > end_ts:\n end_ts = misc.find_end_of_month_timestamp(i['timestamp'])\n log.debug(f\"- Finished a batch, moving end_ts to {end_ts}\")\n # Process this month's photos\n sort.process_incoming(destination_path, batch)\n batch[:] = []\n log.debug(f\"Working on photo {i['name']}, ts={i['timestamp']}\")\n batch.append(i)\nelse:\n if batch:\n sort.process_incoming(destination_path, batch)\n log.debug('- Finished last batch')\n\n\n\n\n\n\n","sub_path":"sorter.py","file_name":"sorter.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551705589","text":"from django.shortcuts import render\nfrom .models import Destination\n\n# Create your views here.\n\ndef index(request):\n\n dest1 = Destination()\n dest1.name = 'Mumbai'\n dest1.desc = 'The city that never sleeps'\n dest1.one = '1st-big-item.jpg'\n dest1.img = '1st-item.jpg'\n dest1.price = 200\n dest1.check = True\n\n dest2 = Destination()\n dest2.name = 'Muhabe'\n dest2.desc = 'Also we never sleep'\n dest2.one = '2nd-big-item.jpg'\n dest2.img = '2nd-item.jpg'\n dest2.price = 150\n dest2.check = False\n\n dest3 = Destination()\n dest3.name = 'Honshyu'\n dest3.desc = 'Same here'\n dest3.one = '6th-big-item.jpg'\n dest3.img = '6th-item.jpg'\n dest3.price = 180\n dest3.check = True\n\n dests = [dest1, dest2, dest3] # PUT THE OBJECTS TO A LIST THEN PASS JUST THE LIST\n\n return render(request, 'index.html',{'dests':dests})\n\n # THIS IS HOW TO PASS OBJECTS TO HTML PAGES\n","sub_path":"ProjectOne/travello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"304696013","text":"import logging\n\nfrom PySide2.QtWidgets import QFrame, 
QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QSizePolicy, \\\n QTableWidget, QTableWidgetItem\nfrom PySide2.QtCore import Qt, QSize\n\nfrom ...ui.dialogs.new_state import SrcAddrAnnotation\n\nl = logging.getLogger('ui.widgets.qconstraint_viewer')\n\n\nclass QConstraintViewer(QFrame):\n\n COLUMNS = [ \"Constraint\", \"Src Address\", \"Cardinality\", \"Depth\", \"# Variables\" ]\n\n def __init__(self, state, parent, workspace):\n super(QConstraintViewer, self).__init__(parent)\n\n self._state = state\n self.workspace = workspace\n\n self.table = None\n \n self._state.am_subscribe(self._watch_state)\n\n \n #\n # Public methods\n #\n\n def reload(self):\n self.table.setRowCount(0)\n for constraint in self._state.solver.constraints:\n count = self.table.rowCount()\n self.table.insertRow(count)\n self.table.setItem(count, 0, QTableWidgetItem(constraint.shallow_repr()))\n\n src_addr = next(a for a in constraint.annotations if type(a) == SrcAddrAnnotation).addr\n self.table.setItem(count, 1, QTableWidgetItem(hex(src_addr)))\n\n self.table.setItem(count, 2, QTableWidgetItem(str(constraint.cardinality)))\n self.table.setItem(count, 3, QTableWidgetItem(str(constraint.depth)))\n self.table.setItem(count, 4, QTableWidgetItem(str(len(list(constraint.recursive_leaf_asts)))))\n\n #\n # Private methods\n #\n\n def _init_widgets(self):\n if self._state.am_none():\n return\n\n layout = QVBoxLayout()\n area = QScrollArea()\n area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n area.setWidgetResizable(True)\n\n table = QTableWidget(0, 0)\n table.setColumnCount(len(self.COLUMNS))\n table.setHorizontalHeaderLabels(self.COLUMNS)\n\n self.table = table\n layout.addWidget(table)\n\n # common ones\n layout.setSpacing(0)\n layout.addStretch(0)\n layout.setContentsMargins(2, 2, 2, 2)\n\n # the container\n container = QFrame()\n container.setAutoFillBackground(True)\n palette = container.palette()\n palette.setColor(container.backgroundRole(), Qt.white)\n container.setPalette(palette)\n container.setLayout(layout)\n\n area.setWidget(container)\n\n base_layout = QVBoxLayout()\n base_layout.addWidget(area)\n self.setLayout(base_layout)\n\n\n def _watch_state(self, **kwargs):\n if self.table is None:\n self._init_widgets()\n self.reload()\n","sub_path":"angrmanagement/ui/widgets/qconstraint_viewer.py","file_name":"qconstraint_viewer.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212549100","text":"print('start')\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.common.exceptions import NoAlertPresentException\r\nimport sys\r\n\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport unittest, time, re\r\nfrom bs4 import BeautifulSoup as bs\r\nimport pandas as pd\r\nfrom dateutil import parser\r\nimport itertools\r\n\r\n\r\ndef crawlingTwitter(laman, fileName, numberOfScrolling, numberOftweet):\r\n #Initiate the driver. 
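# --- a minimal sketch of the Chrome setup used below, rewritten in current
# Selenium 4 style where the driver binary path moves into a Service object;
# the "chromedriver" path and the headless flag are assumptions:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

opts = Options()
opts.add_argument("--headless")  # crawl without opening a browser window
driver = webdriver.Chrome(service=Service("chromedriver"), options=opts)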
We use Chrome\r\n print('initiate selenium')\r\n chrome_options = Options()\r\n driver = webdriver.Chrome(\"D:/Gamatec/Webdriver/chromedriver.exe\", chrome_options = chrome_options)\r\n\r\n #Determine the URL about specific search\r\n driver.base_url = laman #\"https://twitter.com/search?l=id&q=since%3A2018-12-27%20until%3A2018-12-28&src=typd\"\r\n\r\n \"\"\"\r\n contoh url yang lain\r\n berdasarkan username : https://twitter.com/GiaPratamaMD\r\n berdasarkan kata kunci( dalam hal ini sarkas) : https://twitter.com/search?q=sarkas&src=typd\"\r\n berdasarkan tanggal (6 feb 2019) : https://twitter.com/search?l=id&q=since%3A2019-02-06%20until%3A2019-02-07&src=typd\r\n\r\n \"\"\"\r\n\r\n #nama = 'Tweets_20181210_20191211.csv'\r\n driver.get(driver.base_url)\r\n\r\n #For scroll the tweets\r\n for i in range(1,numberOfScrolling):\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(0.5)\r\n print(i)\r\n\r\n\r\n #initiate variables we need. used to convert file into csv\r\n twit = {}\r\n twit_usernameTweet = {}\r\n tweets = []\r\n username = []\r\n username_respondents = []\r\n tweet_replies = []\r\n dateTime_mainTweet = []\r\n dateTime_respondent = []\r\n check_replies = 0 # to check whether or not the tweet cointain any replies\r\n i=0 #parameter to decide when we want to stop --> how many data we want\r\n\r\n\r\n #Find all the tweets\r\n posts = driver.find_elements_by_xpath(\"//li[contains(@data-item-type,'tweet')]\")\r\n\r\n #we name the page winHandleBefore so we can get back to this page anytime\r\n winHandleBefore = driver.window_handles[0]\r\n\r\n p =0 \r\n #for post in posts:\r\n while p < len(posts):\r\n #untuk menghandle error manually (skip the error)\r\n # if (p == 0):\r\n # p = p+1\r\n\r\n print(\"ini tweet ke : \" + str(p))\r\n try:\r\n detailpost = posts[p].find_element_by_xpath(\"div[contains(@class,'tweet js-stream-tweet')]\")\r\n except NoSuchElementException :\r\n p = p+1\r\n continue\r\n\r\n usernamepost = detailpost.get_attribute('data-screen-name')\r\n\r\n try:\r\n tweet_content = detailpost.find_element_by_xpath(\"div[contains(@class,'content')]\")\r\n tweet_container = tweet_content.find_element_by_xpath(\"div[contains(@class,'js-tweet-text-container')]\")\r\n tweet_substance = tweet_container.find_element_by_xpath(\"p[contains(@class,'TweetTextSize')]\") # js-tweet-text tweet-text\r\n except NoSuchElementException:\r\n p = p+1\r\n continue\r\n\r\n #check whether the tweet contains a link. 
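# --- note: the try/except NoSuchElementException pattern used just below
# can be avoided with find_elements (plural), which returns an empty list
# instead of raising when nothing matches:
#
#     links = tweet_substance.find_elements_by_xpath("a[@href]")
#     if links:
#         continue  # tweet contains a link -> skip to the next tweet
#
# (same old-style locator API as the rest of this script; a sketch, not a
# rewrite of the loop)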
If it does, process the next tweet\r\n try:\r\n #if it processes this code, means the tweet contains a link \r\n a_href = tweet_substance.find_element_by_xpath(\"a[@href]\")\r\n #print(a_href)\r\n print()\r\n p = p+1\r\n\r\n except NoSuchElementException: #if it processes this code, nmeans the tweet doesnt contain a link \r\n #print(\"no link(s) here!\")\r\n time.sleep(1)\r\n try:\r\n #print(\"ke try\")\r\n check = detailpost.click()\r\n except Exception as e:#handle error if you cant cick the tweet\r\n #print(\"ke except\")\r\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\r\n p = p+1\r\n continue #will stop the process and doesnt finish all the steps below and then go to the next tweet \r\n\r\n time.sleep(1)\r\n\r\n #find the replie(s) of the tweets\r\n try:\r\n print(\"masuk sini dong\")\r\n try:\r\n replies = driver.find_elements_by_xpath(\"//li[contains(@class, 'ThreadedConversation')]\")\r\n except NoSuchElementException:\r\n # replies = driver.find_elements_by_xpath(\"//li[contains(@class, 'ThreadedConversation--loneTweet')]\") #yang lama\r\n try:\r\n replies = driver.find_elements_by_xpath(\"//li[contains(@class, 'ThreadedConversation--loneTweet')]\")\r\n except NoSuchElementException:\r\n print(\"hehehe ga sia2 na, ini kehandle ;)\")\r\n p = p+1\r\n continue\r\n \r\n for reply in replies:\r\n reply_items = reply.find_element_by_xpath(\"ol[contains(@class, 'stream-items')]\")\r\n\r\n try:\r\n reply_li2 =reply_items.find_element_by_xpath(\"li[contains(@class,'js-stream-item stream-item stream-item')]\") \r\n reply_expand = reply_li2.find_element_by_xpath(\"div[contains(@class,'tweet js-stream-tweet js-actionable-tweet js-profile-popup-actionable dismissible-content')]\") # stream-item stream-item#js-stream-item stream-item stream-item\r\n\r\n tweet_respondent = reply_expand.get_attribute('data-screen-name')\r\n\r\n reply_content = reply_expand.find_element_by_xpath(\"div[contains(@class,'content')]\")\r\n reply_tweetTextContainer = reply_content.find_element_by_xpath(\"div[contains(@class,'js-tweet-text-container')]\")\r\n reply_tweetText = reply_tweetTextContainer.find_element_by_xpath(\"p[contains(@class,'TweetTextSize js-tweet-text tweet-text')]\").text\r\n tweet_reply = reply_tweetText \r\n\r\n\r\n stream_item_header_respondent = reply_content.find_element_by_xpath(\"div[contains(@class,'stream-item-header')]\")\r\n tweet_time_class_respondent = stream_item_header_respondent.find_element_by_xpath(\"small[contains(@class,'time')]\")\r\n tweet_timestamp_respondent = tweet_time_class_respondent.find_element_by_xpath(\"a[contains(@class,'tweet-timestamp js-permalink js-nav js-tooltip')]\")\r\n tweet_time_respondent = tweet_timestamp_respondent.get_attribute('title')\r\n\r\n check_replies = 1\r\n\r\n\r\n\r\n\r\n except NoSuchElementException:\r\n reply_tweets = reply_items.find_elements_by_xpath(\"div[contains(@class,'ThreadedConversation-tweet')]\") \r\n for reply_li in reply_tweets:\r\n reply_li2 =reply_li.find_element_by_xpath(\"li[contains(@class,'js-stream-item stream-item stream-item')]\") \r\n reply_expand = reply_li2.find_element_by_xpath(\"div[contains(@class,'tweet js-stream-tweet js-actionable-tweet js-profile-popup-actionable dismissible-content')]\") # stream-item stream-item #js-stream-item stream-item stream-item\r\n\r\n tweet_respondent = reply_expand.get_attribute('data-screen-name')\r\n #tweet_time = reply_expand.get_attribute('title')\r\n\r\n reply_content = reply_expand.find_element_by_xpath(\"div[contains(@class,'content')]\")\r\n 
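# --- a sketch of replacing the fixed time.sleep() pauses in this loop with
# an explicit wait; WebDriverWait and expected_conditions are already
# imported at the top of this script, and the timeout value is an assumption:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_replies(driver, timeout=10):
    # blocks until at least one reply thread is present, or raises
    # TimeoutException after `timeout` seconds
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_all_elements_located(
            (By.XPATH, "//li[contains(@class, 'ThreadedConversation')]")))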
reply_tweetTextContainer = reply_content.find_element_by_xpath(\"div[contains(@class,'js-tweet-text-container')]\")\r\n reply_tweetText = reply_tweetTextContainer.find_element_by_xpath(\"p[contains(@class,'TweetTextSize js-tweet-text tweet-text')]\").text\r\n tweet_reply = reply_tweetText \r\n\r\n\r\n stream_item_header_respondent = reply_content.find_element_by_xpath(\"div[contains(@class,'stream-item-header')]\")\r\n tweet_time_class_respondent = stream_item_header_respondent.find_element_by_xpath(\"small[contains(@class,'time')]\")\r\n tweet_timestamp_respondent = tweet_time_class_respondent.find_element_by_xpath(\"a[contains(@class,'tweet-timestamp js-permalink js-nav js-tooltip')]\")\r\n tweet_time_respondent = tweet_timestamp_respondent.get_attribute('title')\r\n\r\n break #break bc only takes the first reply\r\n\r\n check_replies = 1\r\n\r\n if(check_replies): \r\n #take the text\r\n try:\r\n main_tweet_content = detailpost.find_element_by_xpath(\"div[contains(@class,'content')]\")\r\n except NoSuchElementException:\r\n p = p+1\r\n continue\r\n\r\n #take the main tweet\r\n main_tweet_text2 = tweet_substance.text\r\n\r\n #take the date and time \r\n stream_item_header_mainTweet = main_tweet_content.find_element_by_xpath(\"div[contains(@class,'stream-item-header')]\")\r\n tweet_time_class_mainTweet = stream_item_header_mainTweet.find_element_by_xpath(\"small[contains(@class,'time')]\")\r\n tweet_timestamp_mainTweet = tweet_time_class_mainTweet.find_element_by_xpath(\"a[contains(@class,'tweet-timestamp js-permalink js-nav js-tooltip')]\")\r\n tweet_time_mainTweet = tweet_timestamp_mainTweet.get_attribute('title')\r\n\r\n #insert all data to lists\r\n print(\"data ke : \" + str(i+1))\r\n username.append(usernamepost)\r\n print(usernamepost)\r\n tweets.append(main_tweet_text2)\r\n print(main_tweet_text2)\r\n dateTime_mainTweet.append(tweet_time_mainTweet)\r\n print(tweet_time_mainTweet)\r\n print()\r\n\r\n username_respondents.append(tweet_respondent) \r\n print(tweet_respondent)\r\n tweet_replies.append(tweet_reply)\r\n print(tweet_reply)\r\n dateTime_respondent.append(tweet_time_mainTweet)\r\n print(tweet_time_respondent)\r\n print()\r\n print()\r\n i += 1\r\n break #break bc only check for the first thread\r\n\r\n #if we cant find the reply. 
In other words, the tweet has no reply\r\n except NoSuchElementException:\r\n print(\"masuk except\")\r\n try:\r\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\r\n except Exception as e:\r\n time.sleep(1)\r\n print(e)\r\n p = p+1\r\n continue\r\n\r\n\r\n p = p+1\r\n #find the close button and click it\r\n expandTwit = driver.find_element_by_xpath(\"//div[@id='permalink-overlay']\")\r\n time.sleep(1)\r\n try: #if the tweet only contains text\r\n class_close = expandTwit.find_element_by_xpath(\"div[contains(@class,'PermalinkProfile-dismiss modal-close-fixed')]\")\r\n close_expandedTwit = class_close.click()\r\n time.sleep(1)\r\n except Exception as e: #if the tweetcontains media (pictures or vid)\r\n try:\r\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\r\n except Exception as e:\r\n time.sleep(1)\r\n print(e)\r\n break\r\n\r\n time.sleep(1)\r\n driver.switch_to_window(winHandleBefore) #switch the handle to the page before we clicked the tweet\r\n #time.sleep(1)\r\n print()\r\n\r\n #set the check back to 0 \r\n check_replies = 0\r\n\r\n #to determine how many tweets we want\r\n if(i>numberOftweet):\r\n break\r\n\r\n \r\n #convert to csv file\r\n twit.update({'usernames' : username, 'tweets' : tweets, 'date_and_time' : dateTime_mainTweet,'respondents':username_respondents,'tweet_replies':tweet_replies, 'date_and_time_replies':dateTime_respondent })\r\n df = pd.DataFrame(data=twit)\r\n df.to_csv(fileName)\r\n\r\n \r\nfor i in range(1,30): #mengambil data dari tgl 10 sampai 29\r\n tanggal_start = str(i)\r\n tanggal_end = str(i+1)\r\n if(i<9):\r\n tanggal_start = \"0\"+str(i)\r\n tanggal_end = \"0\"+str(i+1)\r\n if(i==9):\r\n tanggal_start = \"0\"+str(i)\r\n tahun = \"2018\"\r\n bulan = \"09\"\r\n L = \"https://twitter.com/search?l=id&q=since%3A\" + tahun + \"-\" + bulan + \"-\" +tanggal_start+\"%20until%3A\" + tahun + \"-\" + bulan + \"-\" +tanggal_end+\"&src=typd\"\r\n f_name = \"Tweets_\" + tahun + bulan + tanggal_start + \"_\" + tahun + bulan + tanggal_end + \".csv\"\r\n try:\r\n crawlingTwitter(L,f_name, 80, 120)\r\n except:\r\n continue\r\n","sub_path":"DataCrawlingTwitter_Selenium.py","file_name":"DataCrawlingTwitter_Selenium.py","file_ext":"py","file_size_in_byte":13459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"148784288","text":"from matplotlib import pyplot as plt\nimport random\n\nx = range(2, 26, 2)\n\ny = [random.uniform(15,26) for i in range(12)]\n\n# 每隔2绘制刻度\nx_ticks_1 = range(2,26,2)\n\n# 每隔0.5绘制刻度\nx_ticks_2 = [i/2 for i in range(4, 49)]\n\n# 上面一种太密集,可以使用切片\nx_ticks_3 = x_ticks_2[::3]\n\nplt.xticks(x_ticks_3)\n\nplt.plot(x,y)\n\nplt.show()\n","sub_path":"matplotlib_practice/poly_line/set_x_scale.py","file_name":"set_x_scale.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547168404","text":"class Solution:\n def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n ans=[]\n c=list(range(1,10))\n def backtracker(c,k,target,path):\n if target==0 and k==0 and path not in ans:\n ans.append(path.copy())\n elif target>0:\n for i in range(len(c)):\n if c[i]>target:\n break\n path.append(c[i])\n backtracker(c[i+1:],k-1,target-c[i],path)\n path.pop()\n \n backtracker(c,k,n,[])\n return list(ans)\n","sub_path":"Combination Sum III.py","file_name":"Combination Sum 
III.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114618302","text":"import json\nimport boto3\nfrom boto3.dynamodb.conditions import Key\n\n\ndef lambda_handler(event, context):\n election_id = event[\"queryStringParameters\"][\"election_id\"]\n client = boto3.resource(\"dynamodb\")\n table = client.Table(\"electionInfo\")\n\n response = table.get_item(\n Key={\n 'election_id': election_id\n })\n\n if \"Item\" in response:\n json_body = response[\"Item\"]\n else:\n json_body = {}\n\n responseObject = {}\n responseObject['statusCode'] = 200\n responseObject['headers'] = {}\n responseObject['headers']['Content-Type'] = 'application/json'\n responseObject['body'] = json.dumps(json_body)\n\n return responseObject","sub_path":"Lambda/GetElectionById.py","file_name":"GetElectionById.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"253371857","text":"import os\nimport sys\nimport json\nimport spotipy\nimport webbrowser\nimport spotipy.util as util\nfrom colorama import Fore, Style\nfrom json.decoder import JSONDecodeError\n\n# get username from the terminal\nusername = sys.argv[1]\nscope = 'user-read-private user-read-playback-state user-modify-playback-state'\n\n# User ID: 1232420630?si=oJ4jVWLnQiO1O9PdPCtADg\n\n#erase cache and prompt for user permissions\ntry:\n token = util.prompt_for_user_token(username, scope)\nexcept (AttributeError, JSONDecodeError):\n os.remove(f\".cache-{username}\")\n token = util.prompt_for_user_token(username, scope)\n\n# Create our spotifyObject with permissions\nspotifyObject = spotipy.Spotify(auth=token)\n\n#Python prompt\nneutral = Style.RESET_ALL\npyprompt = Fore.RED + Style.BRIGHT + \">>>\" + neutral\n\n# Get current device\ndevices = spotifyObject.devices()\ndeviceList = devices['devices']\njson_data = json.dumps(devices, sort_keys=True, indent=4) # How can this be more efficient?\nitem_dict = json.loads(json_data)\ntotalDevices = len(item_dict['devices']) # Lists the total number of devices connected to spotify\ndeviceDict = {}\n#print(json.dumps(devices, sort_keys=True, indent=4))\n\n# Current track information\ntrack = spotifyObject.current_user_playing_track()\nprint()\n#print(json.dumps(track, sort_keys=True, indent=4))\nartist = track['item']['artists'][0]['name']\ntrack = track['item']['name']\nif artist != \" \":\n print(\"Currently playing \" + track + \" by \" + artist)\n print()\n\n# User information\nuser = spotifyObject.current_user()\ndisplayName = user['display_name']\nfollowers = user['followers']['total']\n\n\n# Prints out json data in a form we can read\n# print(json.dumps(VARIABLE, sort_keys=True, indent=4))\n\nprint(\"Here are your devices connnected to your spotify account:\")\nwhile totalDevices > 0:\n totalDevices -= 1\n deviceID = deviceList[totalDevices]['id']\n deviceName = deviceList[totalDevices]['name']\n deviceDict[deviceName] = deviceID\n print(totalDevices , \" - \", deviceList[totalDevices]['name'])\n\n\n\nwhile True:\n print()\n print(pyprompt + \" Welcome to Spotipy \" + displayName + \"!\")\n print(\"You have \" + str(followers) + \" followers.\")\n print()\n# print(\"Which device would you like to play to?\")\n# deviceSelect = int(input(\"Your device: \"))\n#\n# print(deviceList[deviceSelect]['name'])\n# if deviceSelect != \"\":\n# print()\n# print(\"Okay. 
We'll play to \" + deviceList[deviceSelect]['name'])\n## else:\n# break\n\n print(\"0 - search for an artist\")\n print(\"1 - exit\")\n print()\n choice = input(pyprompt + \" Your choice: \")\n\n # Search for the artist\n if choice == \"0\":\n print()\n searchQuery = input(pyprompt + \" Ok, what's their name?: \")\n print()\n\n # Get search results\n searchResults = spotifyObject.search(searchQuery, 1, 0, \"artist\")\n # print(json.dumps(searchResults, sort_keys=True, indent=4))\n\n #Artists details\n artist = searchResults['artists']['items'][0]\n print(Fore.CYAN + Style.BRIGHT + artist['name'] + neutral)\n print(str(artist['followers']['total']) + \" followers\")\n print(artist['genres'][0])\n print()\n webbrowser.open(artist['images'][0]['url'])\n artistID = artist['id']\n\n #Album and track details\n trackURIs = []\n trackArt = []\n z = 0\n\n # Extract album data\n albumResults = spotifyObject.artist_albums(artistID)\n #print(json.dumps(albumResults, sort_keys=True, indent=4))\n albumResults = albumResults['items']\n\n for item in albumResults:\n print(Style.BRIGHT + Fore.MAGENTA + \"Album \" + neutral + \"\\033[4m\" + item['name'] + neutral)\n albumID = item['id']\n albumArt = item['images'][0]['url']\n\n\n # Extract track data\n trackResults = spotifyObject.album_tracks(albumID)\n trackResults = trackResults['items']\n\n for item in trackResults:\n print(Style.BRIGHT + Fore.BLUE + str(z) + neutral + \": \" + item['name'])\n trackURIs.append(item['uri'])\n trackArt.append(albumArt)\n z+=1\n print()\n\n #See album art for specific track and play\n while True:\n songSelection = input(pyprompt + \" Enter a song number to see album art (x to exit): \")\n if songSelection == \"x\":\n break\n trackSelectionList = []\n trackSelectionList.append(trackURIs[int(songSelection)])\n spotifyObject.start_playback(deviceID, None, trackSelectionList)\n webbrowser.open(trackArt[int(songSelection)])\n track = spotifyObject.current_user_playing_track()\n artist = track['item']['artists'][0]['name']\n track = track['item']['name']\n if artist != \" \":\n print()\n print(\"Currently playing \" + track + \" by \" + artist)\n print()\n # End the program\n if choice == \"1\":\n break\n","sub_path":"spotifyxx.py","file_name":"spotifyxx.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195417038","text":"import numpy as np\nimport Direction\n\nclass Grid:\n\n def __init__(self, width, height):\n self.width = width;\n self.height = height;\n # Grid to hold all objects\n self.grid = [[0 for x in range(width)] for y in range(height)];\n self.object_set = set();\n\n # Map of objects to locations.\n self.obj_to_loc_dic = {};\n \n # Initialize grid to empty dictionaries.\n for x in range(width):\n for y in range(height):\n self.grid[y][x] = [];\n\n def get_all_locations(self):\n list = [];\n for x in range(self.width):\n for y in range(self.height):\n list.append((x,y));\n return list;\n\n def add_to_grid(self, obj, loc):\n x, y = loc[0], loc[1];\n self.grid[y][x].append(obj);\n self.obj_to_loc_dic[obj] = loc;\n self.object_set.add(obj);\n\n def get_objs_at(self, loc):\n if self.valid_location(loc):\n return self.grid[loc[1]][loc[0]].copy();\n # Should be exception.\n else: return None;\n\n def valid_location(self, loc):\n return not (loc[0] >= self.width or loc[0] < 0\n or loc[1] >= self.height or loc[1] < 0);\n\n def is_legal_move(self, obj, vec):\n # Get the old location of the object.\n old_loc = 
self.get_object_location(obj);\n\n # Calculate new vector.\n new_loc = old_loc + vec;\n\n return self.valid_location(new_loc);\n\n def get_possible_moves(self, actor):\n move_set = [];\n for direction in Direction.directions:\n if self.is_legal_move(actor, direction):\n move_set.append(direction);\n\n return move_set;\n \n \n def move_object(self, obj, vec):\n\n # Get the old location of the object.\n old_loc = self.obj_to_loc_dic[obj];\n\n # Calculate new vector.\n new_loc = old_loc + vec;\n\n if not self.valid_location(new_loc):\n print('Not a valid location');\n return -1;\n\n #print('New Vec: ' + str(new_loc));\n\n x, y = old_loc[0], old_loc[1];\n\n # Remove the object from the grid.\n self.grid[y][x].remove(obj);\n\n # Add the object to the grid in the new location.\n self.add_to_grid(obj, new_loc);\n\n def get_object_location(self, obj):\n return self.obj_to_loc_dic[obj];\n\n def observe_loc(self, stats, loc):\n loc_data = self.get_objs_at(loc);\n if stats.roll('iq', 0):\n print('Observed: ' + str(loc_data));\n return loc_data;\n else:\n num_observations= np.random.random_integers(0, len(loc_data));\n print('Observed nothing.');\n return [];\n\n def get_object_loc_mapping(self):\n loc_obj_map = {};\n for obj in self.object_set:\n loc = Direction.to_tuple(self.obj_to_loc_dic[obj]);\n if loc in loc_obj_map:\n loc_obj_map[loc].append(obj);\n else:\n loc_obj_map[loc] = [obj];\n return loc_obj_map;\n\n def get_object_list(self):\n obj_list = [];\n for obj in self.object_set:\n obj_list.append(obj)\n return obj_list;\n","sub_path":"Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612663495","text":"from kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.uix.widget import Widget\nfrom kivy.core.window import Window\nfrom kivy.clock import Clock\nfrom kivy.graphics import Rectangle, Color, Canvas, Ellipse\n\n\ncolors = (\n (1, 0, 0), # RED\n (0, 1, 0), # GREEN\n (0, 0, 1), # BLUE\n (1, 1, 0), # YELLOW\n (1, 1, 1) # WHITE\n)\n\nSTART_POSITION = (0, 0)\nHOST_COLOR_ID = 0\n\n# Do testow\npositions = [(0, 0), (0, 100), (0, 200)]\npositions2 = [(300, 60), (20, 450), (600, 200)]\npositions3 = [(5, 4), (3, 8), (300, 300)]\npos = [positions, positions2, positions3]\n\n\ndef read_position(position):\n x, y = position.split('.')\n pos = (int(x), int(y))\n return pos\n\n\nclass GameLoopHost(GridLayout):\n def __init__(self, host, **kwargs):\n super(GameLoopHost, self).__init__(**kwargs)\n\n with self.canvas:\n self.background = Rectangle(pos=self.pos, size=(Window.width, Window.height), source='forest.jpg')\n\n self.host = host\n\n self.players_widgets = {}\n players = self.host.players\n self.players_widgets[self.host.username] = (Player(self.host.username, HOST_COLOR_ID, START_POSITION))\n\n for team in players:\n for player in players[team]:\n color_id = team\n self.players_widgets[player] = (Player(player, color_id, START_POSITION))\n\n for player_widget in self.players_widgets:\n self.add_widget(self.players_widgets[player_widget])\n\n Clock.schedule_interval(self.transfer_data, 1)\n\n def transfer_data(self, dt):\n positions = self.host.send_and_collect_data()\n\n self.players_widgets[self.host.username].update(read_position(positions[0]))\n\n for i in range(int(self.host.teams_no)):\n team_positions = positions[i + 1]\n\n for username in team_positions:\n position = team_positions[username]\n\n if position is None:\n position = 
\"0.0\"\n\n self.players_widgets[username].update(read_position(position))\n\n\nclass GameLoopClient(GridLayout):\n def __init__(self, client, **kwargs):\n super(GameLoopClient, self).__init__(**kwargs)\n with self.canvas:\n self.background = Rectangle(pos=self.pos, size=(Window.width, Window.height), source='forest.jpg')\n\n self.client = client\n\n self.players_widgets = {}\n\n positions = self.client.send_and_collect_data()\n\n for player in positions:\n self.players_widgets[player] = (Player(player, 0, (0, 0)))\n\n for player_widget in self.players_widgets:\n self.add_widget(self.players_widgets[player_widget])\n\n Clock.schedule_interval(self.transfer_data, 1)\n\n def transfer_data(self, dt):\n\n data = self.client.send_and_collect_data()\n\n if not data:\n print(\"LOST CONNECTION\")\n else:\n for username in data:\n position = data[username]\n if position is None:\n position = \"0.0\"\n self.players_widgets[username].update(read_position(position))\n\n\nclass Player(Widget):\n def __init__(self, username, color, position, **kwargs):\n super(Player, self).__init__(**kwargs)\n\n self.username = username\n self.position = position\n self.color = colors[color]\n\n with self.canvas:\n Color(self.color[0], self.color[1], self.color[2])\n self.rect = Ellipse(group='a', pos=self.position, size=(50, 50))\n print(self.username, self.color)\n\n def update(self, position):\n self.position = position\n self.rect.pos = self.position\n\n\nclass GameLoopScreen(Screen):\n def __init__(self, host, **kwargs):\n super().__init__(**kwargs)\n\n self.name = \"gameloop\"\n self.add_widget(GameLoopHost(host))\n\n\nclass GameLoopClientScreen(Screen):\n def __init__(self, client, **kwargs):\n super().__init__(**kwargs)\n self.name = \"gameloopclient\"\n self.add_widget(GameLoopClient(client))\n\n","sub_path":"project/gameloop.py","file_name":"gameloop.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"573786751","text":"import numpy as np\nimport copy\n\n\nclass SlidingTilePuzzle(object):\n def __init__(self, puzzle_size=9):\n self.puzzle_size = puzzle_size\n self.goal_state = np.arange(self.puzzle_size, dtype=np.int8)\n # self.start_state = start_state\n # self.current_state = self.start_state\n self.swapable_idx_lst = [\n [1, 3],\n [0, 2, 4],\n [1, 5],\n [0, 4, 6],\n [1, 3, 5, 7],\n [2, 4, 8],\n [3, 7],\n [4, 6, 8],\n [7, 5]\n ]\n\n def next_states(self, current_state):\n # identify the position of 0\n zero_idx = np.argwhere(current_state == 0).squeeze()\n next_s_lst_ = []\n for idx in self.swapable_idx_lst[zero_idx]:\n next_s_ = np.copy(current_state)\n next_s_[zero_idx] = current_state[idx]\n next_s_[idx] = current_state[zero_idx]\n next_s_lst_.append(next_s_)\n return next_s_lst_\n\n def _swap(self, s_, i, j):\n s_copy_ = copy.copy(s_)\n tmp = s_copy_[j]\n s_copy_[j] = s_[i]\n s_copy_[i] = tmp\n return s_copy_\n\n def _rank(self, n, s_, s_inv):\n if n == 1:\n return 0\n tmp = s_[n - 1]\n s_ = self._swap(s_, n - 1, s_inv[n - 1])\n s_inv = self._swap(s_inv, tmp, n - 1)\n print(tmp)\n return tmp + n * self._rank(n - 1, s_, s_inv)\n\n def rank(self, s_):\n s_inv = [i for i in s_]\n rank_ = self._rank(self.puzzle_size, s_, s_inv)\n return rank_\n\n\nif __name__ == '__main__':\n stp = SlidingTilePuzzle()\n s = np.random.choice(9, 9, replace=False)\n # print(s.reshape((3, 3)))\n # print('-' * 8)\n next_s_lst = stp.next_states(s)\n for next_s in next_s_lst:\n pass\n # print(next_s.reshape(3, 3))\n 
print(stp.rank(list(range(stp.puzzle_size))))\n","sub_path":"envs/sliding_tile_puzzle.py","file_name":"sliding_tile_puzzle.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483919623","text":"import urllib.request\r\nimport math\r\nimport re\r\nPenple=[]\r\ndef getHtml(url):\r\n\tdata=None;\r\n\theaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36'}\r\n\treq=urllib.request.Request(url,data,headers)\r\n\tresponse=urllib.request.urlopen(req)\r\n\thtml=response.read()\r\n\treturn html\r\ndef getName(html):\r\n\treg=r' (.+?) ((.+?))'\r\n\tnameRe=re.compile(reg)\r\n\tnameCon=re.findall(nameRe,html.decode('gbk',\"ignore\"))\r\n\treturn nameCon\r\ndef getIntro(html):\r\n\treg = r'(.+?)
    '\r\n\tintroRe=re.compile(reg)\r\n\tintroCon=re.findall(introRe,html.decode('gbk',\"ignore\"))\r\n\treturn introCon\r\nfor var in range(1192100,1192300):\r\n\tk=\"http://www.studentsoftheworld.info/penpals/mypage.php?REF=\"+str(var)\r\n\thtml=getHtml(k)\r\n\tnoData=re.search(\"Sorry, page not found\",html.decode('gbk',\"ignore\"))\r\n\tif not noData:\r\n\t\tk1=getName(html)\r\n\t\tprint(\"Code:\"+str(var))\r\n\t\tfor item in k1:\r\n\t\t\tprint(item[0],item[1])\r\n\t\t\tif len(getIntro(html))!=0:\r\n\t\t\t\tprint(getIntro(html)[0]+'\\n')\r\n\t\t\telse:\r\n\t\t\t\tprint(\"No introduce\"+'\\n')\r\n","sub_path":"PenpleSpider.py","file_name":"PenpleSpider.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248044251","text":"import logging\nfrom time import localtime, time, strftime, sleep\n\n\nclass CallingInfo():\n def __init__(self, name):\n log = logging.getLogger(name=name)\n log.setLevel(logging.INFO)\n fh = logging.FileHandler(name + '.log')\n log.addHandler(fh)\n log.info('Start'.center(50, '-'))\n\n self.log = log\n self.formatter = '%(func)s -> [%(time)s - %(used)s - %(ncalls)s]'\n\n def info(self, func):\n def wrapper(*args, **kwargs):\n wrapper.ncalls += 1\n lt = localtime()\n start = time()\n res = func(*args, **kwargs)\n used = time() - start\n info = {}\n info['func'] = func.__name__\n info['time'] = strftime('%x %X', lt)\n info['used'] = used\n info['ncalls'] = wrapper.ncalls\n msg = self.formatter % info\n self.log.info(msg)\n return res\n\n wrapper.ncalls = 0\n return wrapper\n\n def setFormatter(self, formatter): # 设置 formatter 格式\n self.formatter = formatter\n\n def turnOn(self): # 打开记录(通过调低记录级别)\n self.log.setLevel(logging.INFO)\n\n def turnOff(self): # 关闭记录(通过调高记录级别)\n self.log.setLevel(logging.WARN)\n\n\n# 核心思想其实是,为了程序更加的简单明了。需要对编程思想以及代码进行一些改写。\n# 首先是在类的 __init__ 方法中,事先定义一系列的参数和操作,对类的实例进行初始化。\n# 然后再定义主要的方法(这个例子中,就是 info 方法),同时定义一些其他的用于辅助的方法,\n# 以后开发时,应该以类为主,先写类,然后写类中的方法,这样的话,简单,便于修改与操作。\n\n\n\nc1 = CallingInfo('log1')\nc2 = CallingInfo('log2')\n\n\n@c1.info\ndef f():\n print('f')\n\n\n@c1.info\ndef g():\n print('g')\n\n\n@c2.info\ndef h():\n print('h')\n\n\nfrom random import choice\n\nfor _ in range(50):\n choice([f, g, h])()\n sleep(choice([0.5, 1, 1.5]))\n","sub_path":"basis_python_efficient/p8_装饰器使用技巧进阶训练/py5_如何在类中定义装饰器.py","file_name":"py5_如何在类中定义装饰器.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"165603298","text":"from django.conf.urls import url\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom django.http import JsonResponse\nfrom django.db.models import Q\nfrom django import forms\nfrom django.db.models import OneToOneField, ForeignKey, ManyToManyField\nimport datetime\nfrom django.contrib.auth.hashers import make_password\n# import locale\nfrom Myutils.pageutil import Page\n\n# locale.setlocale(locale.LC_CTYPE, 'chinese')\n\n\nclass ModelXAdmin(object):\n \"\"\"\n 封装给定模型的所有管理选项和功能\n \"\"\"\n field_names = []\n\n def __init__(self, model, site):\n self.model = model\n self.x_admin_site = site\n if self.field_names:\n self.fields = []\n # 借助python的特性实现的查找功能,时间复杂度为O(n)\n for field in self.model._meta.fields:\n if field.name == self.field_names[0]:\n self.fields.append(field)\n self.field_names.pop()\n else:\n self.fields = [field for field in self.model._meta.fields]\n self.cross_table_fields = []\n for field in self.fields:\n if isinstance(field, 
ManyToManyField) or isinstance(field, OneToOneField) or isinstance(field, ForeignKey):\n self.cross_table_fields.append(field)\n\n def list_view(self, request):\n current_app_label = self.model._meta.app_label\n current_model_name = self.model._meta.model_name\n qs = self.model.objects.all()\n data_list = []\n for obj in qs:\n data = []\n for field in self.fields:\n if field.name.find(\"password\") != -1:\n continue\n data.append(getattr(obj, field.name))\n data_list.append(data)\n field_names = []\n for field in self.fields:\n if field.name.find(\"password\") != -1:\n continue\n field_names.append(field.verbose_name)\n page = Page(data_list, request, per_page=6)\n sum = page.Sum()\n return render(\n request,\n \"xadmin/list_view.html\",\n {\n \"current_url\": current_app_label + \"/\" + current_model_name + \"/\",\n \"model_name\": self.model._meta.model_name,\n \"data_list\": sum[0],\n \"page_html\": sum[1],\n \"field_names\": field_names\n }\n )\n\n def get_form(self, request=None, instance=None):\n \"\"\"\n 用于获取一个XAdminFrom对象\n\n\n\n :param request: 当前请求\n :param instance: model类的对象\n :return: 一个XAdminFrom对象\n \"\"\"\n\n class XAdminFrom(forms.ModelForm):\n class Meta:\n model = self.model\n fields = [field.name for field in self.fields]\n\n if request and instance:\n form = XAdminFrom(data=request.POST, instance=instance)\n elif request:\n form = XAdminFrom(data=request.POST)\n elif instance:\n form = XAdminFrom(instance=instance)\n else:\n form = XAdminFrom()\n return form\n\n def add(self, request):\n cross_tables_info = []\n for field in self.cross_table_fields:\n cross_tables_info.append({\n \"app_label\": getattr(self.model, field.name).field.remote_field.model._meta.app_label,\n \"model_name\": getattr(self.model, field.name).field.remote_field.model._meta.model_name,\n \"verbose_name\": field.verbose_name\n })\n current_app_label = self.model._meta.app_label\n current_model_name = self.model._meta.model_name\n form = self.get_form()\n if request.method == \"POST\":\n form = self.get_form(request)\n if form.is_valid():\n password = form.cleaned_data.get(\"password\")\n form.save(commit=False)\n for field in self.fields:\n if \"password\" in field.name:\n setattr(form.instance, field.name, make_password(password))\n form.instance.save()\n form.save_m2m()\n prev_url = request.GET.get(\"prev_url\")\n if prev_url:\n return redirect(prev_url)\n return redirect(\"/xadmin/\" + current_app_label + \"/\" + current_model_name + \"/\")\n return render(request, \"xadmin/update_view.html\", locals())\n\n def update(self, request, pk):\n cross_tables_info = []\n for field in self.cross_table_fields:\n cross_tables_info.append({\n \"app_label\": getattr(self.model, field.name).field.remote_field.model._meta.app_label,\n \"model_name\": getattr(self.model, field.name).field.remote_field.model._meta.model_name,\n \"verbose_name\": field.verbose_name\n })\n current_app_label = self.model._meta.app_label\n current_model_name = self.model._meta.model_name\n qs = self.model.objects.filter(pk=pk).first()\n form = self.get_form(instance=qs)\n if request.method == \"POST\":\n form = self.get_form(request=request, instance=qs)\n if form.is_valid():\n password = form.cleaned_data.get(\"password\")\n form.save(commit=False)\n for field in self.fields:\n if \"password\" in field.name:\n setattr(form.instance, field.name, make_password(password))\n form.instance.save()\n form.save_m2m()\n prev_url = request.GET.get(\"prev_url\")\n if prev_url:\n return redirect(prev_url)\n return redirect(\"/xadmin/\" + 
current_app_label + \"/\" + current_model_name + \"/\")\n return render(request, \"xadmin/update_view.html\", locals())\n\n def delete(self, request, pk=None):\n if request.is_ajax():\n delete_id_list = request.POST.getlist(\"delete_id_list\")\n if delete_id_list:\n for delete_id in delete_id_list:\n self.model.objects.filter(pk=delete_id).delete()\n return JsonResponse({\"status\": True})\n self.model.objects.filter(pk=pk).delete()\n return JsonResponse({\"status\": True})\n\n def search_data(self, request):\n if request.is_ajax():\n keyword = request.POST.get(\"keyword\")\n ret = {\"status\": False, \"html\": \"\"}\n if not len(keyword):\n return JsonResponse(ret)\n ret[\"status\"] = True\n q = Q()\n q.connector = \"or\"\n for field in self.fields:\n # 思路2存在的问题的解决方法:\n if field in self.cross_table_fields:\n continue\n else:\n q.children.append((field.name + \"__icontains\", keyword))\n # 此处代码应该可以优化。\n # 问题:如何查询跨表字段中是否包含keyword\n # 思路1:在每个model中将__str__中返回的字段作为一个类属性,然后在此处处理时就会很轻松\n # 思路2(解决方法):先从表中查询所有的数据,然后对跨表的数据进行判断,返回判断为True的model对象\n # 思路2存在的问题:需要查询一遍整个表,虽然QuerySet的惰性机制和缓存机制已经减少了很多冗余的操作\n qs = []\n for obj in self.model.objects.filter(q):\n qs.append(obj)\n for obj in self.model.objects.all():\n for field in self.cross_table_fields:\n if getattr(obj, field.name).__str__().find(keyword) != -1 and obj not in qs:\n qs.append(obj)\n data_list = []\n for obj in qs:\n data = []\n for field in self.fields:\n if field.name.find(\"password\") == -1:\n data.append(getattr(obj, field.name))\n data_list.append(data)\n for data in data_list:\n ret[\"html\"] += \"\"\"\n \n \n \n \n \"\"\"\n for item in data:\n # 此处为返回前端的数据进行过滤\n item = item[:40:] if isinstance(item, str) else item\n # item = item.strftime(\"%Y年%m月%d日 %H:%M\") if isinstance(item, datetime.datetime) else item\n # try:\n # item = item.strftime(\"%Y{0}%m{1}%d{2} %H:%M\".format(\"年\", \"月\", \"日\"))\n # except Exception as e:\n # item = item\n # print(e)\n ret[\"html\"] += \"\"\"\n \n {}\n \n \"\"\".format(item)\n ret[\"html\"] += \"\"\"\n \n 修改\n \n \n \n \"\"\".format(data[0])\n return JsonResponse(ret)\n\n def get_urls(self):\n temp = []\n temp.append(url(r'^$', self.list_view))\n temp.append(url(r'add/$', self.add))\n temp.append(url(r'^(\\d+)/update/$', self.update))\n temp.append(url(r'^(\\d+)/delete/$', self.delete))\n temp.append(url(r'^batch_delete/$', self.delete))\n temp.append(url(r'^search_data/$', self.search_data))\n return temp\n\n @property\n def urls2(self):\n return self.get_urls(), None, None\n\n\nclass XAdminSite(object):\n \"\"\"\n 自己定义的一个类似于django的\n AdminSet\n 用于扩展django的admin模块的功能\n \"\"\"\n\n def __init__(self):\n self._registry = {} # model_class class -> admin_class instance\n\n def register(self, model, admin_class=None):\n \"\"\"\n 用指定的管理类注册给定的app.models的类\n 被注册的应该是一个类,而不是一个对象\n 如果没有给出管理类,它将使用默认的ModelXAdmin作为管理类\n 被注册的类不应该是一个抽象类\n \"\"\"\n if not admin_class:\n admin_class = ModelXAdmin\n self._registry[model] = admin_class(model, self)\n\n def get_urls(self):\n temp = []\n for model, admin_class in self._registry.items():\n app_name = model._meta.app_label\n model_name = model._meta.model_name\n temp.append(url(r'^{0}/{1}/'.format(app_name, model_name), admin_class.urls2))\n temp.append(url(r'^$', self.index))\n temp.append(url(r'^search_models/', self.search_models))\n return temp\n\n @property\n def urls(self):\n return self.get_urls(), None, None\n\n def index(self, request):\n links = []\n for model in self._registry.keys():\n links.append(model._meta.app_label + \"/\" + model._meta.model_name)\n 
return render(request, \"xadmin/xadmin_index.html\", locals())\n\n def search_models(self, request):\n if request.is_ajax():\n keyword = request.POST.get(\"keyword\")\n ret = {\"status\": False, \"html\": \"\"}\n if not len(keyword):\n return JsonResponse(ret)\n ret[\"status\"] = True\n for model in self._registry.keys():\n model_name = model._meta.model_name\n app_label = model._meta.app_label\n if model_name.find(keyword) != -1 or app_label.find(keyword) != -1:\n ret[\"html\"] += \"\"\"\n \n \n {0}/{1}\n \n \n \"\"\".format(app_label, model_name)\n return JsonResponse(ret)\n\n\nx_admin_site = XAdminSite()\n","sub_path":"xadmin/service/xadmin.py","file_name":"xadmin.py","file_ext":"py","file_size_in_byte":12152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115083380","text":"#!/usr/bin/env python\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport test\n\n# HTTPRequestHandler class\nclass testHTTPServer_RequestHandler(BaseHTTPRequestHandler):\n\n # GET\n def do_GET(self):\n filename = test.getme()\n # Send response status code\n self.send_response(200)\n\n # Send headers\n self.send_header('Content-type','text/html')\n self.end_headers()\n\n with open('index.html', 'r') as myfile:\n data=myfile.read().replace('\\n', '')\n # Send message back to client\n message = 'Hello'\n # Write content as utf-8 data\n self.wfile.write(bytes(data, \"utf8\"))\n return\n\ndef run():\n print('starting server...')\n\n # Server settings\n # Choose port 8080, for port 80, which is normally used for a http server, you need root access\n server_address = ('0.0.0.0', 8081)\n httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)\n print('running server...')\n httpd.serve_forever()\n\n\nrun()\n","sub_path":"reports/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60534362","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n # Cleaner solution\n cur = head\n while cur and cur.next != None:\n if cur.next.val == cur.val:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return head\n\n # My solution\n # main logic took about 11 mins\n # but then found error and took an aditional 15 min to debug\n # 'cur = prev ' was the solution\n cur = head\n while cur and cur.next != None:\n prev = cur\n cur = cur.next\n if cur.val == prev.val:\n prev.next = cur.next\n cur = prev\n return head","sub_path":"0-99/83_remove_duplicates_from_sorted_list.py","file_name":"83_remove_duplicates_from_sorted_list.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639392555","text":"\"\"\"User Action - init\n\nRevision ID: 21c2572c7728\nRevises: 6481968513d0\nCreate Date: 2019-09-23 20:26:42.979666\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '21c2572c7728'\ndown_revision = '6481968513d0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('action_type',\n sa.Column('create_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('update_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('video_type',\n sa.Column('create_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('update_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('video',\n sa.Column('create_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('update_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('id', sa.String(length=64), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.Column('fan_page_id', sa.String(length=64), nullable=True),\n sa.Column('video_type_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['video_type_id'], ['video_type.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user_action',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('create_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('update_at', sa.TIMESTAMP(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True),\n sa.Column('user_id', sa.String(length=64), nullable=True),\n sa.Column('action_type_id', sa.Integer(), nullable=True),\n sa.Column('video_id', sa.String(length=64), nullable=True),\n sa.Column('video_time', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['action_type_id'], ['action_type.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['video_id'], ['video.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
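# --- roughly the declarative model the user_action table above maps to
# (a sketch in SQLAlchemy 1.4+ style; the class and attribute names are
# assumptions, and the create_at/update_at timestamp columns are omitted):
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class UserAction(Base):
    __tablename__ = "user_action"
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    user_id = sa.Column(sa.String(64), sa.ForeignKey("user.id"))
    action_type_id = sa.Column(sa.Integer, sa.ForeignKey("action_type.id"))
    video_id = sa.Column(sa.String(64), sa.ForeignKey("video.id"))
    video_time = sa.Column(sa.Integer)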
###\n op.drop_table('user_action')\n op.drop_table('video')\n op.drop_table('video_type')\n op.drop_table('action_type')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/21c2572c7728_user_action_init.py","file_name":"21c2572c7728_user_action_init.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129994395","text":"# coding=utf-8\n\nimport numpy as np\nfrom sklearn import neighbors\n\nfrom demo1 import file2matrix\n\ndata_set, labels = file2matrix(\"dataSet.txt\")\ntraining_set = data_set[200:, :]\ntraining_labels = np.array(labels[200:])\ntesting_set = data_set[:200, :]\ntesting_labels = np.array(labels[:200])\n\nclf = neighbors.KNeighborsClassifier(n_neighbors=3)\nclf.fit(training_set, training_labels)\n\npredicted_label = clf.predict(testing_set)\nprint(\"predicted label:\\n\", predicted_label)\n\nscore = clf.score(testing_set, testing_labels)\nprint(\"score: %f\" % score)\n\nprint(clf.predict_proba(testing_set))\n","sub_path":"NearestNeighbors/dating_demos/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"487373414","text":"# Import numpy and cv2 package\nimport cv2\nimport numpy as np\n\n# Reading original image from the disk as grayscale image\nimg= cv2.imread('low_illum.jpg',0)\n\n#Smoothening the image using different kernel size and sigmaX & sigmaY\nG_smooth1= cv2.GaussianBlur(src=img, ksize=(3,3),sigmaX=3,sigmaY=3)\n\nG_smooth2= cv2.GaussianBlur(src=img, ksize=(3,3),sigmaX=10,sigmaY=10)\n\nG_smooth3= cv2.GaussianBlur(src=img, ksize=(5,5),sigmaX=3,sigmaY=3)\n\n# Displaying the original image and all the output images using different kernel and sigmaX & sigmaY, together\nimg1= np.concatenate((img,G_smooth1,G_smooth2,G_smooth3), axis=1)\ncv2.imshow('Problem6_Output',img1)\n\n# Display the images and wait till any key is pressed\ncv2.waitKey(0)\n# Destroy all the windows created by the imshow() function of the OpenCV\ncv2.destroyAllWindows()","sub_path":"OpenCv-Assignment1/Group23Team46Assignment1/Code/Prob6.py","file_name":"Prob6.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393421505","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 13 18:18:51 2019\n\n@author: xiaoxuan\n\"\"\"\n\nimport gurobipy as gp\n\n# Sets\nPorts = ['Manly','Cleveland','Dunwich']\nP = range(len(Ports))\nB = range(18)\nCap = [8,8,6]\n\n# Data\nTravel = [\n\t[29, 27, 21], [39, 18, 30], [40, 20, 31], [33, 19, 27], [35, 29, 36], [21, 23, 20],\n\t[30, 41, 32], [37, 27, 36], [20, 25, 34], [36, 28, 20], [24, 23, 25], [38, 22, 40], \n\t[39, 19, 27], [30, 18, 28], [40, 20, 32], [21, 32, 40], [23, 18, 20], [31, 18, 20]\n]\n\n# Question a\nm1 = gp.Model('Boats a')\n\nX1 = {}\nfor b in B:\n for p in P:\n X1[(b,p)] = m1.addVar(vtype = gp.GRB.BINARY)\n\nm1.setObjective(gp.quicksum(Travel[b][p]*X1[(b,p)] for b in B for p in P), gp.GRB.MINIMIZE)\n\n# Constraints\nfor b in B:\n m1.addConstr(gp.quicksum(X1[b,p] for p in P) == 1)\n \nfor p in P:\n m1.addConstr(gp.quicksum(X1[b,p] for b in B) <= Cap[p])\n\nm1.optimize()\n\n# Results\nprint(\"Total time: \", m1.objVal)\nprint(\"Maximum time: \", max(Travel[b][p] for b in B for p in P if X1[b,p].x == 1))\n\n# Question b\nm2 = gp.Model('Boats b')\n\nX2 = {}\nfor b in B:\n for p in P:\n X2[(b,p)] = 
m2.addVar(vtype = gp.GRB.BINARY)\n \n# MiniMax Objective Function\nZ = m2.addVar()\nm2.setObjective(Z, gp.GRB.MINIMIZE)\n\n# Constraints\nfor b in B:\n m2.addConstr(gp.quicksum(X2[b,p] for p in P) == 1)\n m2.addConstr(Z >= gp.quicksum(Travel[b][p]*X2[b,p] for p in P))\n \nfor p in P:\n m2.addConstr(gp.quicksum(X2[b,p] for b in B) <= Cap[p])\n \nm2.optimize()\n\n# Results\nprint(\"Total time: \", gp.quicksum(Travel[b][p]*X2[b,p] for b in B for p in P).getValue())\nprint(\"Maximum time: \", m2.objVal)","sub_path":"2-Integer_Programming/Boats.py","file_name":"Boats.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387106833","text":"# import image utilities\nimport os\nimport numpy as np\nimport cv2\n\nimages_path = './images/temp'\n\n\n# define a function that rotates images in the current directory\n# given the rotation in degrees as a parameter\ndef rotateImages():\n for f_name in os.listdir(images_path):\n file_path = os.path.normpath(os.path.join(images_path, f_name))\n img = cv2.imread(file_path)\n img_rotate_90_clockwise = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n cv2.imwrite(file_path, img_rotate_90_clockwise)\n\nrotateImages()\n","sub_path":"TensorFlow-2.0/workspace/training/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"563128306","text":"\n\nimport appium\nimport selenium\nimport selenium.webdriver.firefox.options\nimport os\n\nbrowsers = {'Windows 10':\n {'internet explorer': ['11.285'], 'MicrosoftEdge': ['18.17763'], 'chrome': ['71.0'], 'firefox': ['64.0']\n },\n \"macOS 10.14\":\n {'chrome': ['71.0'], 'firefox': ['64.0'], 'safari': ['12.0']}\n\n }\n\n\ndef desktop():\n\n caps = []\n for platform, brows in browsers.items():\n for browser, versions in brows.items():\n for vers in versions:\n cap = {'platform': platform,\n 'browserName': browser, 'version': vers}\n\n caps.append(\n {'driver_type': 'desktop',\n 'capabilities': cap})\n\n return caps\n\n\ndef local():\n options = selenium.webdriver.firefox.options.Options()\n options.set_headless(True)\n\n driver = selenium.webdriver.Firefox(options=options)\n caps = {}\n caps['browserName'] = \"firefox\"\n caps['platform'] = \"macOS 10.14\"\n return [{'driver_type': 'local', 'capabilities': caps}]\n\n\ndef mobile():\n\n caps = {}\n caps['browserName'] = \"Safari\"\n caps['appiumVersion'] = \"1.9.1\"\n caps['deviceName'] = \"iPhone XS Simulator\"\n caps['deviceOrientation'] = \"portrait\"\n caps['platformVersion'] = \"12.0\"\n caps['platformName'] = \"iOS\"\n return [{'capabilities': caps, 'driver_type': 'app'}]\n\n\ndef get_credentials():\n return {'username': os.environ['SAUCE_USERNAME'], 'access_key': os.environ['SAUCE_ACCESS_KEY']}\n\n\ndef start_driver(env, capabilities):\n if env == 'app':\n return app(capabilities)\n elif env == 'desktop':\n return remote(capabilities)\n else:\n options = selenium.webdriver.firefox.options.Options()\n options.set_headless(True)\n\n return selenium.webdriver.Firefox(options=options)\n\n\ndef remote(desired_cap):\n creds = get_credentials()\n driver = selenium.webdriver.Remote(\n command_executor='http://{username}:{key}@ondemand.saucelabs.com:80/wd/hub'.format(\n username=creds['username'],\n key=creds['access_key']),\n desired_capabilities=desired_cap)\n\n return driver\n\n\ndef app(desired_cap):\n creds = get_credentials()\n driver = appium.webdriver.Remote(\n 
command_executor='http://{username}:{key}@ondemand.saucelabs.com:80/wd/hub'.format(\n username=creds['username'],\n key=creds['access_key']),\n desired_capabilities=desired_cap)\n\n return driver\n","sub_path":"tests/automation/environments.py","file_name":"environments.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"155178255","text":"import time\r\nimport string\r\ndef greeting():\r\n print(\"Hello, let's encode or decode your message!\")\r\n time.sleep(.75)\r\n encode_decode = \"\"\r\n while encode_decode != 'encode' or 'decode':\r\n encode_decode = input(\"Do you want to encode or decode a message: \")\r\n if encode_decode.lower() == 'encode':\r\n return encode_cipher()\r\n elif encode_decode.lower() == 'decode':\r\n return decode_cipher()\r\n else:\r\n print(\"seems like you mistyped: \")\r\n\r\ndef encode_cipher():\r\n rule = \"Keep your cipher under 100 characters, and do not use spaces or symbols.\\n\"\r\n message = \"\"\r\n cipher = []\r\n alphabet = string.ascii_letters + string.digits\r\n while message == \"\":\r\n print(rule)\r\n message = input(\"What message would you like to encode: \")\r\n message = message.lower()\r\n print(\"\")\r\n if len(message) > 100:\r\n message = \"\"\r\n for letter in message:\r\n if letter not in alphabet:\r\n message = \"\"\r\n print(\"You had a letter not in the alphabet or symbol that could not be encoded.\")\r\n print(\"\")\r\n shift_number = 0\r\n while shift_number == 0:\r\n shift_number = input(\"(type only integers: 1, 2, 3... etc.)\\nYour number must be less than 26 \\nHow many characters would you like to shift: \")\r\n try:\r\n shift_number = int(shift_number)\r\n except:\r\n print(\"Positive Whole Numbers Only\")\r\n if not isinstance(shift_number, int):\r\n shift_number = 0\r\n if shift_number > 25:\r\n shift_number = 0\r\n alphabet = list(alphabet)\r\n for position in range(len(message)):\r\n char = message[position]\r\n for letter in alphabet:\r\n if letter == char:\r\n index = alphabet.index(letter)\r\n cipher_block = (index + shift_number - 1)\r\n cipher.append(alphabet[cipher_block])\r\n\r\n print(\"Your cipher is: \" + \"\".join(cipher))\r\n\r\ndef decode_cipher():\r\n alphabet = (string.ascii_letters + string.digits)\r\n rule = \"Your cipher should be under 100 characters, and does not use spaces or symbols.\\n\"\r\n message = \"\"\r\n cipher = []\r\n while message == \"\":\r\n print(rule)\r\n message = input(\"What cipher would you like to decode: \")\r\n print(\"\")\r\n if len(message) > 100:\r\n message = \"\"\r\n for letter in message:\r\n if letter not in alphabet:\r\n message = \"\"\r\n print(\"You had a letter not in the alphabet or symbol that could not be decoded.\")\r\n print(\"\")\r\n shift_number = 0\r\n while shift_number == 0:\r\n shift_number = input(\"What cipher block was used with your cipher?\\n(only type a whole number) : \")\r\n try:\r\n shift_number = int(shift_number)\r\n except:\r\n print(\"Positive Whole Numbers Only\")\r\n if not isinstance(shift_number, int):\r\n shift_number = 0\r\n if shift_number > 25:\r\n print(\"Number must be less than 26... 
are you sure you have the right cipher block?\")\r\n shift_number = 0\r\n alphabet = list(alphabet)\r\n for position in range(len(message)):\r\n char = message[position]\r\n for letter in alphabet:\r\n if letter == char:\r\n index = alphabet.index(letter)\r\n cipher_block = (index - shift_number + 1)\r\n cipher.append(alphabet[cipher_block])\r\n\r\n print(\"Your message is: \" + \"\".join(cipher))\r\n\r\ngreeting()\r\n","sub_path":"caesarCipher.py","file_name":"caesarCipher.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"517178671","text":"###########################################\n#\n# Converts the user_tweet files, preprocesses them and saves the information to SOLR\n#\n###########################################\nfrom setup_api import setup_api\nfrom content_remove_stopwords import content_remove_stopwords\nfrom read_user_stance_tweets_tokenized_pickles import read_user_stance_tweets_tokenized_pickles\nfrom nltk.tokenize import TweetTokenizer\nimport nltk\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport os\nimport json\nfrom tweepy import Status\nimport pysolr\nfrom flatten_json import flatten\nimport pickle\nfrom urllib.request import urlopen\nimport simplejson\nimport botometer\n\ndef load_seed_user():\n # reads the user_tweets for the seed user files\n seeduser_tweets_contratrump = read_user_stance_tweets_tokenized_pickles('seedusers_contratrump.txt')\n seeduser_tweets_protrump = read_user_stance_tweets_tokenized_pickles('seedusers_protrump.txt')\n \n user_tweets_joined_contratrump = []\n \n # need to convert the tweet array of each user to a \"document\"\n for user_tweet in seeduser_tweets_contratrump:\n user_tweet_joined = ''\n for tweet in user_tweet:\n user_tweet_joined += tweet\n user_tweet_joined += ' '\n \n user_tweets_joined_contratrump.append(user_tweet_joined)\n \n # need to convert all seeduser documents to ONE seeduser document for contratrump\n joined_seeduser_contratrump_tweets = ' '.join(user_tweets_joined_contratrump)\n \n user_tweets_joined_protrump = []\n \n # need to convert the tweet array of each user to a \"document\"\n for user_tweet in seeduser_tweets_protrump:\n user_tweet_joined = ''\n for tweet in user_tweet:\n user_tweet_joined += tweet\n user_tweet_joined += ' '\n \n user_tweets_joined_protrump.append(user_tweet_joined)\n \n # need to convert all seeduser documents to ONE seeduser document for PROTRUMP\n joined_seeduser_protrump_tweets = ' '.join(user_tweets_joined_protrump)\n \n # first column of the matrix depicts contratrump users, second column depcits protrump users\n final_seeduser = []\n final_seeduser.append(joined_seeduser_contratrump_tweets)\n final_seeduser.append(joined_seeduser_protrump_tweets)\n \n return final_seeduser\n\n\ndef findTweetStance(tf_idf_seed_user, tweets_tf_object, tweet):\n tweet_array = []\n tweet_array.append(tweet[\"text_cleaned\"])\n \n tf_idf_tweet = tweets_tf_object.transform(tweet_array)\n cosine = cosine_similarity(tf_idf_tweet, tf_idf_seed_user, True)\n \n if cosine[0][0] > cosine[0][1]:\n return 'contratrump-stance', cosine[0][0] - cosine[0][1]\n else:\n return 'protrump-stance', cosine[0][1] - cosine[0][0]\n \n\ndef remEntity(parent, child, doc):\n if parent in doc:\n if child in doc[parent]:\n del doc[parent][child] \n \n#list with indices to find the users\ndef findUserStance(user_tweets_classes_names, cosine_matrix, screen_name):\n for i in range(0, len(user_tweets_classes_names)):\n if 
(user_tweets_classes_names[i].Screen_name == screen_name):\n if cosine_matrix[i][0] > cosine_matrix[i][1]:\n return 'contratrump-stance', cosine_matrix[i][0] - cosine_matrix[i][1]\n else:\n return 'protrump-stance', cosine_matrix[i][1] - cosine_matrix[i][0]\n\ndef getBotRating(user_id):\n mashape_key = 'erKn4SCA3HmshTAcuzNfWGk9z1D6p1wLl6SjsnQlN1paNU043s'\n twitter_app_auth = {\n 'consumer_key': 'SS3bqusNGu8nyXiBWKFnIFjLS',\n 'consumer_secret': '1tw51M7qI3UEVbfZEILhjbHqCtAETBHNYGahI1xi8Vwhtsp8G8',\n 'access_token': '830038150609305601-F6av628VxDugsLYvFSI8nrePlLb11eY',\n 'access_token_secret': 'WQUAPjoQkvDJQ48qTt74rAh66Vnae9t6PGdHDXkl1ccoW',\n }\n \n bom = botometer.Botometer(wait_on_ratelimit=True,\n mashape_key=mashape_key,\n **twitter_app_auth)\n \n result = bom.check_account(user_id)\n \n return result\n\ndef cleanUnnecessaryData(doc):\n if 'entities' in doc:\n if 'media' in doc['entities']:\n del doc['entities']['media']\n \n if 'source' in doc:\n del doc['source'] \n \n if 'retweeted_status' in doc:\n del doc['retweeted_status']\n \n remEntity('user', 'utc_offset', doc)\n remEntity('user', 'profile_use_background_image', doc)\n remEntity('user','profile_image_url_https', doc)\n remEntity('user','default_profile_image', doc)\n remEntity('user','contributors_enabled', doc)\n remEntity('user','following', doc)\n remEntity('user','has_extended_profile', doc)\n remEntity('user','profile_background_tile', doc)\n remEntity('user','profile_text_color', doc)\n remEntity('user','is_translation_enabled', doc)\n remEntity('user','profile_banner_url', doc)\n remEntity('user','profile_background_image_url', doc)\n remEntity('user','profile_sidebar_border_color', doc)\n remEntity('user','profile_sidebar_fill_color', doc)\n remEntity('user','time_zone', doc)\n remEntity('user','translator_type', doc)\n remEntity('user','follow_request_sent', doc)\n remEntity('user','url', doc)\n remEntity('user','is_translator', doc)\n remEntity('user','default_profile', doc)\n remEntity('user','geo_enabled', doc) \n remEntity('user','notifications', doc)\n remEntity('user','profile_background_image_url_https', doc)\n remEntity('user','verified', doc)\n remEntity('user','profile_image_url', doc)\n remEntity('user','protected', doc)\n remEntity('user','profile_background_color', doc)\n remEntity('user','profile_link_color', doc)\n \n return doc\n\napi = setup_api()\ntweet_tokenizer = TweetTokenizer()\n\ncounter = 0\n\nprint('started reading user files...')\npath = os.path.dirname(os.path.abspath(__file__))\n\nsolr = pysolr.Solr('http://localhost:8983/solr/tweets_cleaned', timeout=30)\n\n# now find out, whether user is pro or against trump\nuser_tweets_classes_names = pickle.load(open(\"tf_idf/user_tweets_classes.pkl\", 'rb'))\ncosine_matrix = pickle.load(open(\"tf_idf/cosine_similarity_matrix_object.pkl\", 'rb'))\n\ntweets_tf_object = pickle.load(open(\"tf_idf/tweets_tf_object.pkl\", 'rb'))\n#user_tweets_tf_object = pickle.load(open(\"tf_idf/user_tf_object.pkl\", \"rb\" ) )\n\nfinal_seed_user = load_seed_user()\n\ntf_idf_seed_user = tweets_tf_object.transform(final_seed_user)\n\nfor file in os.listdir(path+\"/user_tweets\"):\n # for faster testing\n try:\n #print('user_tweets/' + file)\n with open('user_tweets/' + file) as data_file: \n tweets_user_from_file = json.load(data_file)\n user_tweets_cleaned = []\n \n user_screen_name = Status.parse(api,tweets_user_from_file[0]).user.screen_name\n user_id = Status.parse(api,tweets_user_from_file[0]).user.id\n print(user_id)\n print(user_screen_name)\n \n counter = 
counter+1\n print('number: {}'.format(counter))\n \n #check if bot was already added\n connection = urlopen('http://localhost:8983/solr/tweets_cleaned/select?fl=user_bot_score_universal,user_bot_score_english&q=user_screen_name:' + user_screen_name)\n users_solr_botscore = simplejson.load(connection)\n \n result = None\n \n if ('user_bot_score_universal' in users_solr_botscore['response']['docs'][0]):\n print('skipped_calc')\n continue\n else:\n print('calc bot rating')\n result = getBotRating(user_id)\n \n #add botrating from external lib\n \n for tweet in tweets_user_from_file:\n tweet_parsed = Status.parse(api,tweet)\n\n followers_count = tweet_parsed.user.followers_count\n user_lang = tweet_parsed.user.lang\n \n #preprocess the tweets, remove stopwords and tokenize them\n preproc = tweet_tokenizer.tokenize(tweet_parsed.text.lower())\n text_cleaned = ' '.join(content_remove_stopwords(preproc))\n \n doc = tweet_parsed._json\n doc['text_cleaned']=text_cleaned\n #doc['user.screen_name']=user_screen_name\n #doc['user.lang']=user_lang\n #doc['followers_count']=followers_count\n \n #remove unnecessary elements\n if 'extended_entities' in doc:\n del doc['extended_entities']\n \n if 'quoted_status' in doc:\n del doc['quoted_status']\n \n doc = cleanUnnecessaryData(doc)\n \n stance, diff_user = findUserStance(user_tweets_classes_names, cosine_matrix, user_screen_name)\n doc['user_stance']=stance\n doc['user_stance_cos_diff']=diff_user\n \n tweet_stance, diff_tweet = findTweetStance(tf_idf_seed_user, tweets_tf_object, tweet)\n doc['tweet_stance']=tweet_stance\n doc['tweet_stance_cos_diff'] = diff_tweet\n \n #bot_score_for_user\n doc['user_bot_score_universal'] = result['scores']['universal']\n doc['user_bot_score_english'] = result['scores']['english']\n \n user_tweets_cleaned.append(flatten(doc))\n \n #write it to Solr!\n print('adding tweets to solr...')\n solr.add(user_tweets_cleaned)\n print('done adding tweets to solr...')\n \n data_file.close()\n \n except Exception as e:\n print(str(e))\n pass\n\nprint('conversion of user tweet files finished...')","sub_path":"Software/Crawling/Workspace/Crawler/DataManipulator/__7binit_convert_user_tweets_to_solr_tweet_classification.py","file_name":"__7binit_convert_user_tweets_to_solr_tweet_classification.py","file_ext":"py","file_size_in_byte":9821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"440058690","text":"# Student: Clênio Borges Barboza Filho\n# Introduction to Programming 2019.2\n# Professor: Francisco\n\npeso1=0\npeso2=0\nprint(\"\\n\\n\\nO programa vai pedir o peso atual e calculará perca de 15% e acrescimo de 20%!\\n\\n\\n\\n\")\nvalor1=float(input(\"Informe o peso: \\n\"))\n#valor2=float(input(\"Informe o total de vendas: \\n\"))\n#valor3=float(input(\"Informe o terceiro valor: \\n\"))\npeso1= round(valor1-(valor1*0.15),2)\npeso2= round(valor1+(valor1*0.2),2)\nprint(\"\\n\\nSe emagrecer 15%:\",peso1)\nprint(\"Se engordar 20%:\",peso2)","sub_path":"Exercicios Aula/Exercicio7.py","file_name":"Exercicio7.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"161727956","text":"def maximum_prime(number):\n prime=int(number/2)#Start searching for the largest divisors from here\n enumerator=0\n while True:\n for i in range(2, prime-1):#Primality check \n if (prime%i==0):\n enumerator=enumerator+1\n break # If it is composite, exit the checking loop\n continue\n if ((enumerator==0) and
(number%prime==0)):#If it is prime and also a divisor\n print(prime)\n break\n enumerator=0\n prime=prime-1\n continue\n \n \n","sub_path":"max_prime.py","file_name":"max_prime.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"455039605","text":"#coding=utf-8\nimport tornado.web\nimport tornado.ioloop\nimport tornado.httpserver\nimport json\n\nfrom tornado.web import RequestHandler,MissingArgumentError\nfrom tornado.options import options,define\n\ndefine('port',default='8888',type=int,help='set run port')\n'''\n\tConclusion:\n\t\tThis file mainly verifies that, besides the status code returned by the server, you can also set the status code yourself. Remember: if it is a standard status code, there is no need to set a reason, as it will default to the usual one,\n\t\tbut if it is a self-defined status code, the reason must be written out, otherwise an error will be raised\n\n\n'''\nclass IndexHandler(RequestHandler):\n\tdef set_default_headers(self):\n\t\t'''\n\t\t\tThis method is called before the HTTP handler methods are entered; it can be overridden to preset default headers. Note: using the set_header() method inside an HTTP handler method will override a header of the same name set in set_default_headers().\n\n\t\t'''\n\t\t# self.set_header(\"Content-Type\",'Application/json;charset=UTF-8')\n\t\tself.set_header(\"itcast\", \"python\")\n\n\n\n\n\n\n\n\tdef get(self):\n\t\tdic = {\n\t\t\"name\":'liianzhu',\n\t\t'age':12,\n\t\t'tel':12312,\n\t\t}\n\n\n\t\t\n\t\tdic_dump = json.dumps(dic)\n\t\t# This is a standard status code, so no reason needs to be written\n\t\t# self.set_status(404)\n\n\n\t\t# This is a custom status code, so the reason must be written\n\t\tself.set_status(444,'lixianzhu')\n\t\tself.write(dic_dump)\n\t\t\n\n\n\nif __name__ == '__main__':\n\tapp = tornado.web.Application([\n\t(r'/',IndexHandler) \n\n\n\n\n\n\t],\n\n\n\tdebug=True\n\n\n\n\t\t\t\t\t\t\t\t )\n\n\n\thttp_server = tornado.httpserver.HTTPServer(app)\n\thttp_server.listen(options.port)\n\n\ttornado.ioloop.IOLoop.current().start()\n","sub_path":"twoDay/task/03_set_status.py","file_name":"03_set_status.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"399197810","text":"from bs4 import BeautifulSoup\nimport re\n\ndef prettify(webData):\n soup = BeautifulSoup(webData, \"html.parser\")\n\n data = []\n for i in soup.find_all(class_=\"rich-panel-body\"):\n course = re.sub(' \\(S[1-2](|,S[1-2])\\)', '', i.find(class_=\"dailyGradeCourseNameColumn\").get_text())\n grade = re.sub(r'^\\xa0$', 'N/A', i.find(class_=\"dailyGradeGroupColumn\").get_text().split(\": \")[1])\n assignments = []\n for row in i.find_all(class_=\"rich-table-row\"):\n assignment = {\n 'name': row.find(class_=\"dailyGradeAssignmentColumn\").get_text().replace('\\n', ''),\n 'earned': formatGrade(row.find(class_=\"dailyGradeScoreColumn\").get_text()),\n 'possible': formatGrade(row.find(class_=\"dailyGradePossibleColumn\").get_text()),\n 'due': row.find(class_=\"dailyGradeDueDateColumn\").get_text()\n }\n assignment['score'] = '{}/{}'.format(re.sub(r'^\\xa0$', '?? ', assignment['earned']), assignment['possible'])\n assignments.append(assignment)\n scores = [x['score'] for x in assignments]\n convertedScores = []\n missingAssignments = []\n\n for i in scores:\n try:\n if i.split('/')[0] in ['?? 
', 'M']:\n missingAssignments.append((0, float(i.split('/')[1])))\n else:\n convertedScores.append((float(i.split('/')[0]), float(i.split('/')[1])))\n except ValueError:\n pass\n\n if len(convertedScores) == 0 and len(assignments) > 0:\n grade = 'N/A'\n elif len(convertedScores) > 0:\n grade = genGrade(convertedScores + missingAssignments)\n\n dataPoint = {}\n if len(convertedScores) > 0:\n dataPoint['analytics'] = {}\n dataPoint['analytics']['drop'] = dropAssignments(convertedScores)\n dataPoint['analytics']['gradeSansMissing'] = genSansMissing(convertedScores)\n\n dataPoint.update({'class': course,\n 'grade': grade,\n 'assignments': assignments})\n data.append(dataPoint)\n return data\n\ndef isFloat(num) -> bool:\n try:\n float(num)\n return True\n except ValueError:\n return False\n\ndef formatGrade(grade: str):\n if grade.endswith('.00'):\n return grade.split('.')[0]\n else:\n return grade\n\ndef genSansMissing(convertedScores):\n return 'Not counting missing assignments, you have a {}'.format(genGrade(convertedScores))\n\ndef genGrade(convertedScores):\n grade = sum([i[0] for i in convertedScores])\n total = sum([i[1] for i in convertedScores])\n\n average = str(round((grade / total * 100), 2)) + '%'\n\n return average\n\ndef dropAssignments(convertedScores):\n\n keys = [i[0] for i in convertedScores]\n values = [i[1] for i in convertedScores]\n\n grade = sum(keys)\n total = sum(values)\n\n average = (grade/total)\n avgAssignment = total / len(values)\n letterBottom = (int(average*10)/10)\n if letterBottom == 1.0:\n letterBottom = 0.9\n elif letterBottom == 0.0:\n return ''\n pointsLost = int((grade * (1 / letterBottom)) - total)\n assignmentsLost = round(pointsLost / avgAssignment, 2)\n\n return 'You can afford to lose {} points (an average of {} assignments) ' \\\n 'before dropping {}'.format(pointsLost, assignmentsLost, letters[letterBottom])\n\nletters = {\n 0.9: 'to a B',\n 0.8: 'to a C',\n 0.7: 'to a D',\n 0.6: 'to an F',\n 0.5: 'below 50%',\n 0.4: 'below 40%',\n 0.3: 'below 30%',\n 0.2: 'below 20%',\n 0.1: 'below 10%',\n}","sub_path":"backpack/pretty.py","file_name":"pretty.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"210518274","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.db.utils import OperationalError\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom Dining.models import UserDiningSettings\nfrom .models import User, Association, UserMembership\n\n\nclass RegisterUserForm(UserCreationForm):\n class Meta:\n model = User\n fields = ('username', 'password1', 'password2', 'email')\n\n\nclass RegisterUserDetails(forms.ModelForm):\n first_name = forms.CharField(max_length=40, required=True)\n last_name = forms.CharField(max_length=40, required=True)\n allergies = forms.CharField(max_length=100, required=False, help_text=\"Max 100 characters, leave empty if none\")\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'allergies']\n\n def save_as(self, user):\n user.first_name = self.cleaned_data.get('first_name')\n user.last_name = self.cleaned_data.get('last_name')\n user.userdiningsettings.allergies = self.cleaned_data.get('allergies')\n user.save()\n user.userdiningsettings.save()\n\n\nclass DiningProfileForm(ModelForm):\n class Meta:\n model = 
UserDiningSettings\n fields = ['allergies']\n\n\nclass UserForm(ModelForm):\n name = forms.CharField(required=False)\n\n class Meta:\n model = User\n fields = ['username', 'name', 'email']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].disabled = True\n self.fields['name'].initial = str(self.instance)\n self.fields['email'].disabled = True\n\n\nclass AssociationLinkField(forms.BooleanField):\n \"\"\"\n A special BooleanField model for association links.\n Can also indicate current validation state and auto-sets initial value\n \"\"\"\n\n def __init__(self, user, association, *args, **kwargs):\n super(AssociationLinkField, self).__init__(*args, **kwargs)\n\n self.initial = False\n self.required = False\n self.label = association.name\n self.user = user\n self.association = association\n self.membership = None\n\n # Find the membership, if any\n if user is not None:\n try:\n self.membership = association.usermembership_set.get(related_user=user)\n self.initial = self.membership.is_member()\n if self.membership.get_verified_state() is None:\n self.pending = True\n\n # Check how recently the member has been verified or not. If too recent, block change\n if self.membership.verified_on is not None:\n if self.membership.is_verified:\n if self.membership.verified_on + \\\n settings.DURATION_AFTER_MEMBERSHIP_CONFIRMATION > timezone.now():\n # The user has been verified to recently (prevent spamming)\n self.disabled = True\n else:\n if self.membership.verified_on + \\\n settings.DURATION_AFTER_MEMBERSHIP_REJECTION > timezone.now():\n # The user has been verified not to be a member to recently (prevent spamming)\n self.disabled = True\n\n except UserMembership.DoesNotExist:\n pass\n if association is None:\n raise ValueError(\"Association can not be None\")\n\n def verified(self):\n if self.membership is None:\n return None\n return self.membership.get_verified_state()\n\n def get_membership_model(self, user=None, new_value=True):\n # Check input data for correctness\n if self.user is None and user is None:\n raise ValueError(\"Field does not contain user and user was not given in method\")\n if user is not None and self.user is not None and self.user != user:\n raise ValueError(\"Given user differs from field user\")\n\n if self.membership is not None:\n return self.membership\n if self.user is not None:\n # If there was a user given, but the link was not found. Create a new link if allowed\n if new_value:\n return UserMembership(related_user=self.user, association=self.association)\n else:\n # user originally not given. 
Try to find the link\n try:\n return self.association.usermembership_set.get(related_user=user)\n except UserMembership.DoesNotExist:\n if new_value:\n return UserMembership(related_user=user, association=self.association)\n return None\n\n\nclass AssociationLinkForm(forms.Form):\n\n def __init__(self, user, *args, **kwargs):\n super(AssociationLinkForm, self).__init__(*args, **kwargs)\n\n self.user = user\n\n if user is None:\n associations = Association.objects.filter(is_choosable=True)\n else:\n associations = Association.objects.filter(\n Q(is_choosable=True) |\n (Q(is_choosable=False) & Q(usermembership__related_user=user))) \\\n .order_by('slug')\n\n # Get all associations and make a checkbox field\n for association in associations:\n field = AssociationLinkField(user, association)\n # (using the slug since HTML IDs may not contain spaces)\n self.fields[association.slug] = field\n\n def clean(self):\n cleaned_data = super().clean()\n # Check if user is assigned to at least one association\n has_association = True in self.cleaned_data.values()\n\n if not has_association:\n raise ValidationError(\"At least one association needs to be chosen\")\n\n return cleaned_data\n\n def save(self, user=None):\n \"\"\"\n Saves the association links by creating or removing UserMembership instances.\n :return:\n \"\"\"\n if not self.user and not user:\n raise ValueError(\"Both self.user and user are None\")\n if user is None:\n user = self.user\n\n for key, value in self.cleaned_data.items():\n link = self.fields[key].get_membership_model(user, new_value=value)\n if value:\n if link.id is None:\n link.save()\n elif link.get_verified_state() == False:\n # If user was rejected, and a new request is entered\n link.verified_on = None\n link.save()\n else:\n if link and link.get_verified_state() != False:\n link.delete()\n","sub_path":"UserDetails/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574016055","text":"import logging\nimport requests\n\nfrom django.conf import settings\nfrom django.template import Context\nfrom django.template.loader import get_template\n\nfrom fearless_parents import constants\n\n\nLOG = logging.getLogger(__name__)\n\ndef send_simple_message(receiver_name, receiver_email,\n email_subject, email_template,\n from_email=constants.EMAIL_DEFAULT):\n\n try:\n requests.post(\n settings.MAILGUN_API_URL + \"/messages\",\n data={\"from\": from_email,\n \"to\": [receiver_email],\n \"subject\": email_subject,\n \"html\": get_template(email_template).render(Context({\n 'name': receiver_name\n }))\n })\n except Exception as e:\n LOG.error(\"Failed to send email: \" + e.message)\n return False\n else:\n return True\n","sub_path":"fearless_parents/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"220739360","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\"\"\"Converters for jax2tf.\"\"\"\nimport functools\nimport numpy as np\nimport tempfile\nfrom typing import Any, Callable, Tuple\n\nfrom jax._src import dtypes\nimport jax.numpy as jnp\nfrom jax.experimental import jax2tf\nfrom jax.experimental.jax2tf.examples import saved_model_lib\nfrom jax.experimental.jax2tf.converters_eval import converters_eval_lib as lib\nimport tensorflow as tf\nfrom tensorflowjs.converters import converter as tfjs_converter\n\nArray = Any\nTempDir = tempfile.TemporaryDirectory\n\nDEFAULT_RTOL = 1e-05\n\n\ndef _jax2tf(jax_fn: Callable[..., Any], input_shape: Tuple[int, ...], dtype: Any, *, enable_xla: bool = True):\n \"\"\"Converts the given `jax_fn` to TF using jax2tf and returns `(tf_fn, concrete_fn)`.\"\"\"\n tf_fn = tf.function(\n jax2tf.convert(jax_fn, enable_xla=enable_xla),\n input_signature=[\n tf.TensorSpec(\n shape=input_shape,\n dtype=dtype,\n name='input')\n ],\n autograph=False)\n concrete_fn = tf_fn.get_concrete_function()\n return tf_fn, concrete_fn\n\n\ndef _get_random_data(dtype: jnp.dtype, shape: Tuple[int, ...], seed=0) -> Any:\n dtype = dtypes.canonicalize_dtype(dtype)\n np.random.seed(seed)\n # Adjust the max values of the numbers based on the seed, so different seeds\n # result in different ranges.\n max_value = max(1, 100*seed)\n if np.issubdtype(dtype, np.integer):\n return np.random.randint(0, max_value, size=shape, dtype=dtype)\n elif np.issubdtype(dtype, np.floating):\n return np.array(np.random.uniform(size=shape), dtype=dtype) * max_value\n elif dtype == np.bool:\n return np.random.choice(a=[False, True], size=shape)\n else:\n raise ValueError(f\"Unsupported dtype for numerical comparison: {dtype}\")\n\n\ndef _compare(jax_fn: Callable[..., Any],\n tf_fn: Callable[..., Any],\n module: lib.ModuleToConvert,\n comparison: str,\n nr_runs: int = 5):\n rtol = DEFAULT_RTOL\n if module.rtol:\n rtol = module.rtol\n for i in range(nr_runs):\n input_data = _get_random_data(module.dtype, module.input_shape, seed=i)\n # A function may return multiple arrays, which may be of different shapes.\n # We can't just input the tuple into `np.allclose` since it will cast the\n # tuple to an array, which will break if the shapes in the tuple are\n # different. Therefore we iterate over the tuple explicitly.\n wrap_tuple = lambda x: (x,) if not isinstance(x, tuple) else x\n jax_results = wrap_tuple(jax_fn(input_data))\n tf_results = wrap_tuple(tf_fn(input_data))\n\n if len(tf_results) != len(jax_results):\n raise ValueError(f\"For {comparison}: returned output tuples lengths do not\"\n f\"match: TF length vs JAX length: {len(tf_results)} != \"\n f\"{len(jax_results)}\")\n\n for jax_result, tf_result in zip(jax_results, tf_results):\n np.testing.assert_allclose(jax_result, tf_result, rtol)\n # TFLite doesn't allow existing references to its data when it is\n # invoked. 
We therefore delete TF results so we can run multiple inputs on\n # the same interpreter.\n del tf_result\n del tf_results\n\n\ndef jax2tf_xla(module: lib.ModuleToConvert):\n \"\"\"Converts the given `module` using the jax2tf emitter with enable_xla=True.\"\"\"\n apply = functools.partial(module.apply, module.variables)\n _, apply_tf = _jax2tf(apply, module.input_shape, module.dtype)\n _compare(apply, apply_tf, module, \"JAX vs TF (enable_xla=True)\")\n\n\ndef jax2tf_to_tfjs(module: lib.ModuleToConvert):\n \"\"\"Converts the given `module` using the TFjs converter.\"\"\"\n with TempDir() as saved_model_path, TempDir() as converted_model_path:\n # the model must be converted with with_gradient set to True to be able to\n # convert the saved model to TF.js, as \"PreventGradient\" is not supported\n saved_model_lib.convert_and_save_model(\n module.apply,\n module.variables,\n saved_model_path,\n input_signatures=[\n tf.TensorSpec(\n shape=module.input_shape,\n dtype=module.dtype,\n name='input')\n ],\n with_gradient=True,\n compile_model=False,\n enable_xla=False\n )\n tfjs_converter.convert([saved_model_path, converted_model_path])\n\n # TODO(marcvanzee): Add numerical comparison for TFjs as well.\n\n\ndef jax2tf_to_tflite(module: lib.ModuleToConvert):\n \"\"\"Converts the given `module` using the TFLite converter.\"\"\"\n apply = functools.partial(module.apply, module.variables)\n tf_fn, apply_tf = _jax2tf(apply, module.input_shape, module.dtype, enable_xla=False)\n\n # First compare JAX output with TF output.\n _compare(apply, apply_tf, module, \"JAX vs TF (enable_xla=False)\")\n\n # Convert TF function to TF Lite format.\n converter = tf.lite.TFLiteConverter.from_concrete_functions([apply_tf], tf_fn)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.\n tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.\n ]\n # Convert the model.\n tflite_model = converter.convert()\n\n # Construct an interpreter for doing a numerical comparison.\n interpreter = tf.lite.Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n # We assume a single input, but we allows multiple outputs.\n inputs = interpreter.get_input_details()[0]\n output_details = interpreter.get_output_details()\n outputs = tuple(interpreter.tensor(out[\"index\"]) for out in output_details)\n\n def apply_tflite(input_data):\n interpreter.set_tensor(inputs['index'], input_data)\n interpreter.invoke()\n if len(outputs) > 1:\n return tuple(o() for o in outputs)\n else:\n return outputs[0]()\n\n _compare(apply, apply_tflite, module, \"JAX vs TFLite\")\n","sub_path":"jax/experimental/jax2tf/converters_eval/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"355393190","text":"#!/usr/bin/python3.5 -u\n\nimport os\nimport subprocess\nimport sys\nimport time\n\nfrom libCore import *\n\n\ndef RunRemap(forceFlag = False):\n retVal = True\n\n Log(\"Running Remap.py\")\n try:\n if forceFlag:\n subprocess.check_call((\"Remap.py --force\").split())\n else:\n subprocess.check_call((\"Remap.py\").split())\n except Exception as e:\n retVal = False\n\n return retVal\n\n\ndef Main():\n retVal = False\n\n if len(sys.argv) > 2 or (len(sys.argv) >= 2 and sys.argv[1] == \"--help\"):\n print(\"Usage: %s\" % os.path.basename(sys.argv[0]))\n sys.exit(-1)\n\n forceFlag = False\n if len(sys.argv) == 2 and sys.argv[1] == \"--force\":\n forceFlag = 
True\n\n service__serviceDetail = RunInfo.GetServiceMap()\n\n serviceList = sorted(service__serviceDetail.keys())\n\n ss = ServerState()\n\n if ss.GetStateLock():\n state = ss.GetState()\n\n movePastState = True\n if not state == \"CLOSED\":\n movePastState = False\n\n if forceFlag:\n movePastState = True\n Log(\"Force starting despite state %s \"\n \"not being CLOSED\" % state)\n\n if movePastState:\n if RunRemap(forceFlag):\n ss.SetState(\"STARTING\")\n\n Log(\"Starting all services\")\n for service in serviceList:\n if not RunInfo.ServiceIsRunning(service):\n try:\n cmd = \"StartProcess.py \" + service\n subprocess.check_call(cmd.split())\n retVal = True\n except Exception as e:\n retVal = False\n else:\n Log(\"Service %s already running, no action taken\" % service)\n retVal = True\n\n ss.SetState(\"STARTED\")\n else:\n Log(\"Remap failed, quitting\")\n else:\n Log(\"State %s, needs to be CLOSED, quitting\" % state)\n\n ss.ReleaseStateLock()\n else:\n Log(\"State locked, operation in progress elsewhere, quitting\")\n\n Log(\"Done\")\n\n return retVal == False\n\n\nsys.exit(Main())\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/StartServer.py","file_name":"StartServer.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102622258","text":"import json\nimport os\nimport boto3\nimport sys\nfrom colorama import Fore, Back, Style\n\nregion = 'us-east-1'\nec2 = boto3.client('ec2', region_name=region)\necs = boto3.client('ecs', region_name=region)\nasg = boto3.client('autoscaling', region_name=region)\ncf = boto3.client('cloudformation', region_name=region)\n\n\ndef get_env_name(asg_name):\n env_name = ''\n if '-prod' in asg_name:\n env_name = 'prod'\n elif '-stage' in asg_name:\n env_name = 'stage'\n elif '-latest' in asg_name:\n env_name = 'latest'\n elif '-load' in asg_name:\n env_name = 'load'\n elif '-shadow' in asg_name:\n env_name = 'shadow'\n elif '-training' in asg_name:\n env_name = 'training'\n elif '-train' in asg_name:\n env_name = 'training'\n else:\n env_name = 'NONE'\n\n return env_name\n\nall_asg = []\ndesc_asg = asg.describe_auto_scaling_groups()\n#print(desc_asg)\n\nnext_token = True\nwhile next_token != False:\n #print(len(clusters['clusterArns']))\n for group in desc_asg['AutoScalingGroups']:\n all_asg.append(group)\n\n if 'NextToken' in desc_asg:\n next_token = True\n desc_asg = asg.describe_auto_scaling_groups(\n NextToken = desc_asg['NextToken']\n )\n else:\n next_token = False\n\n\n#print(Style.RESET_ALL)\nprint('total asgs\\t' + str(len(all_asg)))\n#input('proceed/....................??')\nfor group in all_asg:\n try:\n input('proceed ???')\n \n asg_name = group['AutoScalingGroupName']\n envname = get_env_name(asg_name)\n \n #if envname == 'NONE':\n print(Fore.GREEN + asg_name + ' => ' + envname)\n print(Style.RESET_ALL) \n\n pace_tag = [tag['Value'] for tag in group['Tags'] if tag['Key'] == 'pace_env']\n \n if envname != 'NONE': \n if len(pace_tag) == 0:\n \n print('adding tag to asg:\\t' + asg_name)\n add_asg_tag_resp = asg.create_or_update_tags(\n Tags=[{\n 'ResourceId': asg_name,\n 'ResourceType': 'auto-scaling-group',\n 'Key': 'pace_env',\n 'Value': envname,\n 'PropagateAtLaunch': True\n },]\n )\n print(Fore.GREEN + '\\t*Tag has been updated to asg:\\t' + asg_name)\n print(Style.RESET_ALL)\n else:\n print(Style.RESET_ALL + '\\t*Tag already exist:\\t' + asg_name) \n \n else:\n print(Fore.RED + '\\t*Skipping asg as no environment found:\\t' + asg_name)\n 
print(Style.RESET_ALL)\n\n\n if envname != 'NONE':\n\n #tagging ec2 instances under autoscaling\n asg_instances = [instance['InstanceId'] for instance in group['Instances']]\n\n #print(asg_instances)\n\n for instance in asg_instances:\n instance_tags = ec2.describe_tags(\n Filters=[{\n 'Name': 'resource-id',\n 'Values': [ instance ]\n }]\n )\n instance_tags = [tag for tag in instance_tags['Tags'] if tag['Key'] == 'pace_env']\n if len(instance_tags) == 0:\n add_tag = ec2.create_tags(\n Resources=[ instance ],\n Tags=[{\n 'Key': 'pace_env',\n 'Value': envname\n }]\n )\n\n print(Fore.GREEN + '\\t\\t* Tag has been updated to instance:\\t' + instance)\n print(Style.RESET_ALL)\n else:\n print(Style.RESET_ALL + '\\t\\t* Tag already exist.....:\\t' + instance)\n \n# #input('proceed with instance tagging//...???') \n\n except Exception as e:\n print(Fore.RED + str(e))\n\n print(Style.RESET_ALL)\n\n","sub_path":"python scripts copy/tagging/pace_env/asg_env.py","file_name":"asg_env.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"353335862","text":"import os\r\nimport signal\r\n\r\n\r\nrunning = True\r\n\r\n\r\nif hasattr(os, 'kill'):\r\n def shutdown(pid=os.getpid()):\r\n os.kill(pid, signal.SIGTERM)\r\nelse:\r\n import ctypes\r\n def shutdown(pid=os.getpid()):\r\n '''kill function for Win32 with Python < 2.7 / 3.2'''\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 0, pid)\r\n return (0 != kernel32.TerminateProcess(handle, 0))\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Hello, World.')\r\n shutdown()\r\n","sub_path":"management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650033399","text":"\"\"\"Support for the Daikin HVAC.\"\"\"\nimport logging\nfrom pymadoka.connection import ConnectionStatus\n\nimport voluptuous as vol\n\nfrom pymadoka import (\n Controller,\n PowerState,\n PowerStateStatus,\n FanSpeed,\n FanSpeedEnum,\n FanSpeedStatus,\n OperationMode,\n OperationModeEnum,\n OperationModeStatus,\n SetPoint,\n SetPointStatus,\n Temperatures,\n TemperaturesStatus,\n ConnectionException\n)\n\n\nfrom homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity\nfrom homeassistant.components.climate.const import (\n ATTR_FAN_MODE,\n ATTR_HVAC_MODE,\n CURRENT_HVAC_ACTIONS,\n FAN_OFF,\n HVAC_MODE_AUTO,\n HVAC_MODE_COOL,\n HVAC_MODE_DRY,\n HVAC_MODE_FAN_ONLY,\n HVAC_MODE_HEAT,\n HVAC_MODE_HEAT_COOL,\n HVAC_MODE_OFF,\n CURRENT_HVAC_COOL,\n CURRENT_HVAC_DRY,\n CURRENT_HVAC_HEAT,\n CURRENT_HVAC_FAN,\n CURRENT_HVAC_IDLE,\n CURRENT_HVAC_OFF,\n\n SUPPORT_FAN_MODE,\n SUPPORT_TARGET_TEMPERATURE,\n FAN_AUTO,\n FAN_LOW,\n FAN_MEDIUM,\n FAN_HIGH,\n)\nfrom homeassistant.const import (\n ATTR_TEMPERATURE,\n CONF_DEVICES,\n CONF_DEVICE,\n CONF_SCAN_INTERVAL,\n CONF_NAME,\n CONF_FORCE_UPDATE,\n TEMP_CELSIUS,\n)\nimport homeassistant.helpers.config_validation as cv\n\nfrom . 
import DOMAIN\nfrom .const import CONTROLLERS, MAX_TEMP, MIN_TEMP\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Required(CONF_DEVICE): cv.string\n }\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n# PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n# {vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME): cv.string}\n# )\n\nHA_MODE_TO_DAIKIN = {\n HVAC_MODE_FAN_ONLY: OperationModeEnum.FAN,\n HVAC_MODE_DRY: OperationModeEnum.DRY,\n HVAC_MODE_COOL: OperationModeEnum.COOL,\n HVAC_MODE_HEAT: OperationModeEnum.HEAT,\n HVAC_MODE_AUTO: OperationModeEnum.AUTO,\n HVAC_MODE_OFF: OperationModeEnum.AUTO,\n}\n\nDAIKIN_TO_HA_MODE = {\n OperationModeEnum.FAN: HVAC_MODE_FAN_ONLY,\n OperationModeEnum.DRY: HVAC_MODE_DRY,\n OperationModeEnum.COOL: HVAC_MODE_COOL,\n OperationModeEnum.HEAT: HVAC_MODE_HEAT,\n OperationModeEnum.AUTO: HVAC_MODE_AUTO,\n}\n\nHA_FAN_MODE_TO_DAIKIN = {\n FAN_LOW: FanSpeedEnum.LOW,\n FAN_MEDIUM: FanSpeedEnum.MID,\n FAN_HIGH: FanSpeedEnum.HIGH,\n FAN_AUTO: FanSpeedEnum.AUTO\n}\n\nDAIKIN_TO_HA_FAN_MODE = {\n FanSpeedEnum.LOW: FAN_LOW,\n FanSpeedEnum.MID: FAN_MEDIUM,\n FanSpeedEnum.HIGH: FAN_HIGH,\n FanSpeedEnum.AUTO: FAN_AUTO,\n}\n\nDAIKIN_TO_HA_CURRENT_HVAC_MODE = {\n OperationModeEnum.FAN: CURRENT_HVAC_FAN,\n OperationModeEnum.DRY: CURRENT_HVAC_DRY,\n OperationModeEnum.COOL: CURRENT_HVAC_COOL,\n OperationModeEnum.HEAT: CURRENT_HVAC_HEAT\n}\n\nDATA = \"data\"\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Old way of setting up the Daikin HVAC platform.\n\n Can only be called when a user accidentally mentions the platform in their\n config. But even in that case it would have been ignored.\n \"\"\"\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n \"\"\"Set up Daikin climate based on config_entry.\"\"\"\n\n entities = []\n config = entry.data\n\n for controller in hass.data[DOMAIN][CONTROLLERS].values():\n try:\n entity = DaikinMadokaClimate(controller)\n entities.append(entity)\n await entity.controller.update()\n except ConnectionAbortedError:\n pass\n\n async_add_entities(entities, update_before_add=True)\n\nclass DaikinMadokaClimate(ClimateEntity):\n \"\"\"Representation of a Daikin HVAC.\"\"\"\n\n def __init__(self, controller:Controller):\n \"\"\"Initialize the climate device.\"\"\"\n self.controller = controller\n\n\n @property\n def supported_features(self):\n \"\"\"Return the list of supported features.\"\"\"\n return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE\n\n @property\n def available(self):\n \"\"\"Return the availability.\"\"\"\n return self.controller.connection.connection_status == ConnectionStatus.CONNECTED\n\n @property\n def name(self):\n \"\"\"Return the name of the thermostat, if any.\"\"\"\n return self.controller.connection.name if self.controller.connection.name is not None else self.controller.connection.address\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID.\"\"\"\n return self.controller.connection.address\n\n @property\n def temperature_unit(self):\n \"\"\"Return the unit of measurement which this thermostat uses.\"\"\"\n return TEMP_CELSIUS\n\n @property\n def current_temperature(self):\n \"\"\"Return the current temperature.\"\"\"\n if self.controller.temperatures.status is None:\n return None\n\n return self.controller.temperatures.status.indoor\n\n @property\n def target_temperature(self):\n \"\"\"Return the temperature we try to reach.\"\"\"\n\n if self.controller.set_point.status is None:\n return MIN_TEMP\n\n if self.hvac_mode == HVAC_MODE_HEAT:\n return 
self.controller.set_point.status.heating_set_point\n else:\n return self.controller.set_point.status.cooling_set_point\n\n\n @property\n def target_temperature_step(self):\n \"\"\"Return the supported step of target temperature.\"\"\"\n return 1\n\n @property\n def min_temp(self):\n \"\"\"Return the minimum temperature.\"\"\"\n return MIN_TEMP\n\n @property\n def max_temp(self):\n \"\"\"Return the maximum temperature.\"\"\"\n return MAX_TEMP\n\n async def async_set_temperature(self, **kwargs):\n \"\"\"Set new target temperature.\"\"\"\n try:\n _LOGGER.debug(f\"Setting temperature of device {self.name}\")\n new_cooling_set_point = self.controller.set_point.status.cooling_set_point\n new_heating_set_point = self.controller.set_point.status.heating_set_point\n if (self.controller.operation_mode.status.operation_mode != OperationModeEnum.HEAT):\n new_cooling_set_point = round(kwargs.get(ATTR_TEMPERATURE))\n if self.controller.operation_mode.status.operation_mode != OperationModeEnum.COOL:\n new_heating_set_point = round(kwargs.get(ATTR_TEMPERATURE))\n\n await self.controller.set_point.update(\n SetPointStatus(new_cooling_set_point,new_heating_set_point)\n )\n except ConnectionAbortedError:\n _LOGGER.info(f\"Could not set target temperature on {self.name}. Connection not available, please reload integration to try reenabling.\")\n except ConnectionException:\n pass\n\n @property\n def hvac_mode(self):\n \"\"\"Return current operation ie. heat, cool, idle.\"\"\"\n\n if self.controller.power_state.status.turn_on == False:\n return HVAC_MODE_OFF\n\n _LOGGER.debug(f\"Getting operation mode of device {self.name}\")\n return DAIKIN_TO_HA_MODE.get(\n self.controller.operation_mode.status.operation_mode\n )\n\n @property\n def hvac_modes(self):\n \"\"\"Return the list of available operation modes.\"\"\"\n return list(HA_MODE_TO_DAIKIN)\n\n @property\n def hvac_action(self):\n \"\"\"Return the HVAC current action.\"\"\"\n\n if self.controller.power_state.status.turn_on == False :\n return CURRENT_HVAC_OFF\n\n if self.controller.operation_mode.status.operation_mode == OperationModeEnum.AUTO:\n if self.target_temperature == self.current_temperature:\n return CURRENT_HVAC_IDLE\n elif self.target_temperature > self.current_temperature:\n return CURRENT_HVAC_HEAT\n else:\n return CURRENT_HVAC_COOL\n else:\n _LOGGER.debug(f\"Getting operation mode of device {self.name}\")\n return DAIKIN_TO_HA_CURRENT_HVAC_MODE.get(self.controller.operation_mode.status.operation_mode)\n\n async def async_set_hvac_mode(self, hvac_mode):\n \"\"\"Set HVAC mode.\"\"\"\n try:\n _LOGGER.debug(f\"Setting operation mode of device {self.name}\")\n await self.controller.operation_mode.update(\n OperationModeStatus(HA_MODE_TO_DAIKIN.get(hvac_mode))\n )\n await self.controller.power_state.update(\n PowerStateStatus(hvac_mode != HVAC_MODE_OFF))\n\n self.async_schedule_update_ha_state()\n except ConnectionAbortedError:\n _LOGGER.info(f\"Could not set HVAC mode on {self.name}. 
Connection not available, please reload integration to try reenabling.\")\n except ConnectionException:\n pass\n\n @property\n def fan_mode(self):\n \"\"\"Return the fan setting.\"\"\"\n\n if self.controller.fan_speed.status is None:\n return FAN_OFF\n\n if self.hvac_mode == HVAC_MODE_HEAT:\n _LOGGER.debug(f\"Getting heating fan speed of device {self.name}\")\n return DAIKIN_TO_HA_FAN_MODE.get(\n self.controller.fan_speed.status.heating_fan_speed\n )\n else:\n _LOGGER.debug(f\"Getting cooling fan speed of device {self.name}\")\n return DAIKIN_TO_HA_FAN_MODE.get(\n self.controller.fan_speed.status.cooling_fan_speed\n )\n\n async def async_set_fan_mode(self, fan_mode):\n \"\"\"Set fan mode.\"\"\"\n try:\n _LOGGER.debug(f\"Setting fan speed of device {self.name}\")\n await self.controller.fan_speed.update(\n FanSpeedStatus(\n HA_FAN_MODE_TO_DAIKIN.get(fan_mode), HA_FAN_MODE_TO_DAIKIN.get(fan_mode)\n )\n )\n except ConnectionAbortedError:\n _LOGGER.info(f\"Could not set fan mode on {self.name}. Connection not available, please reload integration to try reenabling.\")\n except ConnectionException:\n pass\n\n @property\n def fan_modes(self):\n \"\"\"List of available fan modes.\"\"\"\n return list(HA_FAN_MODE_TO_DAIKIN)\n\n async def async_update(self):\n \"\"\"Retrieve latest state.\"\"\"\n\n try:\n _LOGGER.debug(f\"Updating device status for {self.name}\")\n await self.controller.read_info()\n await self.controller.update()\n\n except ConnectionAbortedError:\n _LOGGER.info(f\"Could not update device status for {self.name}. Connection not available, please reload integration to try reenabling.\")\n except ConnectionException:\n pass\n\n async def async_turn_on(self):\n \"\"\"Turn device on.\"\"\"\n try:\n _LOGGER.debug(f\"Turning ON device {self.name}\")\n await self.controller.power_state.update(PowerStateStatus(True))\n except ConnectionAbortedError:\n _LOGGER.info(f\"Could not turn on {self.name}. Connection not available, please reload integration to try reenabling.\")\n except ConnectionException:\n pass\n\n async def async_turn_off(self):\n \"\"\"Turn device off.\"\"\"\n try:\n _LOGGER.debug(f\"Turning OFF device {self.name}\")\n await self.controller.power_state.update(PowerStateStatus(False))\n except ConnectionAbortedError:\n _LOGGER.info(f\"Could not turn off {self.name}. 
Connection not available, please reload integration to try reenabling.\")\n except ConnectionException:\n pass\n @property\n async def async_device_info(self):\n \"\"\"Return a device description for device registry.\"\"\"\n\n return await self.controller.read_info()\n","sub_path":"climate.py","file_name":"climate.py","file_ext":"py","file_size_in_byte":11488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"110955149","text":"\"\"\"\nEste módulo contém as funções principais responsáveis pelo funcionamento do jogo.\n\"\"\"\n\n# Módulos importados\nimport pygame \nimport random\n# Classes e variáveis específicas que precisaram ser importadas;\nfrom elementos import (\n Player,\n Enemy,\n collide,\n larger_explosion_sound,\n smaller_explosion_sound\n )\n# Classes e variáveis específicas que precisaram ser importadas;\nfrom fundo import (BG, BG_2, BG_3, BG_4, WIDTH, HEIGHT, WIN)\n\n# Preparando as fontes que serão usadas\npygame.font.init()\n\ndef main(): \n run = True # Determina que o jogo está rodando;\n FPS = 60 # Frames por segundo;\n level = 0 # Determina o nível em que o jogador está;\n lives = 5 # Número de vidas do jogador;\n y = 0 # Variável que será usada para rolar a tela;\n \n # Determinando a fonte e seu tamanho\n main_font = pygame.font.SysFont(\"comicsans\", 50)\n lost_font = pygame.font.SysFont(\"comicsans\", 60)\n \n # Aparição das naves inimigas\n enemies = []\n wave_length = 5 # Número de inimigos acrescentados a cada onda;\n enemy_vel = 1\n \n # Determinando a velocidade do jogador\n player_vel = 5\n \n # Determinando a velocidade do laser\n laser_vel = 5\n \n # Criando a nave do jogador\n player = Player(300, 600)\n \n # Temporizador\n clock = pygame.time.Clock() \n \n # Variável que recebe a(s) derrota(s);\n lost = False\n lost_count = 0\n \n '''\n A função abaixo irá adicionar a imagem de ao pygame. 
Se não me engano ela \n irá se atualizar constantemente.\n '''\n def redraw_window():\n '''\n Escrever o texto\n \n Esses rótulos (label) armazenam os textos que exibirão as informações\n de número de vidas e o nível no qual o jogador se encontra.\n \n Respectivamente, temos o conteúdo, um valor inicial e a cor da fonte\n '''\n lives_label = main_font.render(f\"Lives: {lives}\", 1, (255, 255, 255))\n level_label = main_font.render(f\"Level: {level}\", 1, (255, 255, 255))\n \n # Localização do texto\n WIN.blit(lives_label, (10, 10)) # 10p para a direita e 10p para baixo;\n WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10)) \n # A posição acima é em relação à posição anterior e ao tamanho da janela\n # do jogo.\n \n # Repetição responsável por gerar os inimigos;\n for enemy in enemies:\n enemy.draw(WIN)\n \n # Função que \"desenha\" o jogador;\n player.draw(WIN)\n \n # Tela de derrota;\n if lost:\n lost_label = lost_font.render(\"You Lost!\", 1, (255, 255, 255))\n WIN.blit(lost_label, (WIDTH / 2 - lost_label.get_width() / 2, 350))\n \n pygame.display.update()\n \n # Variável que será usada para rolar a tela;\n y = 0\n while run:\n # A cada frame a tela será redesenhada;\n clock.tick(FPS)\n redraw_window()\n \n # Rolamento do plano de fundo durante o jogo;\n if level < 2:\n rel_y = y % BG.get_rect().height\n WIN.blit(BG, (0, rel_y - BG.get_rect().height))\n if rel_y < HEIGHT:\n WIN.blit(BG, (0, rel_y))\n y += 3\n if level >= 2:\n rel_y = y % BG.get_rect().height\n WIN.blit(BG_2, (0, rel_y - BG.get_rect().height))\n if rel_y < HEIGHT:\n WIN.blit(BG_2, (0, rel_y))\n y += 3\n if level >= 5:\n rel_y = y % BG.get_rect().height\n WIN.blit(BG_3, (0, rel_y - BG.get_rect().height))\n if rel_y < HEIGHT:\n WIN.blit(BG_3, (0, rel_y))\n y += 1\n if level >= 7:\n rel_y = y % BG.get_rect().height\n WIN.blit(BG_4, (0, rel_y - BG.get_rect().height))\n if rel_y < HEIGHT:\n WIN.blit(BG_4, (0, rel_y))\n y += 3\n \n # Derrota do jogador;\n if lives <= 0 or player.health <= 0:\n larger_explosion_sound.play()\n lost = True\n lost_count += 1\n if lost:\n if lost_count > FPS * 3:\n run = False\n else:\n continue\n \n # Determinando o aumento das ordas de inimigos;\n # A cada nível mais cinco inimigos são acrescentados à orda;\n if len(enemies) == 0:\n level += 1\n wave_length += 5\n for i in range(wave_length):\n enemy = Enemy(random.randrange(50, WIDTH - 100), random.randrange(-1500, -100), random.choice([\"red\", \"blue\", \"green\"]))\n enemies.append(enemy)\n \n # Determinando o \"fechamento\" do jogo\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n \n # MOVIMENTOS DO JOGADOR \n \n keys = pygame.key.get_pressed()\n \n # --------------------------------------------------------------------\n # Mover para a esquerda\n # player.x - player_vel > 0 impede que o jogador saia para fora da tela\n # pelo lado esquerdo.\n if keys[pygame.K_a] and player.x - player_vel > 0:\n player.x -= player_vel\n # --------------------------------------------------------------------\n \n # Mover para a direita\n # player.x + player_vel + player.get_width() < WIDTH impede que o jogador\n # saia da tela pelo lado direito.\n if keys[pygame.K_d] and player.x + player_vel + player.get_width() < WIDTH:\n player.x += player_vel\n # --------------------------------------------------------------------\n \n # Mover para a cima\n # player.y - player_vel > 0 impede que o jogador saia para fora da tela\n # pelo por cima.\n if keys[pygame.K_w] and player.y - player_vel > 0:\n player.y -= player_vel\n # 
--------------------------------------------------------------------\n \n # Move down\n # player.y + player_vel + player.get_height() + 50 keeps the player\n # from leaving the screen at the bottom\n if keys[pygame.K_s] and player.y + player_vel + player.get_height() + 50 < HEIGHT:\n player.y += player_vel\n # --------------------------------------------------------------------\n \n # Shoot lasers \n if keys[pygame.K_SPACE]:\n player.shoot()\n # Pause button;\n if keys[pygame.K_p]:\n paused()\n \n # Loop that handles the movement of the lasers as well as their spawning;\n for enemy in enemies[:]:\n enemy.move(enemy_vel) \n enemy.move_lasers(laser_vel, player)\n \n # Random chance of an enemy shooting;\n if random.randrange(0, 1 * 60) == 1:\n enemy.shoot()\n # Keep the enemies from leaving the game screen;\n if enemy.x > WIDTH - 2 * enemy.ship_img.get_width():\n enemy.x -= enemy.ship_img.get_width()\n if enemy.x < 2 * enemy.ship_img.get_width():\n enemy.x += enemy.ship_img.get_width()\n # Collision of the enemies with the player;\n if collide(enemy, player):\n player.health -= 10\n smaller_explosion_sound.play()\n enemies.remove(enemy)\n # Reduction of the player's lives;\n elif enemy.y + enemy.get_height() > HEIGHT:\n lives -= 1\n enemies.remove(enemy)\n \n # Movement of the player's laser;\n # Since the y axis in pygame grows downwards, the speed of the player's\n # laser must be negative. Otherwise the lasers would be fired downwards\n # instead of upwards, as expected.\n player.move_lasers(-laser_vel, enemies) \n\n# Function that generates the main menu;\n# From it the game can be closed;\ndef main_menu():\n # Title font\n title_font = pygame.font.SysFont(\"comicsans\", 70)\n # Indicates that the game is running\n run = True\n # Variable used for scrolling the background image on the y axis;\n y = 0 \n while run:\n # Scrolling of the background image on the y axis;\n rel_y = y % BG.get_rect().height\n WIN.blit(BG, (0, rel_y - BG.get_rect().height))\n if rel_y < HEIGHT:\n WIN.blit(BG, (0, rel_y))\n y += 1\n \n # Game title\n title_label = title_font.render(\"Press the mouse to begin...\", 1, (255, 255, 255))\n WIN.blit(title_label, (WIDTH / 2 - title_label.get_width() / 2, 350))\n \n # Update of the game window;\n pygame.display.update()\n \n # Starting or quitting the game\n for event in pygame.event.get():\n # To quit;\n if event.type == pygame.QUIT:\n run = False\n # To start the game;\n if event.type == pygame.MOUSEBUTTONDOWN:\n main()\n pygame.quit()\n\n# Function that pauses the game;\ndef paused():\n # Variable that indicates that the game is paused;\n pause = True\n \n # Message that is shown on the screen;\n pause_font = pygame.font.SysFont(\"comicsans\", 50)\n pause_label = pause_font.render(\"Paused, press any keyboard key to resume\", 1, (255, 255, 255))\n WIN.blit(pause_label, (WIDTH / 2 - pause_label.get_width() / 2, 350))\n \n # Updates the pygame screen;\n pygame.display.flip()\n \n # While the game is paused...\n while pause: \n # Press any key to leave the pause;\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n pygame.display.update()\n pause = False\n\n# Calling the function that starts the game;\nmain_menu()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"164662561","text":"from sudBoard.settings import BASE_URL\nimport requests\nimport pandas as pd
import json\nimport geopandas as gpd\nfrom vizApps.Utils.dataUtils import DataUtils\nfrom vizApps.Utils.geomUtils import GeomUtil\nfrom lumen.sources import Source\n\nfrom sqlalchemy import create_engine\n\nfrom sudBoard.settings import EXTERNE_DATABASES\n\nfrom vizApps.services.JossoSessionService import JossoSession\n\n# limit on the number of objects per request\nLIMIT_PARAM = 5000\nSAMPLE_LIMIT = 100\n\n\nREST_API = \"Url\"\nDATABASE = \"SQL\"\nFILE = \"Fichier\"\n\nCONNECTOR_LIST = [(REST_API,\"WEB - Province Sud business application\"),\n (DATABASE,\"DB - internal Province Sud database\"),\n (FILE,\"File import (Xls, Csv, ShapeFile)\")\n ]\n\n\nFULL = \"FULL\"\nSAMPLE = \"SAMPLE\"\n\nclass ConnectorInterface():\n instances = []\n\n def __init__(self, json):\n self.jossoSession = JossoSession()\n self.id = json[\"connector-id\"]\n self.data = pd.DataFrame()\n self.sample_data = pd.DataFrame()\n self.full_data = pd.DataFrame()\n self.sample_or_full_from_cache = None #SAMPLE or FULL\n self.sample = False\n self.no_cache = False\n self.caching_in_progress = False\n self.connectorType= json[\"connector\"]\n self.message = None\n ConnectorInterface.instances.append(self)\n if (json[\"connector\"] == REST_API ):\n self.connector = PsudRestApi(json['url'], json['extraParams'],self.jossoSession)\n elif (json[\"connector\"] == DATABASE):\n self.connector = PsudDatabase(json['db'], json['table'], json['whereClause'],self.jossoSession)\n\n def configureConnector(self,sampleOrFull):\n print(\"Configuring the connector interface in {} mode\".format(sampleOrFull))\n if sampleOrFull == SAMPLE:\n self.toSampleOnly()\n elif sampleOrFull == FULL:\n self.toFullData()\n\n @classmethod\n def get(cls, jsonParams):\n id = jsonParams[\"connector-id\"]\n instance = [inst for inst in cls.instances if inst.id == id]\n if len(instance) >= 1:\n return instance[0] # only one object is returned\n return ConnectorInterface(jsonParams)\n\n def isDataInCache(self):\n if self.sample_or_full_from_cache == SAMPLE and not self.sample_data.empty:\n return True\n elif self.sample_or_full_from_cache == FULL and not self.full_data.empty:\n return True\n else:\n return False\n\n def lookCachedData(self):\n if self.isDataInCache():\n self.data = self.returnSampleOrFullData()\n return self.data\n else:\n self.message = 'No {} data in the cache for this type of request'.format(self.sample_or_full_from_cache)\n print(self.message)\n return None\n\n def returnSampleOrFullData(self):\n data = {\n SAMPLE: self.sample_data,\n FULL : self.full_data\n }\n return data.get(self.sample_or_full_from_cache)\n\n def getData(self):\n self.message = \"Loading data -- \", \"connector: \", self.connectorType, \"sample mode: \", self.sample\n print(self.message)\n\n data = self.connector.getData(self.sample)\n\n if data is None :\n return None\n\n if not isinstance(data, gpd.GeoDataFrame):\n if (GeomUtil.getIsGeo(self, dataframe=data)):\n data = GeomUtil.transformToGeoDf(self,dataframe=data)\n else:\n data = DataUtils.dataCleaner(self, data)\n self.setData(data)\n return self.data\n\n def setData(self, data):\n if self.sample_or_full_from_cache == SAMPLE and self.sample:\n self.sample_data = data\n self.data = self.sample_data\n elif self.sample_or_full_from_cache == FULL and not self.sample:\n self.full_data = data\n self.data = self.full_data\n\n def toSampleOnly(self):\n self.sample_or_full_from_cache = SAMPLE\n self.sample = True\n\n def toFullData(self):\n self.sample_or_full_from_cache = FULL\n self.sample = False\n
def disconnect(self):\n self.connector.disconnect()\n self.caching_in_progress = False\n\n\nclass PsudRestApi(Source):\n baseUrl = BASE_URL.__getitem__(0)[1]\n #endPointUrl = \"\"\n extraParams = {}\n\n def __init__(self, url, extraParams,jossoSession):\n self.jossoSession = jossoSession\n self.endPointUrl = url\n self.extraParams = extraParams\n self.totalNbEntity = 0\n self.nbEntityLoaded = 0\n self.nbRequest = 1\n\n def disconnect(self):\n pass\n\n def getData(self, sampleOnly):\n\n\n url = self.endPointUrl\n extraParams = self.extraParams\n data = pd.DataFrame()\n modulo = self.totalNbEntity % LIMIT_PARAM\n\n # fetch the metadata from a single entity\n extraParams['limit']= SAMPLE_LIMIT\n result = self.makeRequest(url, extraParams)\n while result.status_code == 503:\n result = self.makeRequest(url, extraParams)\n break\n\n if result.status_code == 200:\n try:\n jsonResult = json.loads(result.text)\n if jsonResult['success']:\n total = jsonResult.get('total')\n if total:\n self.totalNbEntity = total\n else:\n # we should be in the case where a single entity is requested from the COMMON application\n self.totalNbEntity = 1\n except ValueError as e:\n self.message = \"the data is not valid JSON\"\n print(self.message)\n return None\n\n elif result.status_code == 500:\n self.message = \"error 500\"\n dataFrame = pd.DataFrame(data={'0':[\"Data-Acces-Error\"]})\n print(self.message)\n return dataFrame\n elif result.status_code == 503 :\n self.message = \"error 503\" + \" \" + result.reason\n return print(self.message)\n elif result.status_code == 404 :\n self.message = \"error 404 \" + result.reason\n return print( self.message )\n\n nbRequest = int((self.totalNbEntity - modulo) / LIMIT_PARAM)\n\n\n self.nbRequest = nbRequest if nbRequest > 0 else 1\n\n if sampleOnly:\n self.data = self.createDataframeFromJson(result)\n # drop the limit parameter; based on the total amount of data the requests are paginated so as not to overload the server.\n extraParams.pop('limit')\n self.nbEntityLoaded = self.data.shape[0]\n return self.data\n\n if (self.totalNbEntity < LIMIT_PARAM):\n result = self.makeRequest(url, extraParams)\n data = self.createDataframeFromJson(result)\n\n else:\n extraParams['limit'] = LIMIT_PARAM\n\n for i in range(0, self.totalNbEntity, LIMIT_PARAM):\n extraParams['start'] = i\n result = self.makeRequest(url, extraParams)\n df = pd.DataFrame()\n\n while result.status_code == 503:\n self.message = \"error 503\"\n print(self.message)\n result = self.makeRequest(url, extraParams)\n break\n\n if result.status_code == 200:\n df = self.createDataframeFromJson(result)\n elif result.status_code == 500:\n self.message = \"error 500\"\n dataFrame = pd.DataFrame({'0':\"data-Acces-Error\"})\n print(self.message)\n return dataFrame\n elif result.status_code == 503:\n self.message = \"error 503\"\n print(self.message)\n else:\n self.message =\"we have a problem, Houston \" + str(result.status_code)\n print(self.message)\n raise Exception('PsudRestApi Error', 'Request status code: ', result.status_code)\n\n if (data.size == 0):\n data = df\n else:\n data = data.append(df)\n\n self.nbEntityLoaded = data.shape[0]\n\n extraParams.pop('start')\n\n self.data = data\n return self.data\n\n def makeRequest(self, url, extraParams):\n params = \"\"\n for key, value in extraParams.items():\n params += \"&\" + key + \"=\" + str(value)\n\n result = requests.get(self.baseUrl + url + \"?_responseMode=json\" + params,\n cookies=self.jossoSession.cookies,
headers=self.jossoSession.headers)\n return result\n\n def createDataframeFromJson(self, result):\n jsonResult = json.loads(result.text)\n # clean up null values into empty dicts\n for i in jsonResult['data']:\n for key, value in i.items():\n if value == None:\n i[key] = {}\n\n dataFrame = pd.json_normalize(jsonResult['data'])\n return dataFrame\n\n def getSlicedData(self):\n url = self.endPointUrl\n extraParams = self.extraParams\n data = pd.DataFrame()\n\n\n result = self.makeRequest(url, extraParams)\n\n while result.status_code == 503:\n result = self.makeRequest(url, extraParams)\n break\n\n if result.status_code == 200:\n data = self.createDataframeFromJson(result)\n\n self.nbEntityLoaded += data.shape[0]\n\n return data\n\nclass PsudGeoCat():\n def __init__(self):\n return\n\n\nclass PsudDatabase():\n\n def __init__(self, dataBaseConnection, table, whereClause, jossoSession):\n self.jossoSession = jossoSession\n self.nbRequest = 1\n self.totalNbEntity = 0\n self.nbEntityLoaded = 0\n\n self.database = EXTERNE_DATABASES.get(dataBaseConnection)\n self.extraParams = whereClause\n\n user = jossoSession.login.split('@')[0]\n password = jossoSession.password\n host = self.database.get('HOST')\n namedb = self.database.get('NAME')\n\n url = 'postgresql://{}:{}@{}:5432/{}'.format(user,password,host,namedb)\n\n self.engine = create_engine(url)\n\n\n\n self.table = table\n self.whereClause = whereClause\n self.conn = None\n\n\n def getData(self, sampleOnly):\n # self.conn = psycopg2.connect(host=self.database.get('HOST'),\n # database=self.database.get('NAME'),\n # user=self.database.get('USER'),\n # password=self.database.get('PASSWORD'))\n query = \"SELECT * from {} {}\".format(self.table, self.whereClause)\n\n print(\"SQL query log: {}\".format(query))\n\n dataframe = pd.read_sql_query(query, con=self.engine)\n\n self.totalNbEntity = dataframe.shape[0]\n self.nbEntityLoaded = dataframe.shape[0]\n\n return dataframe\n\n def disconnect(self):\n self.closeConnection()\n pass\n\n def closeConnection(self):\n if self.conn is not None:\n self.conn.close()\n print('Database connection closed.')\n\n\nclass csvFile():\n def __init__(self):\n return\n","sub_path":"vizApps/services/DataConnectorSevice.py","file_name":"DataConnectorSevice.py","file_ext":"py","file_size_in_byte":11299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"591354965","text":"from REESSimulation.solver import *\nfrom REESSimulation.types import *\n\n\ndef run(engine):\n if engine is None:\n return\n\n if engine.solver_params.mode == 'simulate':\n return simulate(engine)\n\n if engine.solver_params.mode == 'play':\n return play(engine)\n\n return False\n\n\ndef create_rigid_body(engine, body_name):\n if body_name in engine.rigid_bodies:\n raise RuntimeError('connect() rigid body already exist with that name')\n engine.rigid_bodies[body_name] = RigidBody(body_name)\n\n\ndef create_shape_from_obj(engine, shape_name, filename):\n if shape_name in engine.shapes:\n raise RuntimeError('create_shape_from_obj(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH.read_obj(filename)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_cylinder_shape(engine, shape_name, radius, height):\n if shape_name in engine.shapes:\n raise RuntimeError('create_cylinder_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_cylinder(radius, height, slices=12)\n
transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_convex_shape(engine, shape_name, points):\n if shape_name in engine.shapes:\n raise RuntimeError('create_convex_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_convex_hull(points)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_capsule_shape(engine, shape_name, radius, height):\n if shape_name in engine.shapes:\n raise RuntimeError('create_capsule_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_capsule(radius, height, slices=12, segments=12)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_cone_shape(engine, shape_name, radius, height):\n if shape_name in engine.shapes:\n raise RuntimeError('create_cone_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_cone(radius, height, slices=12)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_conical_shape(engine, shape_name, bottom_radius, top_radius, height):\n if shape_name in engine.shapes:\n raise RuntimeError('create_conical_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_conical(bottom_radius, top_radius, height, slices=12)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_sphere_shape(engine, shape_name, radius):\n if shape_name in engine.shapes:\n raise RuntimeError('create_sphere_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_sphere(radius, slices=12, segments=12)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_ellipsoid_shape(engine, shape_name, a, b, c):\n if shape_name in engine.shapes:\n raise RuntimeError('create_ellipsoid_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_ellipsoid(a, b, c, slices=12, segments=12)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_tetrahedron_shape(engine, shape_name, p0, p1, p2, p3):\n if shape_name in engine.shapes:\n raise RuntimeError('create_ellipsoid_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_tetrahedron(p0, p1, p2, p3)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_cuboid_shape(engine, shape_name, p0, p1, p2, p3, p4, p5, p6, p7):\n if shape_name in engine.shapes:\n raise RuntimeError('create_cuboid_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_cuboid(p0, p1, p2, p3, p4, p5, p6, p7)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef create_box_shape(engine, shape_name, width, height, depth):\n if shape_name in engine.shapes:\n raise RuntimeError('create_ellipsoid_shape(): shape with that name already exist')\n shape = Shape(shape_name)\n shape.mesh = MESH_FACTORY.make_box(width, height, depth)\n transform_shape_into_body_frame(shape)\n engine.shapes[shape_name] = shape\n\n\ndef connect_shape(engine, body_name, shape_name):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('connect() no such rigid body exist with that name')\n if shape_name in 
engine.shapes:\n shape = engine.shapes[shape_name]\n else:\n raise RuntimeError('connect() no such shape exist with that name')\n body.shape = shape\n\n\ndef set_position(engine, body_name, r, use_model_frame=False):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_position() no such rigid body exist with that name')\n if use_model_frame:\n\n r_bf2wcs = body.r\n q_bf2wcs = body.q\n\n r_bf2mf = body.shape.r\n q_bf2mf = body.shape.q\n # |q r| |x|\n # By definition we have the rigid body transformations T x = q*x + r = |0 q| |1|\n #\n # T_bf2wcs = T_mf2wcs T_bf2mf\n #\n # |q_bf2wcs r_bf2wcs| |q_mf2wcs r_mf2wcs| |q_bf2mf r_bf2mf |\n # |0 1 | = |0 1 | |0 1 |\n #\n # So\n #\n # q_bf2wcs = q_mf2wcs q_bf2mf\n # r_bf2wcs = q_mf2wcs r_bf2mf + r_mf2wcs (*)\n #\n # From this we can solve for the current model to world coordinate transformation\n q_mf2wcs = Q.prod(q_bf2wcs, Q.conjugate(q_bf2mf))\n r_mf2wcs = r_bf2wcs - Q.rotate(q_mf2wcs, r_bf2mf)\n #\n # Now we wish to change the position of model frame origin to (x,y,z) wrt the world frame\n #\n # so\n #\n # r_mf2wcs = [x, y, z]\n #\n # The orientation of the model frame wrt. world frame is unchanged, hence we want to\n # know what r_bf2wcs should be. For this we use (*) to compute it\n #\n r_mf2wcs = r\n r_bf2wcs = Q.rotate(q_mf2wcs, r_bf2mf) + r_mf2wcs\n\n # Finally we can update the origin of the body model frame origin to reflect the desired position of the model\n # frame origin in the world coordinate system\n body.r = r_bf2wcs\n else:\n body.r = r\n\n\ndef set_orientation(engine, body_name, q, use_model_frame=False):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_position() no such rigid body exist with that name')\n\n if use_model_frame:\n\n r_bf2wcs = body.r\n q_bf2wcs = body.q\n\n r_bf2mf = body.shape.r\n q_bf2mf = body.shape.q\n # |q r| |x|\n # By definition we have the rigid body transformations T x = q*x + r = |0 q| |1|\n #\n # T_bf2wcs = T_mf2wcs T_bf2mf\n #\n # |q_bf2wcs r_bf2wcs| |q_mf2wcs r_mf2wcs| |q_bf2mf r_bf2mf |\n # |0 1 | = |0 1 | |0 1 |\n #\n # So\n #\n # q_bf2wcs = q_mf2wcs q_bf2mf\n # r_bf2wcs = q_mf2wcs r_bf2mf + r_mf2wcs\n #\n # From this we can solve for the current model to world coordinate transformation\n #\n q_mf2wcs = Q.prod(q_bf2wcs, Q.conjugate(q_bf2mf))\n r_mf2wcs = r_bf2wcs - Q.rotate(q_mf2wcs, r_bf2mf)\n #\n # Now we wish to change the orientation of the model frame origin wrt the world frame\n #\n # so we must now have\n #\n # q_mf2wcs = [qs, qx, qy, qz]\n #\n q_mf2wcs = Q.unit(q)\n #\n # Change the orientation of the model frame means that both the orientation and\n # position of the body frame will change wrt. 
the world coordinate system.\n q_bf2wcs = Q.prod(q_mf2wcs, q_bf2mf)\n r_bf2wcs = Q.rotate(q_mf2wcs, r_bf2mf) + r_mf2wcs\n\n body.q = q_bf2wcs\n body.r = r_bf2wcs\n else:\n body.q = Q.unit(q)\n\n\ndef set_velocity(engine, body_name, v):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_velocity() no such rigid body exist with that name')\n body.v = v\n\n\ndef set_spin(engine, body_name, w):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_spin() no such rigid body exist with that name')\n body.w = w\n\n\ndef set_mass_properties_from_shape(engine, body_name, density):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_mass_properties_from_shape() no such rigid body exist with that name')\n if body.shape is None:\n raise RuntimeError('set_mass_properties_from_shape() rigid body did not have a shape')\n body.mass = body.shape.mass*density\n body.inertia = body.shape.inertia*density\n\n\ndef set_mass(engine, body_name, mass):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_mass() no such rigid body exist with that name')\n if mass <= 0.0:\n raise RuntimeError('set_mass() illegal mass value')\n body.mass = mass\n\n\ndef set_inertia(engine, body_name, inertia):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_inertia() no such rigid body exist with that name')\n\n if inertia[0] <= 0.0:\n raise RuntimeError('set_inertia() Illegal Ixx value')\n\n if inertia[1] <= 0.0:\n raise RuntimeError('set_inertia() Illegal Iyy value')\n\n if inertia[2] <= 0.0:\n raise RuntimeError('set_inertia() Illegal Izz value')\n\n body.inertia = inertia\n\n\ndef set_visual_material(engine, body_name, visual_material_name):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('set_visual_material() no such rigid body exist with that name')\n\n if visual_material_name is None:\n raise RuntimeError('set_visual_material() must give a visual material name')\n\n body.visual_material = visual_material_name\n\n\ndef create_gravity_force(engine, force_name, g, up):\n if force_name in engine.forces:\n raise RuntimeError('create_gravity(): Force already exist with that name')\n if g <= 0.0:\n raise RuntimeError('create_gravity(): Illegal value for gravitational acceleration')\n\n gravity = Gravity(force_name)\n gravity.up = V3.unit(up)\n gravity.g = g\n\n engine.forces[force_name] = gravity\n\n\ndef create_damping_force(engine, force_name, alpha, beta):\n if force_name in engine.forces:\n raise RuntimeError('create_damping(): Force already exist with that name')\n if alpha <= 0:\n raise RuntimeError('create_damping(): Illegal value for alpha')\n if beta <= 0:\n raise RuntimeError('create_damping(): Illegal value for beta')\n\n damping = Damping(force_name)\n damping.alpha = alpha\n damping.beta = beta\n\n engine.forces[force_name] = damping\n\n\ndef connect_force(engine, body_name, force_name):\n if body_name in engine.rigid_bodies:\n body = engine.rigid_bodies[body_name]\n else:\n raise RuntimeError('connect_force() no such rigid body exist with that name')\n\n if force_name in engine.forces:\n force = engine.forces[force_name]\n else:\n raise RuntimeError('connect_force() no such force exist with that name')\n\n if force in body.forces:\n raise 
RuntimeError('connect_force() force was already connected to body')\n\n body.forces.append(force)\n\n\ndef set_body_type(engine, body_name, body_type):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('set_body_type() no such rigid body exist with that name')\n\n body = engine.rigid_bodies[body_name]\n\n body.is_free = False\n body.is_fixed = False\n body.is_scripted = False\n\n if body_type in ['free', 'Free', 'FREE']:\n body.is_free = True\n elif body_type in ['fixed', 'Fixed', 'FIXED']:\n body.is_fixed = True\n elif body_type in ['scripted', 'Scripted', 'SCRIPTED']:\n body.is_scripted = True\n else:\n raise RuntimeError('set_body_type(): Unsupported body type found')\n\n\ndef is_fixed_body(engine, body_name):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('is_fixed_body() no such rigid body exist with that name')\n body = engine.rigid_bodies[body_name]\n return body.is_fixed\n\n\ndef is_free_body(engine, body_name):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('is_free_body() no such rigid body exist with that name')\n body = engine.rigid_bodies[body_name]\n return body.is_free\n\n\ndef is_scripted_body(engine, body_name):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('is_scripted_body() no such rigid body exist with that name')\n body = engine.rigid_bodies[body_name]\n return body.is_scripted\n\n\ndef set_body_active(engine, body_name, value):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('set_body_type() no such rigid body exist with that name')\n\n body = engine.rigid_bodies[body_name]\n body.is_active = value\n\n\ndef is_active_body(engine, body_name):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('is_active_body() no such rigid body exist with that name')\n body = engine.rigid_bodies[body_name]\n return body.is_active\n\n\ndef set_body_material(engine, body_name, material_name):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('set_body_material() no such rigid body exist with that name')\n\n body = engine.rigid_bodies[body_name]\n\n if not engine.material_library.exist_material(material_name):\n raise RuntimeError('set_body_material() no such material exist')\n\n body.material = material_name\n\n\ndef create_material_behavior(engine, A, B, epsilon, mu):\n if engine.material_library.exist_behaviour(A, B):\n raise RuntimeError('create_material_behavior() behaviour already exist')\n\n if epsilon < 0.0:\n raise RuntimeError('create_material_behavior() illegal epsilon value')\n\n if mu[0] < 0.0:\n raise RuntimeError('create_material_behavior() illegal mu_x value')\n\n if mu[1] < 0.0:\n raise RuntimeError('create_material_behavior() illegal mu_y value')\n\n if mu[2] < 0.0:\n raise RuntimeError('create_material_behavior() illegal mu_z value')\n\n tmp = [A, B]\n tmp.sort()\n key = tuple(tmp)\n\n behaviour = MaterialBehaviour()\n behaviour.epsilon = epsilon\n behaviour.mu = mu\n engine.material_library.storage[key] = behaviour\n\n\ndef set_finite_update(engine, body_name, value):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('set_finite_update_rotation_axis() no such rigid body exist with that name')\n body = engine.rigid_bodies[body_name]\n body.use_finite_update = value\n\n\ndef set_finite_update_rotation_axis(engine, body_name, axis):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('set_finite_update_rotation_axis() no such rigid body exist with that name')\n body = engine.rigid_bodies[body_name]\n body.finite_update_rotation_axis = 
V3.unit(axis)\n\n\ndef create_keyframe(engine, body_name, time, r, q):\n if body_name not in engine.rigid_bodies:\n raise RuntimeError('create_keyframe() no such rigid body exist with that name')\n\n body = engine.rigid_bodies[body_name]\n\n if not body.is_scripted:\n raise RuntimeError('create_keyframe() body was not scripted')\n\n if body.scripted_motion is None:\n body.scripted_motion = KeyframeMotion()\n\n body.scripted_motion.create_keyframe(time, r, Q.unit(q))\n\n body.scripted_motion.keyframes.sort()\n\n body.r = body.scripted_motion.keyframes[0][1]\n body.q = body.scripted_motion.keyframes[0][2]\n\n\ndef generate_unique_name(engine, name):\n import datetime\n import random\n n = random.random()\n unique_name = name + '_' + str(n) + '_' + str(datetime.datetime.now())\n return unique_name\n\n\ndef compute_shape_extends(engine, shape_name, use_model_frame=True):\n if shape_name in engine.shapes:\n shape = engine.shapes[shape_name]\n else:\n raise RuntimeError('get_shape_extends() no such shape exist with that name')\n if use_model_frame:\n model_mesh = MESH.join([shape.mesh])\n MESH.rotate(model_mesh, shape.q)\n MESH.translate(model_mesh, shape.r)\n (l, u) = MESH.aabb(model_mesh)\n else:\n (l, u) = MESH.aabb(shape.mesh)\n return u-l\n\n\ndef rotate_shape(engine, shape_name, q):\n if shape_name in engine.shapes:\n shape = engine.shapes[shape_name]\n else:\n raise RuntimeError('resize_shape() no such shape exist with that name')\n # Make sure mesh are in model frame\n MESH.rotate(shape.mesh, shape.q)\n MESH.translate(shape.mesh, shape.r)\n # Rotate mesh in its model frame\n MESH.rotate(shape.mesh, q)\n # Compute new body frame mesh\n transform_shape_into_body_frame(shape)\n\n\ndef scale_shape(engine, shape_name, sx, sy, sz):\n if shape_name in engine.shapes:\n shape = engine.shapes[shape_name]\n else:\n raise RuntimeError('resize_shape() no such shape exist with that name')\n # Make sure mesh are in model frame\n MESH.rotate(shape.mesh, shape.q)\n MESH.translate(shape.mesh, shape.r)\n # Compute the scaling in model frame\n MESH.scale(shape.mesh, sx, sy, sz)\n # Compute new body frame mesh\n transform_shape_into_body_frame(shape)\n\n\ndef center_shape(engine, shape_name):\n if shape_name in engine.shapes:\n shape = engine.shapes[shape_name]\n else:\n raise RuntimeError('center_shape() no such shape exist with that name')\n # Make sure mesh are in model frame\n MESH.rotate(shape.mesh, shape.q)\n MESH.translate(shape.mesh, shape.r)\n # Compute the new center in model frame\n (l, u) = MESH.aabb(shape.mesh)\n center = (l+u)/2.0\n MESH.translate(shape.mesh, -center)\n # Compute new body frame mesh\n transform_shape_into_body_frame(shape)\n\n\ndef resize_shape_to_unit_box(engine, shape_name, keep_model_center=False):\n if shape_name in engine.shapes:\n shape = engine.shapes[shape_name]\n else:\n raise RuntimeError('resize_shape() no such shape exist with that name')\n # Make sure mesh are in model frame\n MESH.rotate(shape.mesh, shape.q)\n MESH.translate(shape.mesh, shape.r)\n # Compute the scaling in model frame\n (l, u) = MESH.aabb(shape.mesh)\n center = (l+u)/2.0\n MESH.translate(shape.mesh, -center)\n s = 1.0 / np.max(u-l)\n MESH.scale(shape.mesh, s, s, s)\n if keep_model_center:\n MESH.translate(shape.mesh, center)\n # Compute new body frame mesh\n transform_shape_into_body_frame(shape)\n\n\ndef create_ball_joint(engine, joint_name, socket_body_name, plug_body_name):\n if joint_name in engine.joints:\n raise RuntimeError('create_ball_joint() joint already exist with that name')\n\n if 
socket_body_name not in engine.rigid_bodies:\n raise RuntimeError('create_ball_joint() no such rigid body exist with name ' + socket_body_name)\n\n socket_body = engine.rigid_bodies[socket_body_name]\n\n if plug_body_name not in engine.rigid_bodies:\n raise RuntimeError('create_ball_joint() no such rigid body exist with name ' + plug_body_name)\n\n plug_body = engine.rigid_bodies[plug_body_name]\n\n engine.joints[joint_name] = BallJoint(joint_name, socket_body, plug_body)\n\n\ndef set_socket_connector(engine, joint_name, r, q):\n if joint_name not in engine.joints:\n raise RuntimeError('set_socket_connector() No joint with that name')\n\n joint = engine.joints[joint_name]\n # TODO Need to carefully design control over rigging frame\n joint.socket.transform.r = r\n joint.socket.transform.q = q\n\n\ndef set_plug_connector(engine, joint_name, r, q):\n if joint_name not in engine.joints:\n raise RuntimeError('set_plug_connector() No joint with that name')\n\n joint = engine.joints[joint_name]\n # TODO Need to carefully design control over rigging frame\n joint.plug.transform.r = r\n joint.plug.transform.q = q\n\n\ndef set_joint_error_reduction(engine, joint_name, error_reduction):\n if joint_name not in engine.joints:\n raise RuntimeError('set_joint_error_reduction() No joint with that name')\n\n joint = engine.joints[joint_name]\n\n if error_reduction < 0.0:\n raise RuntimeError('set_joint_error_reduction() Illegal error_reduction value')\n\n if error_reduction > 1.0:\n raise RuntimeError('set_joint_error_reduction() Illegal error_reduction value')\n\n joint.error_reduction = error_reduction\n\n\n\n\n\n","sub_path":"REESSimulation/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":21259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"569258044","text":"\"\"\"\n=================================================================================================================\nTask 14:\nWrite a function that rounds an arbitrary decimal number\nto a given number of digits (the number of digits is passed as the second argument).\nRounding must follow the mathematical rules (0.6 --> 1, 0.4 --> 0).\nDo not use built-in functions or functions from the math module to solve the task.\n=================================================================================================================\n\"\"\"\n\n# 1st variant:\n\n\ndef my_round(num, ndigits):\n \"\"\"\n Rounds an arbitrary decimal number\n \"\"\"\n st = 10 ** (ndigits + 1)\n num = str(int(num * st))\n if num[-1] > '4':\n num = int(num[:-1]) + 1\n else:\n num = int(num[:-1])\n st = st / 10\n num = num / st\n print(\"The number after rounding is: \", num)\n\nnum = 2.9994567\nndigits = 2\nf = my_round(num, ndigits)\n\n\n# 2nd variant (string formatting; the 'f' presentation type rounds to\n# ndigits decimal places):\n\ndef my_round(num, ndigits):\n \"\"\"\n Rounds an arbitrary decimal number\n \"\"\"\n f = '{:.{}f}'.format(num, ndigits)\n print(f)\n\nnum = 2.9994567\nndigits = 4\nf = my_round(num, ndigits)\n","sub_path":"Python: level 1 (typical exs.)/Exercise_14.py","file_name":"Exercise_14.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"394948893","text":"class Solution(object):\n def romanToInt(self, s):\n #create a dict for the roman numerals\n roman = {\"I\" : 1, \"V\": 5, \"X\" : 10, \"L\" : 50, \"C\" : 100, \"D\" : 500, \"M\" : 1000}\n #store the int representation\n num = 0\n #the last digit is always added so ignore it for now,
and add it to the total after\n for i in range(len(s) - 1):\n #s[i] is being used as the key for the dict and compares the int representations of the next element\n #since if the next element is larger, you need to subtract it ex. IV = -1 + 5\n if roman[s[i]] < roman[s[i + 1]]:\n num -= roman[s[i]]\n #otherwise add it\n else:\n num += roman[s[i]]\n #add the last element\n num += roman[s[-1]]\n return(num)\n print(romanToInt(0, input()))","sub_path":"romanNum.py","file_name":"romanNum.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58795454","text":"from django.db import models\nfrom django.utils import timezone\n\nclass Score(models.Model):\n\tjudge = models.ForeignKey('auth.User')\n\tcontestant = models.CharField(max_length=50)\n\ttotal = models.IntegerField()\n\tcreated_date = models.DateTimeField(\n\t\t\t\t\tdefault=timezone.now)\n\n\t\n\n\tdef __str__(self):\n\t\treturn str(self.total)\n","sub_path":"scoreboard/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"16359808","text":"#! /usr/bin/python3\n\nimport gi\ngi.require_version('Gtk','3.0')\nfrom gi.repository import Gtk\nimport keygen\n\n\nclass Window(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self,title=\"Keygen.sh\")\n \n self.set_border_width(10)\n \n headerbar=Gtk.HeaderBar()\n headerbar.set_show_close_button(True)\n headerbar.props.title=\"Keygen\"\n self.set_titlebar(headerbar)\n \n grid=Gtk.Grid()\n self.add(grid)\n\n text=Gtk.Entry()\n text.set_text(\"nothing\")\n grid.attach(text,1,0,1,1)\n \n button1=Gtk.Button(label=\"Generate Key\")\n button1.connect(\"clicked\",self.key,text)\n grid.attach(button1,1,1,1,1)\n \n def key(self,widget,text):\n result=keygen.keygen()\n text.set_text(str(result))\n\nwin=Window()\nwin.connect(\"delete-event\",Gtk.main_quit)\nwin.show_all()\nGtk.main()\n","sub_path":"keygen-ui.py","file_name":"keygen-ui.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"264524730","text":"\"\"\"\nMixture distributions\n\nThis module contains both the base Mixture Class as well as the Location-Scale Family\nMixture Subclass.\n\"\"\"\nfrom dataclasses import dataclass, field\nfrom functools import partial\nimport itertools\nfrom typing import List, Optional, Sequence, Type, TypeVar\n\nfrom jax import grad, jit, nn, scipy\nimport jax.numpy as np\nimport numpy as onp\nimport scipy as oscipy\n\nfrom ergo.utils import minimize\n\nfrom .base import categorical\nfrom .conditions import Condition\nfrom .distribution import Distribution\nfrom .location_scale_family import LSDistribution\nfrom .scale import Scale\n\nM = TypeVar(\"M\", bound=\"Mixture\")\n\n\n@dataclass\nclass Mixture(Distribution):\n components: Sequence[Distribution]\n probs: List[float]\n\n def __mul__(self, x):\n return self.__class__(\n [component * x for component in self.components], self.probs\n )\n\n def rv(self,):\n raise NotImplementedError(\"No access to mixture rv at this time\")\n\n def cdf(self, x):\n return np.sum([c.cdf(x) * p for c, p in zip(self.components, self.probs)])\n\n def ppf(self, q):\n \"\"\"\n Percent point function (inverse of cdf) at q.\n\n Returns the smallest x where the mixture_cdf(x) is greater\n than the requested q provided:\n\n argmin{x} where mixture_cdf(x) > q\n\n The quantile of 
a mixture distribution can always be found\n within the range of its components quantiles:\n https://cran.r-project.org/web/packages/mistr/vignettes/mistr-introduction.pdf\n \"\"\"\n if len(self.components) == 1:\n return self.components[0].ppf(q)\n ppfs = [c.ppf(q) for c in self.components]\n cmin = np.min(ppfs)\n cmax = np.max(ppfs)\n try:\n return oscipy.optimize.bisect(\n lambda x: self.cdf(x) - q,\n cmin - abs(cmin / 100),\n cmax + abs(cmax / 100),\n maxiter=1000,\n )\n except ValueError:\n return (cmax + cmin) / 2\n\n def sample(self):\n i = categorical(np.array(self.probs))\n component_dist = self.components[i]\n return component_dist.sample()\n\n @staticmethod\n def initialize_params(num_components):\n raise NotImplementedError(\"This should be implemented by a subclass\")\n\n def to_params(self):\n # This is like destructure, but from_params transforms probs using softmax,\n # so x != from_params(to_params(x))\n raise NotImplementedError(\"This should be implemented by a subclass\")\n\n def denormalize(self, scale_min, scale_max):\n \"\"\"\n Assume that the distribution has been normalized to be over [0,1].\n Return the distribution on the true scale of [scale_min, scale_max]\n\n :param scale_min: the true-scale minimum of the range\n :param scale_max: the true-scale maximum of the range\n \"\"\"\n raise NotImplementedError(\"This should be implemented by a subclass\")\n\n def normalize(self, scale_min, scale_max):\n \"\"\"\n Assume that the distribution's true range is [scale_min, scale_max].\n Return the normalized condition.\n\n :param scale_min: the true-scale minimum of the range\n :param scale_max: the true-scale maximum of the range\n :return: the condition normalized to [0,1]\n \"\"\"\n raise NotImplementedError(\"This should be implemented by a subclass\")\n\n @classmethod\n def from_samples(cls, data, num_components=3, verbose=False) -> M:\n data = np.array(data)\n scale = Scale(scale_min=min(data), scale_max=max(data))\n normalized_data = np.array([scale.normalize_point(datum) for datum in data])\n\n # FIXME (#219): This is pretty inefficient\n\n @jit\n def loss(params):\n dist = cls.from_params(params)\n normed_params = dist.to_params()\n return -cls.params_logpdf(normed_params, normalized_data)\n\n @jit\n def jac(params):\n dist = cls.from_params(params)\n normed_params = dist.to_params()\n return -cls.params_gradlogpdf(normed_params, normalized_data)\n\n normalized_mixture: M = cls.from_loss(loss, jac, num_components, verbose)\n return normalized_mixture.denormalize(scale.scale_min, scale.scale_max)\n\n @classmethod\n def from_params(cls, params):\n raise NotImplementedError(\"This should be implemented by a subclass\")\n\n @classmethod\n def from_conditions(\n cls,\n conditions: Sequence[Condition],\n num_components: Optional[int] = None,\n verbose=False,\n scale_min=0,\n scale_max=1,\n init_tries=100,\n opt_tries=10,\n ) -> M:\n \"\"\"\n Fit a mixture distribution from Conditions\n\n :param conditions: conditions to fit\n :param num_components: number of components to include in the mixture.\n :param init_tries:\n :param opt_tries:\n :param verbose:\n :param scale_min: the true-scale minimum of the range to fit over.\n :param scale_max: the true-scale maximum of the range to fit over.\n :return: the fitted mixture\n \"\"\"\n normalized_conditions = [\n condition.normalize(scale_min, scale_max) for condition in conditions\n ]\n\n cond_data = [condition.destructure() for condition in normalized_conditions]\n if cond_data:\n cond_classes, cond_params = 
zip(*cond_data)\n else:\n cond_classes, cond_params = [], []\n\n loss = lambda params: static_loss( # noqa: E731\n cls, params, cond_classes, cond_params\n )\n jac = lambda params: static_loss_grad( # noqa: E731\n cls, params, cond_classes, cond_params\n )\n\n normalized_mixture: M = cls.from_loss(\n loss=loss,\n jac=jac,\n num_components=num_components,\n verbose=verbose,\n init_tries=init_tries,\n opt_tries=opt_tries,\n )\n return normalized_mixture.denormalize(scale_min, scale_max)\n\n @classmethod\n def from_loss(\n cls,\n loss,\n jac,\n num_components: Optional[int] = None,\n verbose=False,\n init_tries=100,\n opt_tries=10,\n ) -> M:\n onp.random.seed(0)\n\n init = lambda: cls.initialize_params(num_components) # noqa: E731\n\n fit_results = minimize(\n loss,\n init=init,\n jac=jac,\n init_tries=init_tries,\n opt_tries=opt_tries,\n verbose=verbose,\n )\n if not fit_results.success and verbose:\n print(fit_results)\n final_params = fit_results.x\n\n return cls.from_params(final_params)\n\n def logpdf(self, data):\n return self.params_logpdf(self.to_params(), data)\n\n def logpdf1(self, datum):\n return self.params_logpdf1(self.to_params(), datum)\n\n def pdf1(self, datum):\n # Not calling logpdf1 because we only want to call\n # to_params once even if we call this with a vector\n return np.exp(self.params_logpdf1(self.to_params(), datum))\n\n @staticmethod\n def params_cdf(params, x):\n raise NotImplementedError\n\n @staticmethod\n def params_ppf(params, p):\n raise NotImplementedError\n\n\n@dataclass\nclass LSMixture(Mixture):\n components: Sequence[LSDistribution]\n probs: List[float]\n component_type: Type[LSDistribution] = field(repr=False)\n\n @staticmethod\n def initialize_params(num_components, scale_multiplier=0.2):\n \"\"\"\n Each component has (location, scale, weight).\n The shape of the components matrix is (num_components, 3).\n Weights sum to 1 (are given in log space).\n We use original numpy to initialize parameters since we don't\n want to track randomness.\n \"\"\"\n locs = onp.random.rand(num_components)\n scales = onp.random.rand(num_components) * scale_multiplier\n weights = onp.full(num_components, -num_components)\n components = onp.stack([locs, scales, weights]).transpose()\n return components.reshape(-1)\n\n @classmethod\n def from_params(cls, params):\n structured_params = params.reshape((-1, 3))\n unnormalized_weights = structured_params[:, 2]\n probs = list(nn.softmax(unnormalized_weights))\n component_dists = [cls.component_type(p[0], p[1]) for p in structured_params]\n return cls(component_dists, probs)\n\n def to_params(self):\n nested_params = [\n [c.loc, c.scale, weight] for c, weight in zip(self.components, self.probs)\n ]\n return np.array(list(itertools.chain.from_iterable(nested_params)))\n\n def normalize(self, scale_min: float, scale_max: float):\n normalized_components = [\n component.normalize(scale_min, scale_max) for component in self.components\n ]\n return self.__class__(normalized_components, self.probs, self.component_type)\n\n def denormalize(self, scale_min: float, scale_max: float):\n denormalized_components = [\n component.denormalize(scale_min, scale_max) for component in self.components\n ]\n return self.__class__(denormalized_components, self.probs, self.component_type)\n\n\n@partial(jit, static_argnums=(0, 2))\ndef static_loss(dist_class, dist_params, cond_classes, cond_params):\n print(\n f\"Tracing {dist_class.__name__} loss for {[c.__name__ for c in cond_classes]}\"\n )\n dist = dist_class.from_params(dist_params)\n total_loss = 
0.0\n for (cond_class, cond_param) in zip(cond_classes, cond_params):\n condition = cond_class.structure(cond_param)\n total_loss += condition.loss(dist)\n return total_loss * 100\n\n\nstatic_loss_grad = jit(grad(static_loss, argnums=1), static_argnums=(0, 2))\n","sub_path":"ergo/distributions/mixture.py","file_name":"mixture.py","file_ext":"py","file_size_in_byte":9804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129513197","text":"import logging\nfrom multiprocessing import Condition\n\nfrom schieber.dealer import Dealer\nfrom schieber.rules.stich_rules import stich_rules, card_allowed\nfrom schieber.rules.trumpf_rules import trumpf_allowed\nfrom schieber.rules.count_rules import count_stich, counting_factor\nfrom schieber.stich import PlayedCard, stich_dict, played_card_dict\nfrom schieber.trumpf import Trumpf\n\nlogger = logging.getLogger(__name__)\n\n\nclass Game:\n def __init__(self, teams=None, point_limit=1500, use_counting_factor=False, seed=None):\n self.teams = teams\n self.point_limit = point_limit\n self.players = [teams[0].players[0], teams[1].players[0], teams[0].players[1], teams[1].players[1]]\n self.dealer = Dealer(players=self.players)\n self.geschoben = False\n self.trumpf = None\n self.stiche = []\n self.cards_on_table = []\n self.use_counting_factor = use_counting_factor\n self.seed = seed\n self.endless_play_control = Condition() # used to control the termination of the play_endless method\n self.stop_playing = False # has to be set to true in order to stop the endless play\n\n def play_endless(self, start_player_index=0, whole_rounds=True):\n \"\"\"\n Plays one game after the other with no end. This can be used for training a RL Player. Like this we can reuse\n one game. When we are training with tournaments, each time we play a game, it is added to the list of games.\n This could result in very high unneeded memory usage.\n :param start_player_index:\n :param whole_rounds:\n :return:\n \"\"\"\n while True:\n logger.debug(\"start playing game\")\n self.play(start_player_index, whole_rounds)\n logger.debug(\"game finished\")\n try:\n self.endless_play_control.acquire()\n # timeout in case something goes wrong in the reset, or reset is not called for any reason.\n # In the normal case we just want to continue playing\n received = self.endless_play_control.wait(0.01)\n if received:\n logger.debug(\"endless play received control message\")\n else:\n logger.debug(\n \"endless play did not receive control message! Timeout occurred. Endless play resuming.\")\n if self.stop_playing:\n logger.debug(\"stopping endless play\")\n break\n finally:\n self.endless_play_control.release()\n logger.debug(\"reset game\")\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets the game so that a new game can be started. Used in the endless mode\n :return:\n \"\"\"\n self.reset_points()\n self.stiche = []\n\n def play(self, start_player_index=0, whole_rounds=False):\n \"\"\"\n Plays a game from the start to the end in the following manner:\n 1. The dealer shuffles the cards\n 2. The dealer deals 9 cards to each player\n 3. The player on the right side of the dealer chooses the trumpf. If he/she chooses 'geschoben' his/her partner\n can choose the trumpf.\n 4. For 9 rounds/stichs let the players play their cards.\n 5. After each stich count the points, update the starting player based on who won the stich and add the cards\n played in the stich to the already played stichs.\n 6. 
Check if a team has reached the point limit\n :param start_player_index:\n :param whole_rounds:\n :return:\n \"\"\"\n if self.seed is not None:\n # Increment seed by one so that each game is different.\n # But still the sequence of games is the same each time\n self.seed += 1\n self.dealer.shuffle_cards(self.seed)\n self.dealer.deal_cards()\n self.define_trumpf(start_player_index=start_player_index)\n logger.info('Chosen Trumpf: {0} \\n'.format(self.trumpf.name))\n for i in range(9):\n stich = self.play_stich(start_player_index)\n self.count_points(stich, last=(i == 8))\n logger.info('\\nStich: {0} \\n'.format(stich.player))\n logger.info('{}{}\\n'.format('-' * 180, self.trumpf))\n start_player_index = self.players.index(stich.player)\n self.stiche.append(stich)\n self.stich_over_information()\n if (self.teams[0].won(self.point_limit) or self.teams[1].won(self.point_limit)) and not whole_rounds:\n return True\n return False\n\n def define_trumpf(self, start_player_index):\n \"\"\"\n Sets the trumpf based on the choice of the player assigned to choose the trumpf\n :param start_player_index: The player which is on the right side of the dealer\n :return:\n \"\"\"\n is_allowed_trumpf = False\n generator = self.players[start_player_index].choose_trumpf(geschoben=self.geschoben)\n chosen_trumpf = next(generator)\n if chosen_trumpf == Trumpf.SCHIEBEN:\n self.geschoben = True\n generator = self.players[(start_player_index + 2) % 4].choose_trumpf(geschoben=self.geschoben)\n chosen_trumpf = next(generator)\n while not is_allowed_trumpf:\n is_allowed_trumpf = trumpf_allowed(chosen_trumpf=chosen_trumpf, geschoben=self.geschoben)\n trumpf = generator.send(is_allowed_trumpf)\n chosen_trumpf = chosen_trumpf if trumpf is None else trumpf\n self.trumpf = chosen_trumpf\n return self.trumpf\n\n def play_stich(self, start_player_index):\n \"\"\"\n Plays one entire stich\n :param start_player_index: the index of the player who won the last stich or was assigned to choose the trumpf\n :return: the stich containing the played cards and the winner\n \"\"\"\n self.cards_on_table = []\n first_card = self.play_card(table_cards=self.cards_on_table, player=self.players[start_player_index])\n self.move_made(self.players[start_player_index].id, first_card)\n self.cards_on_table = [PlayedCard(player=self.players[start_player_index], card=first_card)]\n for i in get_player_index(start_index=start_player_index):\n current_player = self.players[i]\n card = self.play_card(table_cards=self.cards_on_table, player=current_player)\n self.move_made(current_player.id, card)\n self.cards_on_table.append(PlayedCard(player=current_player, card=card))\n stich = stich_rules[self.trumpf](played_cards=self.cards_on_table)\n return stich\n\n def play_card(self, table_cards, player):\n \"\"\"\n Checks if the card played by the player is allowed. 
If yes removes the card from the players hand.\n :param table_cards:\n :param player:\n :return: the card chosen by the player\n \"\"\"\n cards = [played_card.card for played_card in table_cards]\n is_allowed_card = False\n generator = player.choose_card(state=self.get_status())\n chosen_card = next(generator)\n while not is_allowed_card:\n is_allowed_card = card_allowed(table_cards=cards, chosen_card=chosen_card, hand_cards=player.cards,\n trumpf=self.trumpf)\n card = generator.send(is_allowed_card)\n chosen_card = chosen_card if card is None else card\n else:\n logger.info('Table: {0}:{1}'.format(player, chosen_card))\n player.cards.remove(chosen_card)\n return chosen_card\n\n def move_made(self, player_id, card):\n for player in self.players:\n player.move_made(player_id, card, self.get_status())\n\n def stich_over_information(self):\n [player.stich_over(state=self.get_status()) for player in self.players]\n\n def count_points(self, stich, last):\n \"\"\"\n Gets the team of the winner of the stich and counts the points.\n :param stich:\n :param last: True if it is the last stich of the Game, False otherwise\n :return:\n \"\"\"\n stich_player_index = self.players.index(stich.player)\n cards = [played_card.card for played_card in stich.played_cards]\n self.add_points(team_index=(stich_player_index % 2), cards=cards, last=last)\n\n def add_points(self, team_index, cards, last):\n \"\"\"\n Adds the points of the cards to the score of the team who won the stich.\n :param team_index:\n :param cards:\n :param last:\n :return:\n \"\"\"\n points = count_stich(cards, self.trumpf, last=last)\n points = points * counting_factor[self.trumpf] if self.use_counting_factor else points\n self.teams[team_index].points += points\n\n def get_status(self):\n \"\"\"\n Returns the status of the game in a dictionary containing\n - the stiche\n - the trumpf\n - if it has been geschoben\n - the point limit\n - the cards currently on the table\n - the teams\n :return:\n \"\"\"\n return dict(\n stiche=[stich_dict(stich) for stich in self.stiche],\n trumpf=self.trumpf.name,\n geschoben=self.geschoben,\n point_limit=self.point_limit,\n table=[played_card_dict(played_card) for played_card in self.cards_on_table],\n teams=[dict(points=team.points) for team in self.teams]\n )\n\n def reset_points(self):\n \"\"\"\n Resets the points of the teams to 0. 
This is used when single games are played.\n :return:\n \"\"\"\n [team.reset_points() for team in self.teams]\n\n\ndef get_player_index(start_index):\n for i in range(1, 4):\n yield (i + start_index) % 4\n","sub_path":"schieber/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548153461","text":"import asyncio\n\nimport logging\nfrom asyncio.futures import CancelledError\n\nimport pytest\n\nimport lightbus\nimport lightbus.path\nfrom lightbus.exceptions import LightbusTimeout\n\n\npytestmark = pytest.mark.reliability\n\n\n@pytest.mark.asyncio\nasync def test_timeouts(bus: lightbus.path.BusPath, caplog, dummy_api, loop):\n caplog.set_level(logging.WARNING)\n loop.slow_callback_duration = 0.01\n results = []\n\n async def do_single_call(n):\n nonlocal results\n try:\n result = await bus.my.dummy.random_death.call_async(n=n, death_probability=0.5)\n results.append(result)\n except LightbusTimeout:\n results.append(None)\n\n async def co_call_rpc():\n await asyncio.sleep(0.1)\n fut = asyncio.gather(*[do_single_call(n) for n in range(0, 100)])\n await fut\n return fut.result()\n\n async def co_consume_rpcs():\n return await bus.client.consume_rpcs(apis=[dummy_api])\n\n (call_task,), (consume_task,) = await asyncio.wait(\n [co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED\n )\n call_task.result()\n consume_task.cancel()\n try:\n await consume_task\n consume_task.result()\n except CancelledError:\n pass\n\n total_successful = len([r for r in results if r is not None])\n total_timeouts = len([r for r in results if r is None])\n assert len(results) == 100\n assert total_successful > 0\n assert total_timeouts > 0\n","sub_path":"tests/redis_transports/test_reliability_redis_rpc.py","file_name":"test_reliability_redis_rpc.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387844970","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass EnvironmentVariable(Model):\n \"\"\"Describes an environment variable for the container.\n\n :param type: The type of the environment variable being given in value.\n Possible values include: 'ClearText', 'KeyVaultReference',\n 'SecretValueReference'. 
Default value: \"ClearText\".\n :type type: str or ~azure.servicefabric.models.EnvironmentVariableType\n :param name: The name of the environment variable.\n :type name: str\n :param value: The value of the environment variable, will be processed\n based on the type provided.\n :type value: str\n \"\"\"\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'value': {'key': 'value', 'type': 'str'},\n }\n\n def __init__(self, *, type=\"ClearText\", name: str=None, value: str=None, **kwargs) -> None:\n super(EnvironmentVariable, self).__init__(**kwargs)\n self.type = type\n self.name = name\n self.value = value\n","sub_path":"customSDK/servicefabric/models/environment_variable_py3.py","file_name":"environment_variable_py3.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"379502456","text":"\n\nfrom xai.brain.wordbase.verbs._clamour import _CLAMOUR\n\n# class header\nclass _CLAMOURED(_CLAMOUR):\n\tdef __init__(self): \n\t\t_CLAMOUR.__init__(self)\n\t\tself.name = \"CLAMOURED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"clamour\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_clamoured.py","file_name":"_clamoured.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"552251094","text":"#! /usr/bin/python3\r\nprint(\"GPU Loaded\")\r\nfrom Component import *\r\nimport os\r\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\r\nimport pygame\r\nGPU = Component()\r\n\r\ndef GPU_init():\r\n pygame.init()\r\n pygame.display.set_caption(\"Emulator\")\r\n GPU.WIDTH, GPU.HEIGHT = 1020, 765\r\n GPU.DIM = (GPU.WIDTH, GPU.HEIGHT)\r\n GPU.Screen = pygame.display.set_mode(GPU.DIM)\r\n GPU.Surface = pygame.Surface(GPU.DIM)\r\n GPU.PixelBuffer = pygame.PixelArray(GPU.Surface)\r\n GPU.X = 0\r\n GPU.Y = 0\r\n GPU.Update = False\r\n GPU.Clock = pygame.time.Clock()\r\n\r\ndef GPU_DVCSND(Inst,Data1,Data2,Data3,Data4,Data5,Data6,Data7):\r\n if Inst == 0x00: #SetXY\r\n GPU.X = (Data1+Data2+Data3+Data4)\r\n GPU.Y = (Data5+Data6+Data7)\r\n elif Inst == 0x02: #SetRGB\r\n GPU.R,GPU.G,GPU.B,GPU.A = Data1,Data2,Data3,Data4\r\n elif Inst == 0x04: #Plot\r\n GPU.PixelBuffer[GPU.X, GPU.Y] = (GPU.R,GPU.G,GPU.B,GPU.A)\r\n #print([GPU.X, GPU.Y])\r\n #GPU.Update = True # Pixel by pixel Rendering\r\n elif Inst == 0x05: #Update\r\n print(\"Plot Data done Rendering to screen\")\r\n GPU.Update = True \r\n \r\ndef Render(Object):\r\n screen = pygame.display.get_surface()\r\n screen.fill((255,255,255))\r\n screen.blit(Object,(0,0))\r\n pygame.display.flip()\r\n\r\ndef GPU_tick():\r\n pygame.event.get()\r\n if GPU.Update:\r\n GPU.Clock.tick()\r\n #print(GPU.Clock.get_fps())\r\n GPU.Update = False\r\n #Render(GPU.Surface)\r\n Render(GPU.PixelBuffer.make_surface())\r\n del GPU.PixelBuffer\r\n GPU.PixelBuffer = pygame.PixelArray(GPU.Surface)\r\n","sub_path":"Emulator/GPU.py","file_name":"GPU.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"301880898","text":"import cx_Oracle\r\nimport os\r\n# import user_info, popular_place, search, category,distance\r\nfrom user_info import login,regist\r\nfrom search import search\r\nfrom distance import distance\r\nfrom category import category\r\nfrom popular_place import popularity\r\nfrom history import history\r\nfrom bookmark import bookmark\r\n
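# NOTE (editor's addition, not part of the original file): a minimal sketch of\r\n# the same connection built via cx_Oracle.makedsn; host, port and service name\r\n# simply mirror the connect string below and are assumptions, not verified.\r\n# dsn = cx_Oracle.makedsn(\"localhost\", 1521, service_name=\"xe\")\r\n# connect = cx_Oracle.connect(user=\"miniproject\", password=\"0000\", dsn=dsn)\r\n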
\r\nLOCATION = r\"C:\\instantclient_19_12\"\r\nos.environ[\"PATH\"] = LOCATION + \";\" + os.environ[\"PATH\"] # register the Instant Client on PATH\r\n\r\nconnect = cx_Oracle.connect(\"miniproject/0000@localhost:1521/xe\")\r\ncursor = connect.cursor()\r\n\r\nID = None\r\n\r\nwhile ID is None:\r\n pick = input('[1] Log in, [2] Create account: ')\r\n if pick == '1':\r\n ID = login(cursor)\r\n elif pick == '2':\r\n regist(cursor)\r\n else:\r\n print('Invalid command.')\r\n\r\nwhile True:\r\n pick = input('[1] Address search [2] Distance [3] Search by type [4] Popular keywords [5] History [6] Bookmarks [7] Quit: ')\r\n if pick == '1':\r\n search(cursor,ID)\r\n elif pick == '2':\r\n distance(cursor)\r\n elif pick == '3':\r\n category(cursor)\r\n elif pick == '4':\r\n popularity(cursor)\r\n elif pick == '5':\r\n history(cursor,ID)\r\n elif pick == '6':\r\n bookmark(cursor,ID)\r\n elif pick == '7':\r\n print('Exiting.')\r\n break\r\n else:\r\n print('Unknown command.')\r\n\r\nconnect.commit()\r\ncursor.close()\r\nconnect.close()","sub_path":"MiniProject.py","file_name":"MiniProject.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"101977820","text":"try:\n Fact=int(input(\"Enter a number to find its factorial: \"))\n def CalC(a):\n if a<0:\n print(\"The factorial is undefined for numbers below 0\")\n else:\n result=1\n for x in range(1,(a+1)):\n result=result*x\n print(result)\n CalC(Fact)\nexcept ValueError:\n print(\"Not a valid number\")\n ","sub_path":"Assign_12.py","file_name":"Assign_12.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"604226059","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def hasPathSum(self, root: TreeNode, sum: int) -> bool:\n ans = False\n\n def recall(now, lft, rgt):\n nonlocal ans\n if lft is None and rgt is None:\n if now == sum:\n ans = True\n return\n\n if lft:\n recall(now+lft.val, lft.left, lft.right)\n if rgt:\n recall(now+rgt.val, rgt.left, rgt.right)\n\n if root:\n recall(root.val, root.left, root.right)\n return ans
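\n\n# NOTE (editor's addition, not part of the original solution): a tiny usage\n# sketch; the three-node tree below is an assumed example.\n# root = TreeNode(5); root.left = TreeNode(4); root.right = TreeNode(8)\n# Solution().hasPathSum(root, 9) # -> True, via the leaf path 5 -> 4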
","sub_path":"code_practice/binary_tree/sum_count.py","file_name":"sum_count.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"245993988","text":"class Node:\n def __init__(self):\n self.word = ''\n self.children = dict()\n\n\nclass Solution:\n def findWords(self, board, words):\n \"\"\"\n :type board: List[List[str]]\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n root = Node()\n for word in words:\n node = root\n for ch in word:\n if ch not in node.children:\n node.children[ch] = Node()\n node = node.children[ch]\n node.word = word\n ret = set()\n for r in range(len(board)):\n for c in range(len(board[0])):\n self.bfs(board, r, c, root, ret)\n return list(ret)\n\n def bfs(self, board, r, c, root, ret):\n rows, cols = len(board), len(board[0])\n deltas = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n q = [(r, c, set([self.getKey(r, c)]), root)]\n while len(q) > 0:\n nq = []\n for r0, c0, pathSet, node in q:\n ch = board[r0][c0]\n if ch not in node.children:\n continue\n curNode = node.children[ch]\n if curNode.word:\n ret.add(curNode.word)\n for dr, dc in deltas:\n r1, c1 = r0 + dr, c0 + dc\n key = self.getKey(r1, c1)\n if r1 < 0 or r1 >= rows or c1 < 0 or c1 >= cols or key in pathSet:\n continue\n newPathSet = pathSet.copy()\n newPathSet.add(key)\n nq.append((r1, c1, newPathSet, curNode))\n q = nq\n\n def getKey(self, r, c):\n return '{0},{1}'.format(r, c)\n\n\nsol = Solution()\nret = sol.findWords([[\"a\", \"b\"], [\"a\", \"a\"]], [\n \"aba\", \"baa\", \"bab\", \"aaab\", \"aaa\", \"aaaa\", \"aaba\"])\n# ret = sol.findWords([\n# ['o', 'a', 'a', 'n'],\n# ['e', 't', 'a', 'e'],\n# ['i', 'h', 'k', 'r'],\n# ['i', 'f', 'l', 'v']\n# ], [\"oath\", \"pea\", \"eat\", \"rain\"])\n","sub_path":"src/word-search-ii.py","file_name":"word-search-ii.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"560342506","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport pytta\n\n\ndef parseArgs(args):\n file = None\n for arg in args:\n if arg.split('.')[-1] in ['wav', 'WAV', 'Wav']:\n file = arg\n return file\n\n\nclass AudioPlayer(object):\n \"\"\"\n Example class for simple audio player app based on PyTTa\n \"\"\"\n\n def __init__(self, fileName=None):\n \"\"\"\n fileName is a str with the absolute or relative path to a WAVE file\n \"\"\"\n print(\"Welcome to MyAudioPlayer!\")\n print(\"To quit the program use the command:\\n -exit\")\n\n self.load(fileName)\n self.commandList = ['-load', '-play', '-pause', '-stop', '-exit']\n return\n\n def load(self, fileName=None):\n if fileName is None:\n print(\"Please, insert a file name: \")\n fileName = input()\n if fileName == '-exit':\n self.exit()\n self.file = fileName\n self.audio = pytta.read_wav(self.file)\n self.streaming = pytta.generate.stream('O', excitation=self.audio)\n print(\"Opened file\", self.file)\n print(\"Available commands are:\\n\", \"-play;\\n\", \"-pause;\\n\", \"-stop.\")\n return\n\n def play(self):\n \"\"\"\n Start playback of the wave file\n \"\"\"\n self.streaming.start()\n return\n\n def pause(self):\n \"\"\"\n Stop playback of the wave file\n \"\"\"\n self.streaming.stop()\n return\n\n def stop(self):\n \"\"\"\n Stop playback of the wave file and move the audio to the beginning\n \"\"\"\n self.streaming.stop()\n self.kn = 0\n return\n\n def exit(self):\n try:\n self.streaming.stop()\n except AttributeError:\n pass\n sys.exit()\n return\n\n def exec_(self):\n \"\"\"\n Application context for continuous read of command line arguments to\n control the reproduction of the audio file\n \"\"\"\n\n # It goes on, and on, and on, and on, and on, and on, ..., and on, ...\n while True:\n if self.file is None:\n self.load()\n\n # read command from command line\n command = input()\n\n # check if the command can be used by the application\n if command in self.commandList:\n\n # True: evaluates it as a function\n eval('self.' + command[1:] + '()')
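\n\n # NOTE (editor's addition): eval on raw user input is fragile; since the\n # commands map directly to method names, getattr(self, command[1:])()\n # would be an equivalent, safer dispatch here.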
\n\n # False: it is ignored\n else:\n print(\"Unknown command\", command, \"\\nSkipping.\")\n\n # until program closure\n return\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This IF statement guarantees that the execution of the file will only occur\n when explicitly told so, e.g.:\n\n ~ $ python audio_player.py mywavefile.wav\n\n \"\"\"\n file = parseArgs(sys.argv[:])\n player = AudioPlayer(file)\n sys.exit(player.exec_())\n","sub_path":"examples/audio_player.py","file_name":"audio_player.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"137508071","text":"# -*- coding: UTF-8 -*-\n#!/usr/bin/env python\n\n#-------------------------------------------------------------------------------\n# Name: Module 1\n# Purpose:\n#\n# Author: dell\n#\n# Created: 14/10/2014\n# Copyright: (c) dell 2014\n# Licence: \n#-------------------------------------------------------------------------------\nimport httplib, urllib\nimport json, time, sys\nfrom threading import Thread, Lock\n\nlock = Lock()\n\ndef httpRequest(url, params={},headers={}):\n #print url\n body=None\n if params:\n body=urllib.urlencode(params)\n\n protocol=\"http\"\n index=0\n if (r\"http://\" in url):\n url=url.replace(\"http://\",\"\")\n elif (r\"https://\" in url):\n protocol=\"https\"\n url=url.replace(\"https://\",\"\")\n else:\n pass\n\n ##print url\n if (\".com\" in url):\n index=url.find(\".com\")+4\n elif(\".cn\" in url):\n index=url.find(\".cn\")+3\n elif(\".net\" in url):\n index=url.find(\".net\")+4\n\n ##print url[:index+4]\n if (protocol==\"http\") :\n conn=httplib.HTTPConnection(url[:index])\n else:\n conn=httplib.HTTPSConnection(url[:index])\n conn.request(\"GET\", url[index:], body, headers)\n response = conn.getresponse()\n data = response.read()\n conn.close()\n return json.loads(data)\n\ndef getCurrentMarket(website):\n if (website==\"huobi\") :\n return getHuobiCurrentMarket()\n else :\n pass\n\ndef getRate():\n return 6.11\n## headers = {\"Content-type\":\"application/json\"}\n## headers[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.36\"\n## params = {\"from\":\"USD\",\"to\":\"CNY\"}\n## return httpRequest(r\"http://rate-exchange.appspot.com/currency\", params, headers)\n\ndef getHuobiCurrentMarket():\n return httpRequest(r\"http://market.huobi.com/staticmarket/ticker_btc_json.js\")\n\ndef getOkCoinCurrentMarket():\n return httpRequest(r\"https://www.okcoin.cn/api/ticker.do\")\n\ndef getBtStempCurrentMarket():\n #headers = {\"Content-type\":\"text/html\"}\n return httpRequest(r\"https://www.bitstamp.net/api/ticker/\")\n\nclass GrabData(Thread):\n def __init__(self):\n super(GrabData, self).__init__(name=\"GrabData\")\n self.setDaemon(True)\n\n def run(self):\n super(GrabData, self).run()\n self.working=True\n while self.working:\n lock.acquire()\n self.worker()\n lock.release()\n time.sleep(10)\n\n def worker(self):\n huobiInfo=getHuobiCurrentMarket()\n #print(huobiInfo)\n btStempInfo=getBtStempCurrentMarket()\n #print(btStempInfo)
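 # NOTE (editor's addition): 'rate' used below is the module-level USD/CNY\n # value returned by getRate(); the commented-out block inside getRate()\n # sketches the live quote that was evidently intended.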
if huobiInfo and btStempInfo:\n priceOfHuoBi = huobiInfo[\"ticker\"][\"last\"]\n priceOfBtStemp = btStempInfo[\"last\"]\n priceBetweenHuoBiAndBtStemp = float(priceOfBtStemp) * rate - float(priceOfHuoBi)\n sys.stdout.write(\"\\n\"+time.strftime('%Y/%m/%d %H:%M:%S',time.localtime(time.time())) + \" \")\n sys.stdout.write(u\"Huobi: %s BtStemp: %s Latest spread: %s\" % (priceOfHuoBi, priceOfBtStemp, priceBetweenHuoBiAndBtStemp))\n\n def join(self):\n self.working=False\n super(GrabData, self).join()\n\n##infoOfHuobi=getHuobiCurrentMarket()\n##infoOfOkcoin=getOkCoinCurrentMarket()\n##print infoOfHuobi\n##print infoOfOkcoin\n\nrate = getRate()\ngrab = GrabData()\ngrab.start()\n\nimport os\nos.system(\"pause\")\nsys.stdout.write(u\"Stopping the worker thread, please wait...\")\ngrab.join()\n\n##info=r\"http://market.huobi.com/staticmarket/ticker_btc_json.js\"\n##index1=0\n##if (\".com\" in info):\n## index1=info.find(\".com\")\n##print info[:index1+4]\n##print info[index1+4:]\n\n\n","sub_path":"market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"507440029","text":"import streamlit as st\nimport webbrowser\nimport pandas as pd\nimport datetime\n\n# convert survey answers to numeric scores\nans_output = []\nq_nums = ['Q1','Q2','Q3','Q4','Other']\ndef val(q):\n if q == 'Very good':\n ans_output.append(5)\n elif q == 'Good':\n ans_output.append(4)\n elif q == 'Neutral':\n ans_output.append(3)\n elif q == 'Bad':\n ans_output.append(2)\n elif q == 'Very bad':\n ans_output.append(1)\n else:\n ans_output.append(q)\n\n\nst.title('Welcome to 3D room tour!!!')\n#st.subheader('Choose a viewing format')\nurl1 = 'file:///' + 'test.html'\n\n# viewing\nview = st.button('View the 3D model')\nif view == True:\n webbrowser.open_new_tab(url1)\n\n# survey\nst.write(\"Answer the survey\")\nq = st.checkbox('Yes', value=False) # value is the initial state\nif q == True:\n q = ['Very good', 'Good', 'Neutral','Bad','Very bad']\n ans = []\n q1 = st.radio(\"Q1\",q)\n ans.append(q1)\n val(q1)\n q2 = st.radio(\"Q2\",q)\n ans.append(q2)\n val(q2)\n q3 = st.radio(\"Q3\",q)\n ans.append(q3)\n val(q3)\n q4 = st.radio(\"Q4\",q)\n ans.append(q4)\n val(q4)\n qf = st.text_input('Other')\n ans.append(qf)\n val(qf)\n #print(ans_output)\n \n # submit the survey\n send = st.button('Submit survey')\n if send == True:\n # build a DataFrame\n df = pd.DataFrame([ans_output], columns=q_nums)\n path = '/Users/komakoma/Box/test/'\n # write the CSV file\n now = datetime.datetime.now()\n\n df.to_csv('answer/answers' + now.strftime('%Y_%m_%d_%H_%M') + '.csv',encoding='utf_8_sig')\n # display\n #df","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"40889708","text":"import json\nimport unittest\nfrom unittest import mock\n\n\nfrom app import app\n\n\nclass TestClientMethods(unittest.TestCase):\n\n\n def test_get_species_ok(self):\n response = app.test_client().get('/species')\n self.assertEqual(response.status_code, 200)\n\n # @mock.patch('app.get_access')\n def test_get_access_ok(self):\n response = app.test_client().get('/access')\n self.assertEqual(response.status_code, 200)\n\n # @mock.patch('app.get_species_by_id')\n def test_get_species_by_id_ok(self):\n response = app.test_client().get('/species/1')\n data = json.loads(response.get_data(as_text=True))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['name'], \"siams\")\n\n # @mock.patch('app.create_animal')\n def test_create_animal_unauth(self):\n response = app.test_client().post('/animals')\n self.assertEqual(response.status_code, 401)\n\n # @mock.patch('app.get_species_by_id')\n def test_get_species_by_id_not_ok(self):\n response = app.test_client().get('/species/122222')\n self.assertEqual(response.status_code, 400)\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/unittests/tst.py","file_name":"tst.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"212738568","text":"# handle ZeroDivisionError\nprint(\"Input two numbers:\")\nprint(\"Input 'q' to exit\")\nwhile True:\n first = input(\"The first number: \")\n if first == 'q':\n break\n second = input(\"The second number: \")\n try:\n answer = int(first) / int(second)\n except ZeroDivisionError:\n print(\"You can't divide by zero!\")\n else: # the else branch runs only when the try block succeeded\n print(str(answer))\n# handle FileNotFoundError\nfilename = 'alice.txt'\ntry:\n with open(filename) as file_object:\n message = file_object.read()\nexcept FileNotFoundError:\n msg = \"The file \" + filename + \" does not exist.\"\n print(msg)\n","sub_path":"Python Files/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"527346619","text":"from setuptools import setup\nfrom setuptools import find_packages\n\nlong_description = '''\nnnn'''\n\nsetup(name='kaggle_humpback',\n version='1.0.0',\n description='Kaggle Humpback challenge',\n long_description=long_description,\n author='',\n author_email='',\n url='https://github.com/maxjeblick/kaggle-humpback',\n download_url='https://github.com/maxjeblick/kaggle-humpback',\n license='MIT',\n install_requires=[],\n extras_require={\n },\n classifiers=[\n ],\n packages=find_packages())\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"121534596","text":"from django.contrib.postgres.search import TrigramSimilarity\nfrom rest_framework import viewsets\n\nfrom tags.models import Tag\nfrom tags.api.serializers import TagSerializer\n\n\nMIN_SEARCH_LENGTH = 2\n\n\nclass TagViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = TagSerializer\n\n def get_queryset(self):\n \"\"\"Filter data according to search query.\"\"\"\n\n qs = Tag.objects.all()\n q = self.request.query_params.get('q', '')\n if len(q) >= MIN_SEARCH_LENGTH:\n qs = qs \\\n .annotate(similarity=TrigramSimilarity('name', q)) \\\n .filter(name__trigram_similar=q) \\\n .order_by('-similarity', 'name')\n\n return qs\n","sub_path":"src/tags/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"432934734","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/mburger/Work/Research/NeutralCloudModel/nexoclom/build/lib/nexoclom/Input.py\n# Compiled at: 2019-02-20 14:44:49\n# Size of source mod 2**32: 14435 bytes\nimport os, os.path, numpy as np, psycopg2\nfrom astropy.io import ascii\nimport astropy.units as u\nfrom .produce_image import ModelImage\nfrom .modeldriver import modeldriver\nfrom .configure_model import configfile\nfrom .input_classes import Geometry, StickingInfo, Forces, SpatialDist, SpeedDist, AngularDist, Options\n\nclass Input:\n\n def __init__(self, infile, verbose=False):\n self.database, self.savepath, self.datapath = configfile()\n self.inputfile = infile\n if os.path.isfile(infile):\n data = ascii.read(infile, delimiter='=', comment=';', data_start=0,\n names=['Param', 'Value'])\n else:\n assert 0, 'Input file {} does not exist.'.format(infile)
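 # NOTE (editor's addition): assert statements are stripped under python -O,\n # so this decompiled guard silently disappears there; raising\n # FileNotFoundError(infile) would be a sturdier equivalent.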
section = [d.split('.')[0].casefold() for d in data['Param']]\n param = [d.split('.')[1].casefold() for d in data['Param']]\n values = [v.split(';')[0].strip().casefold() if ';' in v else v.casefold() for v in data['Value']]\n gparam = {b:c for a, b, c in zip(section, param, values) if a == 'geometry'}\n self.geometry = Geometry(gparam)\n sparam = {b:c for a, b, c in zip(section, param, values) if a == 'sticking_info'}\n self.sticking_info = StickingInfo(sparam)\n fparam = {b:c for a, b, c in zip(section, param, values) if a == 'forces'}\n self.forces = Forces(fparam)\n sparam = {b:c for a, b, c in zip(section, param, values) if a == 'spatialdist'}\n self.spatialdist = SpatialDist(sparam)\n sparam = {b:c for a, b, c in zip(section, param, values) if a == 'speeddist'}\n self.speeddist = SpeedDist(sparam)\n aparam = {b:c for a, b, c in zip(section, param, values) if a == 'angulardist'}\n self.angulardist = AngularDist(aparam, self.spatialdist)\n oparam = {b:c for a, b, c in zip(section, param, values) if a == 'options'}\n self.options = Options(oparam, self.geometry.planet)\n\n def __str__(self):\n print(self.geometry)\n print(self.sticking_info)\n print(self.forces)\n print(self.spatialdist)\n print(self.speeddist)\n print(self.angulardist)\n print(self.options)\n return ''\n\n def findpackets(self):\n \"\"\"\n Search the database for identical inputs\n \"\"\"\n con = psycopg2.connect(host='localhost', database=(self.database))\n cur = con.cursor()\n dtor = np.pi / 180.0\n\n def isNone(x):\n try:\n q = x.value\n except:\n q = x\n\n if type(q) == str:\n if q is None:\n return 'is NULL'\n return f\"= '{q}'\"\n if q is None:\n return 'is NULL'\n return f\"= {q}\"\n\n def inRange(field, x, delta):\n return f\"ABS({field} - {x}) <= {delta / 2}\"\n\n geometry = self.geometry\n objs = [obj.object for obj in geometry.objects]\n objs.sort()\n objs2 = ','.join(objs)\n phi = [p.value for p in geometry.phi]\n if not phi[0] == 0.0:\n raise AssertionError('There is a problem with phi')\n else:\n dtaa = (5.0 * u.deg).to(u.rad)\n taa = [geometry.taa - dtaa / 2.0, geometry.taa + dtaa / 2.0]\n taa = [taa[0].value, taa[1].value]\n if taa[0] < 0.0:\n taabit = '(taa>={} or taa<{})'.format(2 * np.pi + taa[0], taa[1])\n else:\n if taa[1] > 2 * np.pi:\n taabit = '(taa>={} or taa<{})'.format(taa[0], taa[1] % (2 * np.pi))\n else:\n taabit = inRange('taa', geometry.taa.value, dtaa.value)\n ptxt = [inRange('phi[{}]'.format(i + 1), p, 5.0 * dtor) for i, p in enumerate(phi)]\n ptxt2 = ' and '.join(ptxt)\n query = \"SELECT geo_idnum FROM geometry\\n WHERE planet='{}' and\\n StartPoint='{}' and\\n objects=ARRAY['{}']::SSObject[] and\\n {} and\\n {} and\\n {} and\\n {}\".format(geometry.planet.object, geometry.startpoint, objs2, ptxt2, inRange('subsolarpt[0]', geometry.subsolarpoint[0].value, 5 * dtor), inRange('subsolarpt[1]', geometry.subsolarpoint[1].value, 5 * dtor), taabit)\n cur.execute(query)\n if cur.rowcount == 0:\n return ([], 0, 0)\n else:\n result = cur.fetchall()\n georesult = [r[0] for r in result]\n sticking_info = self.sticking_info\n geostr = [str(i) for i in georesult]\n geostr = '(' + ', '.join(geostr) + ')'\n if sticking_info.stickcoef == 1:\n query = 'SELECT st_idnum FROM sticking_info\\n WHERE stickcoef=1 and\\n st_idnum in {}'.format(geostr)\n else:\n query = 'SELECT st_idnum FROM 
sticking_info\\n WHERE stickcoef={} and\\n tsurf {} and\\n stickfn {} and\\n stick_mapfile {} and\\n epsilon {} and\\n n {} and\\n tmin {} and\\n emitfn {} and\\n accom_mapfile {} and\\n st_idnum in {}'.format(sticking_info.stickcoef, isNone(sticking_info.tsurf), isNone(sticking_info.stickfn), isNone(sticking_info.stick_mapfile), isNone(sticking_info.epsilon), isNone(sticking_info.n), isNone(sticking_info.tmin), isNone(sticking_info.emitfn), isNone(sticking_info.accom_mapfile), geostr)\n cur.execute(query)\n if cur.rowcount == 0:\n return ([], 0, 0)\n result = cur.fetchall()\n stickresult = [s[0] for s in result]\n stickstr = [str(i) for i in stickresult]\n stickstr = '(' + ', '.join(stickstr) + ')'\n forces = self.forces\n query = 'SELECT f_idnum FROM forces\\n WHERE gravity=%s and\\n radpres=%s and\\n f_idnum in {}'.format(stickstr)\n cur.execute(query, (\n forces.gravity, forces.radpres))\n result = cur.fetchall()\n if cur.rowcount == 0:\n return ([], 0, 0)\n forceresult = [s[0] for s in result]\n spatialdist = self.spatialdist\n forcestr = [str(i) for i in forceresult]\n forcestr = '(' + ', '.join(forcestr) + ')'\n if spatialdist.longitude is None:\n long0 = 'longitude[1] = 0.'\n long1 = 'longitude[2] = 0.'\n else:\n long0 = inRange('longitude[1]', spatialdist.longitude[0].value, 5 * dtor)\n long1 = inRange('longitude[2]', spatialdist.longitude[1].value, 5 * dtor)\n if spatialdist.latitude is None:\n lat0 = 'latitude[1] = 0.'\n lat1 = 'latitude[2] = 0.'\n else:\n lat0 = inRange('latitude[1]', spatialdist.latitude[0].value, 5 * dtor)\n lat1 = inRange('latitude[2]', spatialdist.latitude[1].value, 5 * dtor)\n query = \"SELECT spat_idnum FROM spatialdist\\n WHERE type = '{}' and\\n {} and\\n use_map {} and\\n mapfile {} and\\n {} and\\n {} and\\n {} and\\n {} and\\n spat_idnum in {}\".format(spatialdist.type, inRange('exobase', spatialdist.exobase, 0.05), isNone(spatialdist.use_map), isNone(spatialdist.mapfile), long0, long1, lat0, lat1, forcestr)\n cur.execute(query)\n if cur.rowcount == 0:\n return ([], 0, 0)\n else:\n result = cur.fetchall()\n spatresult = [s[0] for s in result]\n speeddist = self.speeddist\n spatstr = [str(i) for i in spatresult]\n spatstr = '(' + ', '.join(spatstr) + ')'\n if speeddist.vprob is None:\n vstr = 'vprob is NULL'\n else:\n vstr = inRange('vprob', speeddist.vprob.value, speeddist.vprob.value * 0.05)\n if speeddist.temperature is None:\n Tstr = 'temperature is NULL'\n else:\n Tstr = inRange('temperature', speeddist.temperature.value, speeddist.temperature.value * 0.05)\n query = \"SELECT spd_idnum FROM speeddist\\n WHERE type = '{}' and\\n {} and\\n sigma {} and\\n U {} and\\n alpha {} and\\n beta {} and\\n {} and\\n delv {} and\\n spd_idnum in {}\".format(speeddist.type, vstr, isNone(speeddist.sigma), isNone(speeddist.U), isNone(speeddist.alpha), isNone(speeddist.beta), Tstr, isNone(speeddist.delv), spatstr)\n cur.execute(query)\n if cur.rowcount == 0:\n return ([], 0, 0)\n result = cur.fetchall()\n speedresult = [s[0] for s in result]\n angdist = self.angulardist\n spdstr = [str(i) for i in speedresult]\n spdstr = '(' + ', '.join(spdstr) + ')'\n if angdist.azimuth is None:\n az0 = 'azimuth[1] is NULL'\n az1 = 'azimuth[2] is NULL'\n else:\n az0 = inRange('azimuth[1]', angdist.azimuth[0].value, 5 * dtor)\n az1 = inRange('azimuth[2]', angdist.azimuth[1].value, 5 * dtor)\n if angdist.altitude is None:\n alt0 = 'altitude[1] is NULL'\n alt1 = 'altitude[2] is NULL'\n else:\n alt0 = inRange('altitude[1]', angdist.altitude[0].value, 5 * dtor)\n alt1 = 
inRange('altitude[2]', angdist.altitude[1].value, 5 * dtor)\n n = isNone(angdist.n)\n query = \"SELECT ang_idnum from angulardist\\n WHERE type = '{}' and\\n {} and {} and\\n {} and {} and\\n n {} and\\n ang_idnum in {}\".format(angdist.type, az0, az1, alt0, alt1, n, spdstr)\n cur.execute(query)\n if cur.rowcount == 0:\n return ([], 0, 0)\n result = cur.fetchall()\n angresult = [a[0] for a in result]\n options = self.options\n angstr = [str(i) for i in angresult]\n angstr = '(' + ', '.join(angstr) + ')'\n endtime = inRange('endtime', options.endtime.value, options.endtime.value * 0.05)\n outeredge = isNone(options.outeredge)\n nsteps = isNone(options.nsteps)\n res = isNone(options.resolution)\n query = \"SELECT opt_idnum from options\\n WHERE {} and\\n resolution {} and\\n at_once = {} and\\n atom = '{}' and\\n lifetime = {} and\\n fullsystem = {} and\\n outeredge {} and\\n motion = {} and\\n streamlines = {} and\\n nsteps {} and\\n opt_idnum in {}\".format(endtime, res, options.at_once, options.atom, options.lifetime.value, options.fullsystem, outeredge, options.motion, options.streamlines, nsteps, angstr)\n cur.execute(query)\n if cur.rowcount == 0:\n return ([], 0, 0)\n result = cur.fetchall()\n finalresult = [str(a[0]) for a in result]\n finalresult = '(' + ', '.join(finalresult) + ')'\n query = 'SELECT filename, npackets, totalsource FROM outputfile\\n WHERE idnum in {}'.format(finalresult)\n cur.execute(query)\n result = cur.fetchall()\n filenames = [r[0] for r in result]\n npackets = sum((r[1] for r in result))\n totalsource = sum((r[2] for r in result))\n return (\n filenames, npackets, totalsource)\n\n def run(self, npackets, overwrite=False, compress=True):\n modeldriver(self, npackets, overwrite, compress)\n\n def produce_image(self, format_, filenames=None):\n return ModelImage(self, format_, filenames=filenames)","sub_path":"pycfiles/nexoclom-2.0.18-py3.7/Input.cpython-37.py","file_name":"Input.cpython-37.py","file_ext":"py","file_size_in_byte":12983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372819722","text":"from flask import g,current_app as app\nfrom flask_login import current_user\nfrom flask_wtf import CSRFProtect, FlaskForm\nfrom wtforms import StringField, TextField, SubmitField,validators,SelectMultipleField,PasswordField,BooleanField \\\n,IntegerField,HiddenField,SelectField,ValidationError,TextAreaField\nfrom wtforms.validators import (DataRequired,\n Email,\n EqualTo,\n Length,\n URL)\nfrom wtforms_sqlalchemy.fields import QuerySelectField,QuerySelectMultipleField\nfrom ..models.user import User,Roles,getRoles,user_roles\nfrom ..models.product import ProductAttribute,ArticleCategory,Article\nfrom ..share.helpers import Pagination\nfrom sqlalchemy import func,exc\nimport datetime,re\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\n\"\"\"\n fix Flask WTForms and WTForms-SQLAlchemy QuerySelectField produce too many values to unpack \n\"\"\"\nfrom ..models.fix_wtf_sql import fix_wtfsql\nfix_wtfsql()\n\ndef mapUpdateForm(model,id=None):\n \n class BaseForm(FlaskForm):\n\n @classmethod\n def update_data(cls,form,item):\n try:\n form.populate_obj(item)\n db.session.add(item) \n db.session.commit()\n except exc.SQLAlchemyError as e:\n \"\"\" 捕獲錯誤, 否則無法回傳\n \"\"\"\n db.session().rollback() \n error = str(e.__dict__['orig'])\n raise Exception(error)\n #raise Exception('資料庫更新失敗!!')\n \n\n @classmethod\n def insert_data(cls,form,item):\n try:\n form.populate_obj(item)\n db.session.add(item) \n 
db.session.commit()\n except exc.SQLAlchemyError as e:\n \"\"\" 捕獲錯誤, 否則無法回傳\n \"\"\"\n db.session().rollback() \n error = str(e.__dict__['orig'])\n raise Exception(error)\n #raise Exception('資料庫新增失敗!!')\n \n @classmethod\n def get_count(cls,q):\n count_q = q.statement.with_only_columns([func.count()]).order_by(None)\n count = q.session.execute(count_q).scalar()\n return count\n \n @classmethod\n def get_query(cls,filters,page,per_page):\n import math\n page=int(page)\n per_page=int(per_page)\n #get all rows before doing pagination !!\n q = cls.get_model().query.filter(*filters)\n q_count = cls.get_count(q)\n #修正不正確的頁數顯示\n if (page-1)*per_page > q_count:\n page = math.ceil(q_count / per_page)\n return page,q_count,q.limit(per_page).offset((page-1)*per_page).all()\n \n @classmethod\n def get_form(cls,id=None):\n update_layout = cls.get_layout()\n if id: #\"\"\"for update\"\"\"\n item = cls.get_model().query.get(id)\n form = cls(obj=item)\n else: #\"\"\"for insert\"\"\"\n item = cls.get_model()() # 這裡不能只回傳function , 再實例它才可以:()()\n form = cls()\n\n return form,item,update_layout\n @classmethod\n def get_pagination(cls,total,page=1, per_page=10): \n #total = cls.get_total()\n return Pagination(page, per_page, total)\n \n @classmethod\n def get_list(cls,page=1,per_page=10,search=None):\n filters = cls.get_search_filters(search)\n \n page,count,rows = cls.get_query(filters,page,per_page) \n out_rows = cls.get_listrows(rows)\n\n return page,out_rows, cls.get_pagination(count,page,per_page) \n \n @classmethod\n def get_template(cls):\n return 'update.html' \n \n @classmethod\n def format_datetime(cls,_datetime):\n return _datetime.strftime(\"%Y/%m/%d %H:%M\") #not second :%S\n \n class UpdateArticle(BaseForm):\n title = StringField('標題', [\n DataRequired()],render_kw={'class':'form-control'})\n active = BooleanField('有效', \n render_kw={'class':'form-check-input'})\n category = QuerySelectField('上層', \n query_factory=ArticleCategory.get_tree_for_article,\n allow_blank=True, \n render_kw={'class':'form-control'}) #todo: 若是母層, 則不能被指定為文章類別\n content = TextAreaField('內容',[\n DataRequired()],render_kw={'class':'form-control','id':'editor'}) #init ckeditor 必須參照這裡的做法:TextAreaField,'id':'editor'\n \n submit = SubmitField('存檔', \n render_kw={'class':'btn btn-default'})\n \n @classmethod\n def update_data(cls,form,item):\n form.populate_obj(item)\n #item.author = g.user.email\n item.updated = datetime.datetime.now() \n db.session.add(item) \n db.session.commit()\n\n @classmethod\n def insert_data(cls,form,item):\n form.populate_obj(item)\n item.author = g.user.email\n db.session.add(item) \n db.session.commit() \n \n @classmethod\n def get_fields(cls):\n return [('name','名稱'),('category','類別'),('active','有效'),('created','建立日'),('updated','更新日'),('author','作者')]\n \n @classmethod\n def get_model(cls):\n return Article\n \n @classmethod\n def get_search_filters(cls,search):\n model = cls.get_model()\n filters = []\n if search:\n if 'title' in search and search.title.data:\n filters.append(model.title.like('%{}%'.format(search.title.data)))\n if 'content' in search and search.content.data:\n filters.append(model.content.like('%{}%'.format(search.content.data)))\n if 'author' in search and search.author.data:\n filters.append(model.author.like('%{}%'.format(search.author.data)))\n if 'category' in search and search.category.data:\n statement = db.text(\n \"\"\"\n WITH RECURSIVE category_path (id, path) AS\n (\n SELECT id, name as path\n FROM articlecategory\n WHERE parent_id ==:id /*IS NULL or ==2*/\n UNION 
ALL\n SELECT c.id, cp.path|| ' > '|| c.name as path\n FROM category_path AS cp JOIN articlecategory AS c\n ON cp.id = c.parent_id\n )\n SELECT * FROM category_path\n ORDER BY path;\n \"\"\")\n #child = [i.id for i in db.session.execute(statement,{\"id\":search.category.data.id})]\n #filters.append(model.category_id.in_(child))\n filters.append(model.category_id==search.category.data.id)\n return filters\n \n @classmethod\n def get_listrows(cls,rows):\n out_rows = []\n for row in rows:\n out_rows.append([''.format(row.id),\n '{}'.format(row.id,row.title),\n row.category if row.category else \"無\" ,'有效' if row.active else '失效',\n cls.format_datetime(row.created),cls.format_datetime(row.updated),row.author])\n return out_rows\n \n @classmethod\n def get_layout(cls): \n return [\n ['title'],['active','category'],['content']\n ] \n \n class UpdateArticleCategory(BaseForm):\n #todo: \n # .validate 要加上自己上層不能是自己下層\n # .validate 若有下層不能刪除\n # .validate 若文章目錄有文章, 不能刪除\n name = StringField('名稱', [\n DataRequired()],render_kw={'class':'form-control'})\n parent = QuerySelectField('上層', \n query_factory=ArticleCategory.get_tree(id),\n allow_blank=True, \n render_kw={'class':'form-control'})\n is_leaf = BooleanField('文章目錄', \n render_kw={'class':'form-check-input'})\n\n submit = SubmitField('存檔', \n render_kw={'class':'btn btn-default'}) \n \n @classmethod\n def get_fields(cls):\n return [('name','名稱'),('parent','上層'),('is_leaf','文章目錄')]\n \n @classmethod\n def get_model(cls):\n return ArticleCategory\n\n @classmethod\n def get_form(cls,id=None):\n update_layout = cls.get_layout()\n if id: #\"\"\"for update\"\"\"\n \"\"\" 自訂表單輸出:若是母層,把is_leaf 欄位拿掉 \n \"\"\"\n item = cls.get_model().query.get(id)\n form = cls(obj=item)\n if ArticleCategory.is_has_child(id):\n del form.is_leaf\n update_layout[0].remove('is_leaf')\n\n else: #\"\"\"for insert\"\"\"\n item = cls.get_model()() # 這裡不能只回傳function , 再實例它才可以:()()\n form = cls()\n return form,item,update_layout\n \n @classmethod\n def get_search_filters(cls,search):\n model = cls.get_model()\n filters = []\n if search:\n if 'name' in search and search.name.data:\n filters.append(model.name.like('%{}%'.format(search.name.data)))\n if 'parent' in search and search.parent.data:\n statement = db.text(\n \"\"\"\n WITH RECURSIVE category_path (id, path) AS\n (\n SELECT id, name as path\n FROM articlecategory\n WHERE parent_id ==:id /*IS NULL or ==2*/\n UNION ALL\n SELECT c.id, cp.path|| ' > '|| c.name as path\n FROM category_path AS cp JOIN articlecategory AS c\n ON cp.id = c.parent_id\n )\n SELECT * FROM category_path\n ORDER BY path;\n \"\"\")\n child = [i.id for i in db.session.execute(statement,{\"id\":search.parent.data.id})]\n filters.append(model.id.in_(child))\n return filters\n \n @classmethod\n def get_listrows(cls,rows):\n out_rows = []\n for row in rows:\n out_rows.append([''.format(row.id),\n '{}'.format(row.id,row.name),\n row.parent if row.parent else \"無\",\n \"文章目錄\" if row.is_leaf else \"類別目錄\"])\n return out_rows\n \n @classmethod\n def get_layout(cls): \n return [\n ['name','parent','is_leaf']\n ] \n \n class UpdateProductAttribute(BaseForm):\n name = StringField('屬性名稱', [\n DataRequired()],render_kw={'class':'form-control'})\n submit = SubmitField('存檔', \n render_kw={'class':'btn btn-default'}) \n \n @classmethod\n def get_fields(cls):\n return [('name','名稱')] \n \n @classmethod\n def get_model(cls):\n return ProductAttribute\n \n @classmethod\n def get_search_filters(cls,search):\n model = cls.get_model()\n filters = []\n if search:\n if 'name' in 
search and search.name.data:\n filters.append(model.name.like('%{}%'.format(search.name.data)) )\n return filters\n\n @classmethod\n def get_listrows(cls,rows):\n out_rows = []\n for row in rows:\n out_rows.append([''.format(row.id),\n '{}'.format(row.id,row.name)])\n return out_rows\n \n @classmethod\n def get_layout(cls): \n return [\n ['name']\n ]\n \n class UpdateUser(BaseForm):\n \n \"\"\"Contact form.\"\"\"\n name = StringField('名稱', [\n DataRequired()],render_kw={'class':'form-control'})\n email = StringField('Email', [\n Email(message=('郵件格式有問題, 請確認')),\n DataRequired()],render_kw={'class':'form-control'}) \n source = SelectField('帳號來源', [\n DataRequired(),\n Length(min=4, message=('Your message is too short.'))], \n render_kw={'class':'form-control'},\n choices = [('google', 'google'),\n ('facebook', 'facebook'),\n ('local', '本地')])\n roles = QuerySelectMultipleField('權限', \n query_factory=getRoles, \n render_kw={'class':'form-control'})\n active = BooleanField('有效', \n render_kw={'class':'form-check-input'}) \n\n submit = SubmitField('存檔', \n render_kw={'class':'btn btn-default'}) \n \n @classmethod\n def get_model(cls):\n return User\n \n @classmethod\n def update_data(cls,form,item):\n form.populate_obj(item)\n db.session.add(item)\n item.roles.clear()\n for role in form.roles.data:\n item.roles.append(role) \n db.session.commit()\n \n @classmethod\n def get_fields(cls):\n return [('name','名稱'),('email','郵箱'),('active','有效'),('source','來源'),('roles','權限')] \n\n @classmethod\n def get_search_filters(cls,search):\n filters = []\n if search:\n if 'name' in search and search.name.data:\n filters.append(User.name==search.name.data)\n if 'email' in search and search.email.data:\n filters.append(User.email.like('%{}%'.format(search.email.data)) ) \n if 'source' in search and search.source.data:\n filters.append(User.source==search.source.data) \n if 'roles' in search and search.roles.data:\n filters.append(User.roles.any(Roles.role==search.roles.data.role))\n if 'active' in search and search.active.data:\n filters.append(User.active==search.active.data) \n return filters\n\n @classmethod\n def get_listrows(cls,rows):\n out_rows = []\n for row in rows:\n out_rows.append([''.format(row.id),\n '{}'.format(row.id,row.name),\n row.email,'有效' if row.active else '失效',row.source,'{}'.format(row.id,row.roles)])\n return out_rows\n \n @classmethod\n def get_layout(cls): \n return [\n ['name','email','source'],\n ['roles','active']\n ]\n\n class UserRolesForm(BaseForm):\n #from ..models.user import Roles,getRoles\n id = HiddenField('id')\n name = StringField('Name', render_kw={'readonly': True})\n roles = QuerySelectMultipleField('Roles', \n query_factory=getRoles)\n submit = SubmitField('Submit') \n \n # Custom validate\n def validate(self):\n # ... 
custom validation\n \n return True \n\n @classmethod\n def update_data(cls,form,item):\n item.roles.clear()\n for role in form.roles.data:\n item.roles.append(role)\n db.session.commit()\n \n @classmethod\n def get_form(cls,id):\n item = User.query.get(id)\n return cls(obj=item),item\n \n model_form = {\"User\":(\"帳戶\",UpdateUser),\"UserRoles\":(\"帳戶權限\",UserRolesForm)\n ,\"ProductAttribute\":(\"商品屬性\",UpdateProductAttribute),\"Article\":(\"文章\",UpdateArticle)\n ,\"ArticleCategory\":(\"文章類別\",UpdateArticleCategory)}\n return model_form[model]\n \ndef mapSearchForm(model): \n\n class SearchArticleCategory(FlaskForm):\n \n parent = QuerySelectField('上層', \n query_factory=ArticleCategory.get_tree_for_search,\n allow_blank=True, \n render_kw={'class':'form-control'})\n name = StringField('名稱', render_kw={'class':'form-control'})\n\n class SearchArticle(FlaskForm):\n \n category = QuerySelectField('上層', \n query_factory=ArticleCategory.get_tree_for_article,\n allow_blank=True, \n render_kw={'class':'form-control'})\n title = StringField('標題', render_kw={'class':'form-control'})\n content = StringField('內容', render_kw={'class':'form-control'})\n \n class SearchProductAttribute(FlaskForm):\n name = StringField('屬性名稱', render_kw={'class':'form-control'})\n\n class SearchUser(FlaskForm):\n \n \"\"\"Contact form.\"\"\"\n name = StringField('名稱', render_kw={'class':'form-control'})\n email = StringField('Email', \n render_kw={'class':'form-control'}) \n source = SelectField('帳號來源', \n render_kw={'class':'form-control'},\n choices = [('',''),('google', 'google'),\n ('facebook', 'facebook'),\n ('local', '本地')])\n roles = QuerySelectField('權限', \n query_factory=getRoles,\n allow_blank=True, \n render_kw={'class':'form-control'})\n active = SelectField('有效', \n render_kw={'class':'form-control'},\n choices = [('',''),('1', '有效'),('0', '無效')])\n\n \n\n\n class SearchUserRoles(FlaskForm):\n pass\n\n\n model_form = {\"User\":SearchUser,\"UserRoles\":SearchUserRoles,\"ProductAttribute\":SearchProductAttribute\n ,\"ArticleCategory\":SearchArticleCategory,\"Article\":SearchArticle}\n return model_form[model]\n \n\n\ndef mapDeleteForm(model):\n \n class DeleteUser():\n @classmethod\n def delete_data(cls,item):\n try:\n \"\"\"檢查:管理員不能刪除\"\"\"\n for user in item:\n _del = User.query.filter(User.id==user).first()\n if _del.email == current_app.config['ADMIN']:\n return \"此帳號-{},是管理員,不能刪除!\".format(_del.name)\n if _del.id ==current_user.id:\n return \"此帳號-{},己登入,不能刪除!\".format(_del.name)\n\n str_roles = \"delete from user_roles where user_id in (:item);\"\n db.session.execute(str_roles,{'item':item})\n stm = User.__table__.delete().where(User.id.in_(item))\n db.session.execute(stm)\n db.session.commit()\n except exc.SQLAlchemyError as e:\n \"\"\" 捕獲錯誤, 否則無法回傳\n \"\"\"\n db.session().rollback() \n if 'orig' in e.__dict__:\n return str(e.__dict__['orig'])\n #raise e\n return '刪除失敗!!'\n return None \n\n class DeleteUserRoles():\n pass\n\n class DeleteProductAttribute():\n \n @classmethod\n def delete_data(cls,item):\n try:\n stm = ProductAttribute.__table__.delete().where(ProductAttribute.id.in_(item))\n db.session.execute(stm)\n db.session.commit()\n except exc.SQLAlchemyError as e:\n \"\"\" 捕獲錯誤, 否則無法回傳\n \"\"\"\n db.session().rollback() \n if 'orig' in e.__dict__:\n return str(e.__dict__['orig'])\n return '刪除失敗!!'\n return None \n\n class DeleteArticleCategory():\n @classmethod\n def delete_data(cls,item):\n try:\n #檢查是否有目錄及文章在此目錄下\n for cat in item:\n articles = Article.query.filter(Article.category_id == 
cat).all()\n if articles:\n return '刪除失敗!!尚有文章在此目錄下'\n categories = ArticleCategory.query.filter(ArticleCategory.parent_id == cat).all()\n if categories:\n return '刪除失敗!!尚有其它目錄在此目錄下'\n stm = ArticleCategory.__table__.delete().where(ArticleCategory.id==cat)\n db.session.execute(stm)\n db.session.commit()\n except exc.SQLAlchemyError as e:\n \"\"\" 捕獲錯誤, 否則無法回傳\n \"\"\"\n db.session().rollback() \n if 'orig' in e.__dict__:\n return str(e.__dict__['orig'])\n return '刪除失敗!!'\n return None \n\n class DeleteArticle():\n @classmethod\n def delete_data(cls,item):\n try:\n stm = Article.__table__.delete().where(Article.id.in_(item))\n db.session.execute(stm)\n db.session.commit()\n except exc.SQLAlchemyError as e:\n \"\"\" 捕獲錯誤, 否則無法回傳\n \"\"\"\n db.session().rollback() \n if 'orig' in e.__dict__:\n return str(e.__dict__['orig'])\n return '刪除失敗!!'\n return None \n\n model_form = {\"User\":DeleteUser,\"UserRoles\":DeleteUserRoles,\"ProductAttribute\":DeleteProductAttribute\n ,\"ArticleCategory\":DeleteArticleCategory,\"Article\":DeleteArticle}\n return model_form[model]\n \n ","sub_path":"data/app/application/admin/forms_admin.py","file_name":"forms_admin.py","file_ext":"py","file_size_in_byte":22652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"199854","text":"import math\nfrom decimal import *\n\nif __name__ == \"__main__\":\n n = input()\n t = n[-1]\n if t == \"2\" or t == \"4\" or t == \"5\" or t == \"7\" or t == \"9\":\n print(\"hon\")\n elif t == \"0\" or t == \"1\" or t == \"6\" or t == \"8\":\n print(\"pon\")\n else:\n print(\"bon\")\n","sub_path":"procon-archive/atcoder.jp/abc168/abc168_a/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246446289","text":"\"\"\"-----------------------------------------------------------------------------\n Viral Shot Weapon\n\n Slow moving pellets that multiply exponentially.\n-----------------------------------------------------------------------------\"\"\"\n\nfrom ..base_weapon import Base_Weapon\nfrom ..base_projectile import Base_Projectile\nfrom modules.shockwave import play_sound\nimport random\n\nclass Weapon(Base_Weapon):\n def __init__(self):\n Base_Weapon.__init__(\n self,\n mechanism_type=\"semiautomatic\",\n name=\"viralshot\",\n display_name=\"Viral Shot\",\n projectile=Projectile,\n ammo_count=4,\n heat=50,\n fire_sound=\"gun1\")\n\n\nclass Projectile(Base_Projectile):\n def __init__(self, robot):\n Base_Projectile.__init__(\n self,\n robot,\n name=\"viral\",\n damage=12,\n hit_radius=14,\n velocity=16,\n angle_scatter=12,\n life=0.5,)\n self.generation = 0\n\n def on_expiry(self):\n for i in [-15, 15]:\n if ((random.random() * 8) + 2) > self.generation:\n projectile = Projectile(self.robot)\n projectile.location = self.location\n projectile.heading = self.heading + i + ((random.random() - 0.5) * 10)\n projectile.overburn = self.overburn\n projectile.generation = self.generation + 1\n projectile.life = self.life - (self.life / 4)\n self.robot.match.projectiles.append(projectile)\n","sub_path":"modules/equipment_modules/weapon_equipment/Viral_Shot.py","file_name":"Viral_Shot.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316945600","text":"'''\n\tDaniel Jacobs 2020\n'''\n\nimport numpy as np\nimport h5py\nfrom matplotlib import pyplot as plt\nimport 
argparse\nimport random as rand\nfrom generation_utils import find_nearest\n\ndef plot_with_pure(pure_signals, noisy_signals, signal_parameters):\n\tprint(signal_parameters)\n\tfig, axes = plt.subplots(nrows=3)\n\tplt.subplots_adjust(hspace=0.45, top=0.93, bottom=0.1)\n\n\n\tfor i, det in enumerate(['H1', 'L1', 'V1']):\n\t\taxes[i].plot(noisy_signals[det].sample_times, noisy_signals[det], 'b')\n\t\taxes[i].plot(pure_signals[det].sample_times, pure_signals[det], 'r')\n\n\n\tfor i, detname in enumerate(['Hanford', 'Livingston', 'Virgo']):\n\t\taxes[i].set_ylabel('{0} strain'.format(detname))\n\t\t#add line at merge point\n\t\taxes[i].axvline(x=0, color='black', ls='--', lw=1)\n\n\taxes[2].set_xlabel('Seconds from merger')\n\tparameters = 'Mass1={:.2f}, Mass2={:.2f}, SNR={:.2f}, Spin1={:.2f}, Spin2={:.2f}, Distance={:.2f}, RA={:.2f}, Dec={:.2f}'.format(\n\t\t\t\t\t\tsignal_parameters['m1'], signal_parameters['m2'], signal_parameters['snr'], \n\t\t\t\t\t\tsignal_parameters['x1'], signal_parameters['x2'], signal_parameters['dist'], \n\t\t\t\t\t\tsignal_parameters['ra'], signal_parameters['dec'])\n\tplt.figtext(0.5, 0.95, parameters, fontsize=8, ha='center')\n\tplt.gcf().set_size_inches(12, 6, forward=True)\n\treturn fig\n\ndef plot_sigs(h1_pair, l1_pair, v1_pair, signal_parameters):\n\t(h1_strain, h1_time) = h1_pair\n\t(l1_strain, l1_time) = l1_pair\n\t(v1_strain, v1_time) = v1_pair\n\tparam_dict = {param[0]: float(param[1]) for param in signal_parameters}\n\tfig, axes = plt.subplots(nrows=3)\n\tplt.subplots_adjust(hspace=0.45, top=0.93, bottom=0.1)\n\n\tzIdx = find_nearest(h1_time,0)\n\taxes[0].plot(h1_time[zIdx:], h1_strain[zIdx:], label='Hanford')\n\tzIdx = find_nearest(l1_time,0)\n\taxes[1].plot(l1_time[zIdx:], l1_strain[zIdx:], label='Livingston')\n\tzIdx = find_nearest(v1_time,0)\n\taxes[2].plot(v1_time[zIdx:], v1_strain[zIdx:], label='Virgo')\n\n\tfor i, detname in enumerate(['Hanford', 'Livingston', 'Virgo']):\n\t\taxes[i].set_ylabel('{0} strain'.format(detname))\n\t\t#add line at merge point\n\t\taxes[i].axvline(x=0, color='black', ls='--', lw=1)\n\n\taxes[2].set_xlabel('Seconds from merger')\n\tparameters = 'Mass1={:.2f}, Mass2={:.2f}, SNR={:.2f}, Spin1={:.2f}, Spin2={:.2f}, Distance={:.2f}, RA={:.2f}, Dec={:.2f}'.format(\n\t\t\t\t\t\tparam_dict['m1'], param_dict['m2'], param_dict['snr'], \n\t\t\t\t\t\tparam_dict['x1'], param_dict['x2'], param_dict['dist'], param_dict['ra'], param_dict['dec'])\n\tplt.figtext(0.5, 0.95, parameters, fontsize=8, ha='center')\n\tplt.gcf().set_size_inches(12, 6, forward=True)\n\treturn fig\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Plot a signal given in hdf format')\n\tparser.add_argument('-i', '--input')\n\targs = vars(parser.parse_args())\n\n\twith h5py.File(args['input'], 'r') as f:\n\t\tprint(list(f.keys()))\n\t\tprint(f['signals']['Signal parameters'])\n\t\tprint(f['signals']['H1 strain'])\n\t\tprint(f['signals']['H1 times'])\n\n\n\t\th1_strain = f['signals']['H1 strain']\n\t\th1_times = f['signals']['H1 times']\n\t\tl1_strain = f['signals']['L1 strain']\n\t\tl1_times = f['signals']['L1 times']\n\t\tv1_strain = f['signals']['V1 strain']\n\t\tv1_times = f['signals']['V1 times']\n\t\tsignal_parameters = f['signals']['Signal parameters']\n\n\t\tmasses = []\n\t\tsnrs = []\n\t\tspins = []\n\t\tfor i in range(signal_parameters.shape[0]):\n\t\t\tparams = {d[0]: float(d[1].replace('e-','')) for d in signal_parameters[i,:,:]}
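\t\t\t# NOTE (editor's addition): the replace('e-','') above mangles values\n\t\t\t# written in scientific notation (e.g. '1.5e-3' becomes '1.53'), so the\n\t\t\t# histogrammed numbers below are only approximate for such entries.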
\t\t\tmasses.append(params['m1'])\n\t\t\tmasses.append(params['m2'])\n\n\t\t\tspins.append(params['x1'])\n\t\t\tspins.append(params['x2'])\n\t\t\tsnrs.append(params['snr'])\n\t\tmasses = np.array(masses)\n\t\tspins = np.array(spins)\n\t\tsnrs = np.array(snrs)\n\n\t\tfig, axes = plt.subplots(nrows=3)\n\t\taxes[0].hist(masses, bins=80)\n\t\taxes[0].set_xlabel('Component masses (M-Sun)')\n\t\taxes[1].hist(spins, bins=100)\n\t\taxes[1].set_xlabel('Component spins')\n\t\taxes[2].hist(snrs, bins=20, label='SNRs')\n\t\taxes[2].set_xlabel('Injected signal SNR')\n\t\tfig.suptitle('Distributions of Masses, Spins and SNRs')\n\t\tplt.subplots_adjust(hspace=0.45)\n\t\tplt.show()","sub_path":"dataset generation/plot_signal.py","file_name":"plot_signal.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"300892080","text":"import unittest\nimport os\nfrom operations import *\nfrom test.common import *\nfrom disco.util import jobname\n\nclass TestMinDim(unittest.TestCase):\n\tdef validate(self, expDim, actDim):\n\t\tassert expDim == actDim, \"expecting dimension (%d,%d) but received (%d,%d)\" % (expDim[0], expDim[1], actDim[0], actDim[1])\n\n\tdef test1(self):\n\t\t\"\"\"\n\t\tTest normal usage.\n\t\t\"\"\"\n\t\trandom.seed(13)\n\t\tm, n = 101, 51\n\t\tsparsityA = 0.5\n\t\tprotocolA = MatrixWrapper.RAW\n\t\tdfsDirA = None\n\t\tmaxCoresA = 11\n\t\t# instantiate A\n\t\tA = randomSparseMatrix(m, n, sparsityA)\n\t\tAwrap = MatrixWrapper.wrapMatrix(A, protocolA, dfsDirA, maxCoresA)\n\t\t# get minimum dimension \n\t\tdim = minDim(disco, Awrap)\n\t\t# validate\n\t\tself.validate((m,n), dim)\n\n\tdef test2(self):\n\t\t\"\"\"\n\t\tTest empty matrix \n\t\t\"\"\"\n\t\trandom.seed(13)\n\t\tm, n = 99, 111\n\t\tsparsityA = 1 \n\t\tprotocolA = MatrixWrapper.RAW\n\t\tdfsDirA = None\n\t\tmaxCoresA = 5\n\t\t# instantiate A\n\t\tA = randomSparseMatrix(m, n, sparsityA)\n\t\tAwrap = MatrixWrapper.wrapMatrix(A, protocolA, dfsDirA, maxCoresA)\n\t\t# get minimum dimension \n\t\tdim = minDim(disco, Awrap)\n\t\t# validate\n\t\tself.validate((0,0), dim)\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"examples/discolala/test/testOperations/testMinDim.py","file_name":"testMinDim.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"81109182","text":"from pawns.pawn import pawn as basePawn\r\nfrom direct.actor.Actor import Actor\r\n\r\nclass PandaPawn(basePawn):\r\n \"\"\"\r\n the pawn known as panda containing everything that only applies to this pawn\r\n subclass of basePawn\r\n \"\"\"\r\n def __init__(self, id, hmap):\r\n \"initiate the pawn using the hmap and the id referring to the position the pawn is at\"\r\n basePawn.__init__(self, id, hmap)\r\n self.actor = self.generateActor()\r\n \"the actor for rendering, created by the generateActor func\"\r\n\r\n def generateActor(self):\r\n \"\"\"creates and initiates the actor for rendering\r\n \"\"\"\r\n actor = Actor(\"panda\")\r\n actor.setScale(.1, .1, .1)\r\n return actor\r\n","sub_path":"pyplaygrounds/pawns/panda.py","file_name":"panda.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"129357494","text":"def bwt(inputStr:str) -> str:\n \"\"\"\n Burrows-Wheeler-Transform\n \"\"\"\n
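 # NOTE (editor's addition): worked example for these two functions:\n # bwt('ABC') returns chr(0) + 'CAB', and rbw(chr(0) + 'CAB') returns 'ABC'.\n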
newRow = inputStr*2\n bwMatrix = []\n for i in range(len(inputStr)): bwMatrix.append(newRow[i:len(inputStr)+i])\n\n \"\"\"\n Description of what follows:\n bwMatrix = [2,3,1,4,5] -> Unsorted list\n sortedBwMatrix = [1,2,3,4,5] -> Sorted list\n indexBwMatrix = [2,0,1,3,4] -> Indices of the unsorted list, sorted by key\n bwPosition = 1 -> Index in sorted list of the first element in unsorted list\n \"\"\"\n sortedBwMatrix = sorted(bwMatrix)\n indexBwMatrix = sorted(range(len(bwMatrix)),key=lambda x: bwMatrix[x])\n bwPosition = indexBwMatrix.index(0)\n\n outputStr = chr(bwPosition)\n for row in sortedBwMatrix: outputStr += row[-1]\n return outputStr\n\ndef rbw(inputStr:str) -> str:\n \"\"\"\n Reverse-Burrows-Wheeler\n \"\"\"\n outputSelect = ord(inputStr[0])\n inputStr = inputStr[1:]\n bwMatrix = sorted(list(inputStr))\n for i in range(len(inputStr)-1):\n for j in range(len(inputStr)):\n bwMatrix[j]=inputStr[j]+bwMatrix[j]\n bwMatrix.sort()\n return bwMatrix[outputSelect]\n\ndef testBurrowsWheeler():\n testString = 'PABLOPABLITOCLAVOUNCLAVITO'\n print('-'*50)\n print(' > Test String = '+ testString)\n bwtString = bwt(testString)\n print(' > BWT String = '+ bwtString)\n rbwString = rbw(bwtString)\n print(' > RBW String = '+ rbwString)\n print('-'*50)\n\ndef testBinary():\n fileName = 'background.png'\n file_path = rf'fileTest\{fileName}'\n inputFile = open(file_path,'rb')\n bytesString = inputFile.read()\n print(bytesString)\n print('-'*50)\n # charString = ''.join(int.from_bytes(bytes, byteorder='big') for byte in bytesString)\n charString = bytesString.decode()\n print(charString)\n print('-'*50)\n\ndef testHeader():\n # listA = [('A',1),('B',2)]\n # listB = [('C',3)]\n # listC = []\n # listC += listA\n # listC += listB\n\n # listA = [['']*3]*3\n # listA = [['','',''],['','',''],['','','']]\n listA = [['' for j in range(3)] for i in range(3)]\n
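 # NOTE (editor's addition): the commented [['']*3]*3 form above would alias\n # one inner list three times, so listA[1][2] = 'a' would change every row;\n # the comprehension builds three independent rows.\n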
count is cached.\n mock_request.user = UserFactory()\n\n # Test that a user with nominations remaining proceeds to the view.\n with override_settings(NOMINATIONS_PER_MONTH=100):\n view_instance.dispatch(mock_request)\n mock_dispatch.assert_called_once()\n mock_dispatch.assert_called_with(mock_request)\n\n def test_nominate_existing(self):\n \"\"\"\n Test that nominating an exsting snacks correctly creates a Nomination instance\n and redirects the user back to 'snacksdb:vote'.\n \"\"\"\n user = UserFactory()\n self.client.force_login(user)\n\n nomination_qs = Nomination.objects.filter(snack_id=1001, user=user)\n self.assertEqual(nomination_qs.count(), 0)\n\n response = self.client.post(\n self.view_url,\n {\n 'snack_id': \"{id}{delim}{name}\".format(\n id=1001,\n delim=Nominate.DELIMITER,\n name='Apples'\n ),\n }\n )\n\n self.assertEqual(nomination_qs.count(), 1)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response['Location'], reverse('snacksdb:vote'))\n\n @mock.patch('snacksdb.utils.SnackAPISource.suggest')\n def test_nominate_new_success(self, mock_suggest):\n \"\"\"\n Test that nominating a new snack correctly invokes AbstractSnackSource.suggest,\n creates a Nomination instance, and redirects the user to 'snacksdb:vote'.\n \"\"\"\n user = UserFactory()\n self.client.force_login(user)\n mock_suggest.return_value = {'id': 1002, 'name': 'Bananas'}\n post_data = {'name': 'Bananas', 'location': 'Safeway'}\n\n nomination_qs = Nomination.objects.filter(snack_id=1002, user=user)\n self.assertEqual(nomination_qs.count(), 0)\n\n response = self.client.post(self.view_url, post_data)\n\n mock_suggest.assert_called_once()\n mock_suggest.assert_called_with(latitude=None, longitude=None, **post_data)\n self.assertEqual(nomination_qs.count(), 1)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response['Location'], reverse('snacksdb:vote'))\n\n @mock.patch('snacksdb.utils.SnackAPISource.list')\n @mock.patch('snacksdb.utils.SnackAPISource.suggest')\n def test_nominate_new_failure(self, mock_suggest, mock_list):\n \"\"\"\n Test that nominating a new snack with invalid form data re-renders\n the form with the validation errors.\n \"\"\"\n user = UserFactory()\n self.client.force_login(user)\n post_data = {\n 'name': 'Oranges', 'location': 'Food Lion',\n 'latitude': 112.87, 'longitude': 0.00\n }\n\n nomination_qs = Nomination.objects.filter(user=user)\n self.assertEqual(nomination_qs.count(), 0)\n\n response = self.client.post(self.view_url, post_data)\n\n mock_suggest.assert_not_called()\n mock_list.assert_called_once()\n self.assertEqual(nomination_qs.count(), 0)\n self.assertEqual(response.status_code, 200)\n\n @override_settings(NOMINATIONS_PER_MONTH=5)\n @mock.patch('snacksdb.views.Nominate.get_unnominated_snacks')\n def test_get_context_data(self, mock_unnominated_snacks):\n mock_unnominated_snacks.return_value = 'PARTICULARRETURNVALUE'\n\n # Set up the view instance with a (fake) request and a user.\n view_instance = Nominate()\n view_instance.request = mock.MagicMock(user=UserFactory())\n NominationFactory(user=view_instance.request.user)\n NominationFactory(user=view_instance.request.user)\n\n context = view_instance.get_context_data()\n\n self.assertEqual(context['nominations_remaining'], 3)\n self.assertEqual(context['unnominated_snacks'], 'PARTICULARRETURNVALUE')\n self.assertEqual(context['delimiter'], view_instance.DELIMITER)\n\n mock_unnominated_snacks.assert_called_once()\n mock_unnominated_snacks.assert_called_with()\n\n 
@mock.patch('snacksdb.utils.SnackAPISource.list')\n def test_get_unnominated_snacks(self, mock_list):\n \"\"\"\n Test that Nominate.get_unnominated_snacks returns a list of optional\n snacks that haven't been nominated yet this month.\n \"\"\"\n view_instance = Nominate()\n mock_list.return_value = [\n {'id': 1001, 'optional': True},\n {'id': 1002, 'optional': False},\n {'id': 1003, 'optional': True},\n {'id': 1004, 'optional': True},\n ]\n\n # Nominate an optional snack. This one shouldn't in the list.\n NominationFactory(snack_id=1001)\n\n # Nominate an optional snack a year in the past. This one should be in the list.\n when = timezone.datetime(2017, 1, 1, 0, 0, 0, tzinfo=get_tzinfo())\n NominationFactory.make_in_the_past(when, snack_id=1003)\n\n # We should only get back snacks that aren't optional and haven't been nominated this month.\n expected_snacks = [\n {'id': 1003, 'optional': True},\n {'id': 1004, 'optional': True},\n ]\n\n self.assertEqual(view_instance.get_unnominated_snacks(), expected_snacks)\n\n @mock.patch('snacksdb.views.Nominate.finalize_nomination')\n @mock.patch('snacksdb.views.Nominate.form_invalid')\n @mock.patch('snacksdb.utils.SnackAPISource.suggest')\n def test_form_valid(self, mock_suggest, mock_fi, mock_fn):\n view_instance = Nominate()\n view_instance.request = mock.MagicMock()\n form = mock.MagicMock()\n\n # Test that Nominate.form_invalid is called when\n # the source raises a SnackSourceException.\n mock_suggest.side_effect = SnackSourceException('exception!')\n view_instance.form_valid(form)\n mock_suggest.assert_called_once()\n mock_fi.assert_called_once()\n mock_fi.assert_called_with(form)\n mock_fn.assert_not_called()\n\n # Reset the mocks.\n mock_suggest.reset_mock()\n mock_suggest.side_effect = None\n mock_fi.reset_mock()\n mock_fn.reset_mock()\n\n # Test that Nominate.finalize_nomination is called when no exception is raised.\n mock_suggest.return_value = {'id': 1001, 'name': 'Apples'}\n view_instance.form_valid(form)\n mock_suggest.assert_called_once()\n mock_fn.assert_called_once()\n mock_fn.assert_called_with(1001, 'Apples')\n mock_fi.assert_not_called()\n\n def test_finalize_nomination(self):\n \"\"\"\n Test that Nominate.finalize_nomination correctly creates a Nomination\n instance and returns a redirect to 'snacksdb:vote'.\n \"\"\"\n user = UserFactory()\n view_instance = Nominate()\n view_instance.request = mock.MagicMock(user=user)\n\n self.assertEqual(Nomination.objects.filter(user=user).count(), 0)\n\n response = view_instance.finalize_nomination(1001, 'Apples')\n\n self.assertEqual(Nomination.objects.filter(snack_id=1001, user=user).count(), 1)\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response['Location'], reverse('snacksdb:vote'))\n","sub_path":"snacksdb/tests/views/test_Nominate.py","file_name":"test_Nominate.py","file_ext":"py","file_size_in_byte":8594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"128709666","text":"from typing import Iterator\nimport logging\nimport os\nimport urllib\n\nimport requests\n\nlogger = logging.getLogger(__name__)\n\n\nclass Client:\n def __init__(self):\n self.scheme = \"https\"\n self.netloc = \"api.github.com\"\n self.session = requests.Session()\n self.session.headers[\"Accept\"] = \"application/vnd.github.v3+json\"\n\n @classmethod\n def from_env(\n cls, user_var: str = \"GITHUB_USER\", password_var: str = \"GITHUB_TOKEN\"\n ):\n user = os.getenv(user_var)\n password = os.getenv(password_var)\n client = cls()\n 
client.session.auth = (user, password) # HTTPBasicAuth(user, password)\n logger.debug(\"Authenticating with user %r and password\", user)\n return client\n\n @classmethod\n def from_token(cls, token: str):\n client = cls()\n client.session.headers[\"Authorization\"] = f\"token {token}\"\n logger.debug(\"Authenticating with token: ...%s\", token[-8:])\n return client\n\n def request(self, url: str, method: str = \"GET\", **kwargs) -> requests.Response:\n \"\"\"\n Perform generic GitHub API request, returning Response model.\n \"\"\"\n # set default scheme and netloc and separate query\n scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)\n url = urllib.parse.urlunsplit(\n (scheme or self.scheme, netloc or self.netloc, path, None, fragment)\n )\n # merge query from url with params, giving precedence to values from url\n if query:\n kwargs.setdefault(\"params\", {}).update(urllib.parse.parse_qsl(query))\n logger.debug(\"Requesting URL: %s\", url)\n response = self.session.request(method, url, **kwargs)\n logger.debug(\"Response headers: %s\", response.headers)\n response.raise_for_status()\n return response\n\n def iter_responses(self, url: str, **kwargs) -> Iterator[requests.Response]:\n \"\"\"\n Iterate over paginated responses.\n \"\"\"\n kwargs.setdefault(\"params\", {}).setdefault(\"per_page\", 100)\n try:\n response = self.request(url, **kwargs)\n except requests.exceptions.HTTPError as exc:\n # intercept '409 Conflict' errors (which indicate an empty repo)\n if exc.response.status_code == 409:\n logger.debug(\"Stopping due to 409 error: %s\", exc)\n return\n # re-raise all other errors\n raise\n yield response\n link_header = response.headers.get(\"Link\", \"\")\n links = {\n link[\"rel\"]: link[\"url\"]\n for link in requests.utils.parse_header_links(link_header)\n }\n # the last page has no rel=\"next\" link\n if \"next\" in links:\n yield from self.iter_responses(links[\"next\"], **kwargs)\n\n def iter_items(self, url: str, **kwargs) -> Iterator:\n \"\"\"\n Assuming each response is a JSON list.\n \"\"\"\n for response in self.iter_responses(url, **kwargs):\n yield from response.json()\n\n def iter_first_and_last_responses(\n self, url: str, **kwargs\n ) -> Iterator[requests.Response]:\n kwargs.setdefault(\"params\", {}).setdefault(\"per_page\", 100)\n response = self.request(url, **kwargs)\n yield response\n link_header = response.headers.get(\"Link\", \"\")\n links = {\n link[\"rel\"]: link[\"url\"]\n for link in requests.utils.parse_header_links(link_header)\n }\n # jump to last page if there are multiple pages\n if \"last\" in links:\n response = self.request(links[\"last\"], **kwargs)\n yield response\n\n def iter_repos(\n self,\n username: str = None,\n org: str = None,\n type: str = None, # all | owner | public | private | forks | sources | member | internal\n sort: str = None, # created | updated | pushed | full_name\n direction: str = None, # asc | desc\n visibility: str = None, # all | public | private\n affiliation: str = None, # owner + collaborator + organization_member\n ) -> Iterator[dict]:\n \"\"\"\n Docs: https://docs.github.com/en/rest/reference/repos#list-repositories-for-the-authenticated-user\n https://docs.github.com/en/rest/reference/repos#list-repositories-for-a-user\n https://docs.github.com/en/rest/reference/repos#list-organization-repositories\n \"\"\"\n path = \"/user/repos\"\n if username:\n path = f\"/users/{username}/repos\"\n elif org:\n path = f\"/orgs/{org}/repos\"\n params = {\n \"type\": type,\n \"sort\": sort,\n \"direction\": 
direction,\n \"visibility\": visibility,\n \"affiliation\": affiliation,\n }\n yield from self.iter_items(path, params=params)\n\n def iter_commits(\n self,\n owner: str,\n repo: str,\n sha: str = None,\n path: str = None,\n author: str = None,\n since: str = None,\n until: str = None,\n ) -> Iterator[dict]:\n \"\"\"\n Docs: https://docs.github.com/en/rest/reference/repos#list-commits\n \"\"\"\n params = {\n \"sha\": sha,\n \"path\": path,\n \"author\": author,\n \"since\": since,\n \"until\": until,\n }\n yield from self.iter_items(f\"/repos/{owner}/{repo}/commits\", params=params)\n\n def iter_branches(\n self,\n owner: str,\n repo: str,\n protected: str = None,\n ) -> Iterator[dict]:\n \"\"\"\n Docs: https://docs.github.com/en/rest/reference/repos#list-branches\n \"\"\"\n params = {\"protected\": protected}\n yield from self.iter_items(f\"/repos/{owner}/{repo}/branches\", params=params)\n\n def iter_all_commits(\n self, owner: str, repo: str, author: str = None\n ) -> Iterator[dict]:\n \"\"\"\n Iterate over all commits in all branches.\n \"\"\"\n branches = list(self.iter_branches(owner, repo))\n logger.debug(\n \"Getting all commits from all %d branches: %s\",\n len(branches),\n \", \".join(branch[\"name\"] for branch in branches),\n )\n seen_shas = set()\n for branch in branches:\n branch_name = branch[\"name\"]\n for commit in self.iter_commits(\n owner, repo, sha=branch_name, author=author\n ):\n sha = commit[\"sha\"]\n # since we're always iterating upward through the tree, from HEAD to the initial commit,\n # I think (?) we can assume that if we run into a SHA we've already seen,\n # continuing along that path will only retrace our steps from a previous branch\n # (But idk, maybe there's some weird merge functionality that might void that guarantee?)\n if sha in seen_shas:\n logger.debug(\n \"Breaking out of branch %r early (already seen %r)\",\n branch_name,\n sha,\n )\n break\n seen_shas.add(sha)\n yield commit\n","sub_path":"git_utils/github/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"491635070","text":"# other.py\n\nfrom typing import Union, Any, Callable, Tuple, Dict, Iterable, List\nfrom pathlib import Path\nimport pickle\nimport time\nfrom functools import wraps\nfrom urllib.request import urlopen\nimport sys\nfrom concurrent.futures import as_completed, ProcessPoolExecutor, ThreadPoolExecutor\n\nimport pandas as pd\nimport json\nimport numpy as np\nimport PIL\nfrom IPython.display import display\n\n\ndef new_save(out_path: Path, data, file_format: str='pickle'):\n \"\"\"(Over)write data to new pickle/json file.\"\"\"\n out_path.parent.mkdir(parents=True, exist_ok=True)\n if file_format == 'pickle':\n with open(out_path, \"wb\") as f:\n pickle.dump(data, f)\n elif file_format == 'json':\n with open(out_path, \"w\") as f:\n json.dump(data, f, indent=4)\n\n\ndef read_saved(in_path: Path, file_format: str='pickle'):\n \"\"\"Read saved pickle/json file.\"\"\"\n if file_format == 'pickle':\n with open(in_path, \"rb\") as f:\n data = pickle.load(f)\n elif file_format == 'json':\n with open(in_path, \"r\") as f:\n data = json.load(f)\n return data\n\n\ndef read_or_new_save(path: Path,\n default_data: Union[Callable, Any],\n callable_args: Dict=None,\n file_format: str='pickle'\n ) -> Any:\n \"\"\"Write data to new pickle/json file or read pickle/json if that file already exists.\n\n Example:\n df = 
cgeo.other.read_or_new_save(path=Path('output\\preprocessed_marker_small.pkl'),\n default_data=preprocess_vector,\n callable_args={'inpath': fp_fields, 'meta': meta})\n Args:\n path: in/output pickle/json file path.\n file_format: Either 'pickle' or 'json'.\n default_data: Data that is written to a pickle/json file if the pickle/json does not already exist.\n When giving a function, do not call the function, only give the function\n object name. Function arguments can be provided via callable_args.\n callable_args: args for additional function arguments when default_data is a callable function.\n\n Returns:\n Contents of the read or newly created pickle/json file.\n \"\"\"\n try:\n if file_format == 'pickle':\n data = read_saved(path, file_format=file_format)\n elif file_format == 'json':\n data = read_saved(path, file_format=file_format)\n print(f'Reading from {file_format} file... {path.name}')\n except (FileNotFoundError, OSError, IOError, EOFError):\n if not callable(default_data):\n data = default_data\n else:\n if callable_args is None:\n data = default_data()\n else:\n data = default_data(**callable_args)\n print(f'Writing new {file_format} file... {path.name}')\n if file_format == 'pickle':\n new_save(out_path=path, data=data, file_format=file_format)\n elif file_format == 'json':\n new_save(out_path=path, data=data, file_format=file_format)\n\n return data\n\n\ndef lprun(func):\n \"\"\"Line profile decorator.\n\n Put @lprun on the function you want to profile.\n From pavelpatrin: https://gist.github.com/pavelpatrin/5a28311061bf7ac55cdd\n \"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n from line_profiler import LineProfiler\n prof = LineProfiler()\n try:\n return prof(func)(*args, **kwargs)\n finally:\n prof.print_stats()\n return wrapper\n\n\ndef printfull(df):\n \"\"\"Displays full dataframe (deactivates rows/columns wrapper). Prints if not in Notebook.\"\"\"\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n display(df)\n\n\ndef sizeof_memvariables(locals):\n \"\"\"Prints size of all variables in memory in human readable output.\n \n By Fred Cirera, after https://stackoverflow.com/a/1094933/1870254\n \"\"\"\n\n def sizeof_fmt(num, suffix='B'):\n for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\n for name, size in sorted(((name, sys.getsizeof(value)) for name,value in locals().items()),\n key=lambda x: -x[1])[:10]:\n print(\"{:>30}: {:>8}\".format(name, sizeof_fmt(size)))\n \n\ndef download_url(url, out_path):\n \"\"\"Download file from URL.\n\n Example: download_file(\"url\", 'data.tif')\n \"\"\"\n print('downloading {} to {}'.format(url, out_path))\n with open(out_path, \"wb\") as local_file:\n local_file.write(urlopen(url).read())\n\n\ndef print_file_tree(dir: Path=None):\n \"\"\"Print file tree of the selected directory.\n\n Taken from https://realpython.com/python-pathlib/\n\n Args:\n dir: The directory to print the file tree for. 
Defaults to current working directory.\n \"\"\"\n if dir is None:\n dir = Path.cwd()\n print(f'+ {dir}')\n for path in sorted(dir.rglob('*')):\n depth = len(path.relative_to(dir).parts)\n spacer = ' ' * depth\n print(f'{spacer}+ {path.name}')\n\n\ndef track_time(task):\n \"\"\"Track time start/end of running function.\"\"\"\n start_time = time.time()\n state = task.status()['state']\n print('RUNNING...')\n while state in ['READY', 'RUNNING']:\n time.sleep(3)\n state = task.status()['state']\n elapsed_time = time.time() - start_time\n print('Done in', elapsed_time, 's')\n print(task.status())\n\n\ndef multithread_iterable(func: Callable,\n iterable: Iterable,\n func_kwargs: Dict=None,\n max_workers: int=2,\n iter_is_tuple=False):\n \"\"\"Wrapper for simplified multithreading of iterable.\n\n Uses concurrent.futures.ThreadPoolExecutor instead of manually spinning up threads via the threading module.\n\n Args:\n func: callable function.\n iterable: list, generator etc. that should be iterated over via one thread per iteration. If the iterable\n yields a tuple,\n func_kwargs: additional function arguments.\n max_workers: number of threads.\n iter_is_tuple: Set True if iterable yields tuples.\n\n Returns:\n The function return value in a list.\n\n Example:\n def task(i, iter, add=2): # i and iter are required arguments!\n print(\"Processing {}\".format(i))\n return iter*iter + add\n print(multithreading(func=task, iterable=[2,3,4], func_kwargs={'add':10}, max_workers=2))\n \"\"\"\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n if not iter_is_tuple:\n futures = [executor.submit(func, i, iter, **func_kwargs) for i, iter in enumerate(iterable)]\n else:\n futures = [executor.submit(func, i, *iter, **func_kwargs) for i, iter in enumerate(iterable)]\n\n res = [fut.result() for fut in as_completed(futures)]\n return res\n\n\ndef roman_numbers_to_arrays(text_list: List[str],\n fontsize: int = 12,\n display=True\n ) -> List[np.array]:\n \"\"\"Create binary arrays displaying Roman numbers.\n\n Inspired by https://stackoverflow.com/questions/36384353/generate-pixel-matrices-from-characters-in-string\n Args:\n text_list: List of Roman numbers as string. 
Defaults to I-X.\n fontsize: Should be at least 12, otherwise deformations\n display: In addition to returning the arrays plot them.\n\n Returns:\n List of binary numpy arrays, all with the same dimensions.\n\n Example: roman_arrays = roman_to_pixels(['I', 'II'], 22, display=False)\n \"\"\"\n font = PIL.ImageFont.truetype('arialbd.ttf', fontsize)\n\n if not text_list:\n roman = {1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI', 7: 'VII', 8: 'VIII', 9: 'IX', 10: 'X'}\n text_list = list(roman.values())\n if fontsize < 12:\n raise ValueError('fontsize needs to be at least 12, smaller will cause font deformations')\n\n widths = []\n for text in text_list:\n w, h = font.getsize(text) # calc the size of text in pixels\n h *= 2\n widths.append(w)\n w, h = max(widths), h\n\n arrays = []\n for text in text_list:\n image = PIL.Image.new('L', (w, h), 1)\n draw = PIL.ImageDraw.Draw(image)\n draw.text((0, 0), text, font=font)\n arr = np.asarray(image)\n arr = np.where(arr, 0, 1)\n arr = arr[(arr != 0).any(axis=1)]\n arrays.append(arr)\n\n if display is True:\n result = np.where(arr, '#', ' ')\n print('shape', arr.shape)\n print('\\n'.join([''.join(row) for row in result]))\n\n return arrays\n","sub_path":"cgeo/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":8808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145116699","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, exceptions, _\nfrom datetime import datetime, timedelta\n\n\nPROGRESS_INFO = [('draft', 'draft'), ('open', 'Open'), ('closed', 'Closed')]\n\n\n# Week Schedule\nclass MonthAttendance(models.Model):\n _name = \"month.attendance\"\n _rec_name = \"period_id\"\n\n period_id = fields.Many2one(comodel_name=\"period.period\", string=\"Month\", required=True)\n month_detail = fields.One2many(comodel_name=\"time.attendance\",\n inverse_name=\"month_id\",\n string=\"Month Detail\")\n progress = fields.Selection(PROGRESS_INFO, string='Progress', default=\"draft\")\n company_id = fields.Many2one(comodel_name=\"res.company\",\n string=\"Company\",\n default=lambda self: self.env.user.company_id.id,\n readonly=True)\n\n _sql_constraints = [('unique_period_id', 'unique (period_id)', 'Error! 
Month must be unique')]\n\n def get_days_in_month(self):\n from_date = self.period_id.from_date\n till_date = self.period_id.till_date\n\n from_date_obj = datetime.strptime(from_date, \"%Y-%m-%d\")\n till_date_obj = datetime.strptime(till_date, \"%Y-%m-%d\")\n\n return (till_date_obj - from_date_obj).days\n\n def get_total_days(self, person):\n total_days = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"in\",\n [\"working_day\", \"holiday\"])])\n\n return total_days\n\n def get_present_days(self, person):\n full_day = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"availability_progress\", \"=\", \"full_day\")])\n\n half_day = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"availability_progress\", \"=\", \"half_day\")])\n\n return full_day + (0.5 * half_day)\n\n def get_absent_days(self, person):\n absent = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"=\", \"working_day\"),\n (\"availability_progress\", \"=\", \"absent\")])\n\n half_day = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"=\", \"working_day\"),\n (\"availability_progress\", \"=\", \"half_day\")])\n\n return absent + (0.5 * half_day)\n\n def get_working_days(self, person):\n working_day = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"=\", \"working_day\")])\n\n return working_day\n\n def get_holidays(self, person):\n holiday = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"=\", \"holiday\")])\n\n return holiday\n\n def get_holidays_present(self, person):\n full_day = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"=\", \"holiday\"),\n (\"availability_progress\", \"=\", \"full_day\")])\n\n half_day = self.env[\"time.attendance.detail\"].search_count([(\"person_id\", \"=\", person.id),\n (\"attendance_id.month_id\", \"=\", self.id),\n (\"day_progress\", \"=\", \"holiday\"),\n (\"availability_progress\", \"=\", \"half_day\")])\n\n return full_day + (0.5 * half_day)\n\n def get_lop_days(self, person):\n total = 0\n recs = self.env[\"leave.item\"].search([(\"period_id\", \"=\", self.period_id.id),\n (\"person_id\", \"=\", person.id),\n (\"leave_account_id\", \"=\", self.env.user.company_id.leave_lop_id.id)])\n\n for rec in recs:\n total = total + rec.credit\n\n return total\n\n def get_leave_available(self, person):\n employee_id = self.env[\"hr.employee\"].search([(\"person_id\", \"=\", person.id)])\n leave_account_id = employee_id.leave_account_id.id\n recs = self.env[\"leave.item\"].search([(\"leave_account_id\", \"=\", leave_account_id),\n (\"debit\", \">\", 0),\n (\"reconcile_id\", \"=\", False)])\n\n available = 0\n for rec in recs:\n available = available + rec.debit\n\n return available\n\n def generate_header(self, date_list):\n header = \"\"\n\n header_list = [\"Employee\"] + date_list + 
[\"Total Days\",\n \"Present Days\",\n \"Absent Days\",\n \"Holidays\",\n \"Holidays Present\"]\n\n for rec in header_list:\n header = \"{0}\\n{1}\".format(header, rec)\n\n header = \"{0}\".format(header)\n return header\n\n def generate_body(self, date_list, person_list):\n body = \"\"\n\n for person in person_list:\n person_id = self.env[\"hos.person\"].search([(\"id\", \"=\", person)])\n body = \"{0}\\n{1}\".format(body, person_id.name)\n\n for date in date_list:\n attendance = self.env[\"time.attendance.detail\"].search([(\"person_id\", \"=\", person),\n (\"attendance_id.date\", \"=\", date)])\n body = \"{0}\\n{1}\".format(body, attendance.availability_progress)\n\n total_days = self.get_total_days(person_id)\n present_days = self.get_present_days(person_id)\n absent_days = self.get_absent_days(person_id)\n holidays = self.get_holidays(person_id)\n holiday_present = self.get_holidays_present(person_id)\n\n body = \"\"\"{0}{1}\n {2}\n {3}\n {4}\n {5}\"\"\".format(body,\n total_days,\n present_days,\n absent_days,\n holidays,\n holiday_present)\n\n return body\n\n def trigger_preview(self):\n recs = self.month_detail\n\n date_list = []\n person_list = []\n for rec in recs:\n date_list.append(rec.date)\n\n recs = self.env[\"time.attendance.detail\"].search([(\"attendance_id.month_id\", \"=\", self.id)])\n\n for rec in recs:\n if rec.person_id.id not in person_list:\n person_list.append(rec.person_id.id)\n\n header = self.generate_header(date_list)\n body = self.generate_body(date_list, person_list)\n\n html = self.env.user.company_id.template_attendance\n report = html.format(header, body)\n\n view = self.env.ref('shesha.view_month_attendance_wiz_form')\n\n return {\n 'name': 'Monthly Attendance',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': view.id,\n 'res_model': 'month.attendance.wiz',\n 'target': 'new',\n 'type': 'ir.actions.act_window',\n 'context': {'report': report}\n }\n\n @api.multi\n def trigger_closed(self):\n draft = self.env[\"time.attendance\"].search_count([(\"month_id\", \"=\", self.id), (\"progress\", \"!=\", \"verified\")])\n\n if draft:\n raise exceptions.ValidationError(\"Error! 
Daily attendance report is not verified\")\n\n employees = self.env[\"hr.employee\"].search([])\n\n for employee in employees:\n total_absent = self.get_absent_days(employee.person_id)\n\n voucher = {\"period_id\": self.period_id.id,\n \"person_id\": employee.person_id.id,\n \"count\": total_absent}\n\n # Check already voucher is created\n check_voucher = self.env[\"leave.voucher\"].search([(\"period_id\", \"=\", self.period_id.id),\n (\"person_id\", \"=\", employee.person_id.id)])\n\n if not check_voucher:\n voucher_id = self.env[\"leave.voucher\"].create(voucher)\n voucher_id.get_cr_lines()\n voucher_id.update_count()\n voucher_id.trigger_posting()\n\n self.write({\"progress\": \"closed\"})\n\n def get_model_data(self, period_id, employee, leave_item):\n\n journal = {\"period_id\": period_id.id,\n \"person_id\": employee.person_id.id,\n \"journal_detail\": leave_item,\n \"progress\": \"posted\",\n \"reference\": period_id.name}\n\n return journal\n\n def get_model_line_data(self, period_id, employee):\n leave_item = []\n configs = self.env[\"leave.configuration\"].search([(\"leave_level_id\", \"=\", employee.leave_level_id.id)])\n\n # Credit Detail - Employee\n for config in configs:\n journal_detail = {\"period_id\": period_id.id,\n \"person_id\": employee.person_id.id,\n \"leave_account_id\": employee.leave_account_id.id,\n \"description\": \"{0} Leave Credit\".format(config.leave_type_id.name),\n \"reference\": period_id.name,\n \"leave_order\": config.leave_order}\n\n # Leave Journal Credit\n journal_credit = journal_detail\n journal_credit.update({\"debit\": config.leave_credit,\n \"leave_account_id\": employee.leave_account_id.id})\n leave_item.append((0, 0, journal_credit))\n\n # Leave Journal Debit\n journal_debit = journal_detail\n journal_debit.update({\"credit\": config.leave_credit,\n \"leave_account_id\": self.env.user.company_id.leave_credit_id.id})\n leave_item.append((0, 0, journal_debit))\n\n return leave_item\n\n @api.multi\n def trigger_open(self):\n if self.env[\"month.attendance\"].search_count([(\"progress\", \"=\", \"open\"), (\"id\", \"!=\", self.id)]):\n raise exceptions.ValidationError(\"Error! Please close all open months before open\")\n\n # Leave Credits from leave configuration\n employees = self.env[\"hr.employee\"].search([])\n\n for employee in employees:\n leave_item = self.get_model_line_data(self.period_id, employee)\n journal = self.get_model_data(self.period_id, employee, leave_item)\n\n self.env[\"leave.journal\"].create(journal)\n\n self.write({\"progress\": \"open\"})\n\n","sub_path":"models/time_management/month_attendance.py","file_name":"month_attendance.py","file_ext":"py","file_size_in_byte":12543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52232911","text":"# Copyright (c) 2015 Rackspace\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport re\nimport six\n\nfrom oslo_config import cfg\n\nfrom neutron._i18n import _\nfrom neutron.api import extensions\nfrom neutron.api.v2 import attributes as attr\nfrom neutron.common import exceptions as n_exc\n\nDNS_LABEL_MAX_LEN = 63\nDNS_LABEL_REGEX = \"[a-z0-9-]{1,%d}$\" % DNS_LABEL_MAX_LEN\nFQDN_MAX_LEN = 255\nDNS_DOMAIN_DEFAULT = 'openstacklocal.'\n\n\ndef _validate_dns_name(data, max_len=FQDN_MAX_LEN):\n msg = _validate_dns_format(data, max_len)\n if msg:\n return msg\n request_dns_name = _get_request_dns_name(data)\n if request_dns_name:\n msg = _validate_dns_name_with_dns_domain(request_dns_name)\n if msg:\n return msg\n\n\ndef _validate_dns_format(data, max_len=FQDN_MAX_LEN):\n # NOTE: An individual name regex instead of an entire FQDN was used\n # because its easier to make correct. The logic should validate that the\n # dns_name matches RFC 1123 (section 2.1) and RFC 952.\n if not data:\n return\n try:\n # Trailing periods are allowed to indicate that a name is fully\n # qualified per RFC 1034 (page 7).\n trimmed = data if not data.endswith('.') else data[:-1]\n if len(trimmed) > 255:\n raise TypeError(\n _(\"'%s' exceeds the 255 character FQDN limit\") % trimmed)\n names = trimmed.split('.')\n for name in names:\n if not name:\n raise TypeError(_(\"Encountered an empty component.\"))\n if name.endswith('-') or name[0] == '-':\n raise TypeError(\n _(\"Name '%s' must not start or end with a hyphen.\") % name)\n if not re.match(DNS_LABEL_REGEX, name):\n raise TypeError(\n _(\"Name '%s' must be 1-63 characters long, each of \"\n \"which can only be alphanumeric or a hyphen.\") % name)\n # RFC 1123 hints that a TLD can't be all numeric. last is a TLD if\n # it's an FQDN.\n if len(names) > 1 and re.match(\"^[0-9]+$\", names[-1]):\n raise TypeError(_(\"TLD '%s' must not be all numeric\") % names[-1])\n except TypeError as e:\n msg = _(\"'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s\") % {\n 'data': data, 'reason': str(e)}\n return msg\n\n\ndef _validate_dns_name_with_dns_domain(request_dns_name):\n # If a PQDN was passed, make sure the FQDN that will be generated is of\n # legal size\n dns_domain = _get_dns_domain()\n higher_labels = dns_domain\n if dns_domain:\n higher_labels = '.%s' % dns_domain\n higher_labels_len = len(higher_labels)\n dns_name_len = len(request_dns_name)\n if not request_dns_name.endswith('.'):\n if dns_name_len + higher_labels_len > FQDN_MAX_LEN:\n msg = _(\"The dns_name passed is a PQDN and its size is \"\n \"'%(dns_name_len)s'. The dns_domain option in \"\n \"neutron.conf is set to %(dns_domain)s, with a \"\n \"length of '%(higher_labels_len)s'. When the two are \"\n \"concatenated to form a FQDN (with a '.' at the end), \"\n \"the resulting length exceeds the maximum size \"\n \"of '%(fqdn_max_len)s'\"\n ) % {'dns_name_len': dns_name_len,\n 'dns_domain': cfg.CONF.dns_domain,\n 'higher_labels_len': higher_labels_len,\n 'fqdn_max_len': FQDN_MAX_LEN}\n return msg\n return\n\n # A FQDN was passed\n if (dns_name_len <= higher_labels_len or not\n request_dns_name.endswith(higher_labels)):\n msg = _(\"The dns_name passed is a FQDN. Its higher level labels \"\n \"must be equal to the dns_domain option in neutron.conf, \"\n \"that has been set to '%(dns_domain)s'. 
It must also \"\n \"include one or more valid DNS labels to the left \"\n \"of '%(dns_domain)s'\") % {'dns_domain':\n cfg.CONF.dns_domain}\n return msg\n\n\ndef _get_dns_domain():\n if not cfg.CONF.dns_domain:\n return ''\n if cfg.CONF.dns_domain.endswith('.'):\n return cfg.CONF.dns_domain\n return '%s.' % cfg.CONF.dns_domain\n\n\ndef _get_request_dns_name(data):\n dns_domain = _get_dns_domain()\n if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)):\n return data\n return ''\n\n\ndef convert_to_lowercase(data):\n if isinstance(data, six.string_types):\n return data.lower()\n msg = _(\"'%s' cannot be converted to lowercase string\") % data\n raise n_exc.InvalidInput(error_message=msg)\n\n\nattr.validators['type:dns_name'] = (\n _validate_dns_name)\n\n\nDNSNAME = 'dns_name'\nDNSASSIGNMENT = 'dns_assignment'\nEXTENDED_ATTRIBUTES_2_0 = {\n 'ports': {\n DNSNAME: {'allow_post': True, 'allow_put': True,\n 'default': '',\n 'convert_to': convert_to_lowercase,\n 'validate': {'type:dns_name': FQDN_MAX_LEN},\n 'is_visible': True},\n DNSASSIGNMENT: {'allow_post': False, 'allow_put': False,\n 'is_visible': True},\n }\n}\n\n\nclass Dns(extensions.ExtensionDescriptor):\n \"\"\"Extension class supporting DNS Integration.\"\"\"\n\n @classmethod\n def get_name(cls):\n return \"DNS Integration\"\n\n @classmethod\n def get_alias(cls):\n return \"dns-integration\"\n\n @classmethod\n def get_description(cls):\n return \"Provides integration with internal DNS.\"\n\n @classmethod\n def get_updated(cls):\n return \"2015-08-15T18:00:00-00:00\"\n\n def get_extended_resources(self, version):\n if version == \"2.0\":\n return EXTENDED_ATTRIBUTES_2_0\n else:\n return {}\n","sub_path":"neutron/extensions/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407856920","text":"# implement the bubble sort algorithm by completing the module mysort\r\ndef mysort( myList ):\r\n return myList\r\n\r\ndef listCompare( list1, list2 ):\r\n sameList = len( list1 ) == len( list2 )\r\n\r\n if sameList == True:\r\n i = 0\r\n while i < len( list1 ):\r\n if list1[i] != list2[i] :\r\n sameList = False\r\n break\r\n i = i + 1\r\n return sameList\r\n\r\ntestData = [\r\n [7,3,9,4,10,2],\r\n [1,2,10,3,5,7,9,12],\r\n [1,2,3,4,5],\r\n [99, 98, 97, 96, 95, 94, 93, 92, 91]\r\n ]\r\n\r\nfor aList in testData:\r\n mySortedList = mysort( aList )\r\n sortedList = aList[:]\r\n sortedList.sort()\r\n\r\n if listCompare( mySortedList, sortedList):\r\n message = str( aList ) + \" was correctly sorted to \" + str( sortedList )\r\n else:\r\n message = str( aList ) + \" was incorrectly sorted to \" + str( sortedList )\r\n\r\n print( message )\r\n","sub_path":"sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"239222198","text":"# Copyright 2019 Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom __future__ import absolute_import\n\nimport inspect\n\nfrom keystoneclient.v3 import endpoints\nfrom octaviaclient.api.v2 import octavia as octaviaclient\n\nfrom tobiko.openstack import keystone\nfrom tobiko.openstack import octavia\nfrom tobiko.tests import unit\nfrom tobiko.tests.unit import openstack\nfrom tobiko.tests.unit.openstack import test_client\n\n\nclass KeystoneModulePatch(unit.PatchFixture):\n\n client = object()\n endpoint = endpoints.Endpoint(manager=None,\n info={'url': 'http://some/endpoint'})\n session = None\n name = None\n\n def setup_fixture(self):\n module = inspect.getmodule(octavia.OctaviaClientFixture)\n self.patch(module, 'keystone', self)\n\n def get_keystone_client(self, session):\n self.session = session\n return self.client\n\n def find_service_endpoint(self, name, client):\n self.name = name\n assert self.client is client\n return self.endpoint\n\n\nclass OctaviaClientFixtureTest(test_client.OpenstackClientFixtureTest):\n\n def setUp(self):\n super(OctaviaClientFixtureTest, self).setUp()\n self.useFixture(KeystoneModulePatch())\n\n def create_client(self, session=None):\n return octavia.OctaviaClientFixture(session=session)\n\n\nclass GetOctaviaClientTest(openstack.OpenstackTest):\n\n def setUp(self):\n super(GetOctaviaClientTest, self).setUp()\n self.useFixture(KeystoneModulePatch())\n\n def test_get_octavia_client(self, session=None, shared=True):\n client1 = octavia.get_octavia_client(session=session, shared=shared)\n client2 = octavia.get_octavia_client(session=session, shared=shared)\n if shared:\n self.assertIs(client1, client2)\n else:\n self.assertIsNot(client1, client2)\n self.assertIsInstance(client1, octaviaclient.OctaviaAPI)\n self.assertIsInstance(client2, octaviaclient.OctaviaAPI)\n\n def test_get_octavia_client_with_not_shared(self):\n self.test_get_octavia_client(shared=False)\n\n def test_get_octavia_client_with_session(self):\n session = keystone.get_keystone_session()\n self.test_get_octavia_client(session=session)\n\n\nclass OctaviaClientTest(openstack.OpenstackTest):\n\n def setUp(self):\n super(OctaviaClientTest, self).setUp()\n self.useFixture(KeystoneModulePatch())\n\n def test_octavia_client_with_none(self):\n default_client = octavia.get_octavia_client()\n client = octavia.octavia_client(None)\n self.assertIsInstance(client, octaviaclient.OctaviaAPI)\n self.assertIs(default_client, client)\n\n def test_octavia_client_with_client(self):\n default_client = octavia.get_octavia_client()\n client = octavia.octavia_client(default_client)\n self.assertIsInstance(client, octaviaclient.OctaviaAPI)\n self.assertIs(default_client, client)\n\n def test_octavia_client_with_fixture(self):\n fixture = octavia.OctaviaClientFixture()\n client = octavia.octavia_client(fixture)\n self.assertIsInstance(client, octaviaclient.OctaviaAPI)\n self.assertIs(client, fixture.client)\n","sub_path":"tobiko/tests/unit/openstack/octavia/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"525095672","text":"from splitwise import Splitwise\nfrom user import User\nfrom constants import *\n\n\nu1 = User(\"gopal\", \"gopal@mail.com\", \"9687121229\")\nu2 = User(\"yadav\", \"yadav@mail.com\", \"5656556661\")\nu3 = User(\"karan\", \"karan@mail.com\", \"5455121229\")\nusers_list = [ u1, u2, u3]\n\nsplitwise = 
Splitwise(users_list)\nsplitwise.add_users(users_list)\n\nusers_list = splitwise.get_users()\n\nwhile True:\n    print(\"Input your query.....Type EXIT to exit...\")\n    expense = input()\n    expense = expense.split(' ')\n    if expense[0] == EXPENSE:\n        splitwise.add_expense(expense)\n    elif expense[0] == SHOW and len(expense)==1:\n        splitwise.show_all_expense(expense)\n    elif expense[0] == SHOW and len(expense)==2:\n        splitwise.show_expense_of(expense)\n    elif expense[0] == EXIT:\n        print(\"Bye..\")\n        break\n    else:\n        print(\"Please give valid input....\")\n","sub_path":"splitwise/main_start.py","file_name":"main_start.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"240831444","text":"print(\"hello\")\r\nfrom random import random, choice\r\nfrom sys import stdout\r\n\r\ncolores = [\"black\", \"blue\", \"pink\", \"orange\", \"red\"]\r\n\r\ndef cabecera(aristas, eps=False):\r\n    if eps:\r\n        print(\"set term postscript eps\", file = aristas)\r\n        print(\"set output 'grafo2.eps'\", file = aristas)\r\n    else:\r\n        print(\"set term png\", file = aristas)\r\n        print(\"set output 'grafo2.png'\", file = aristas)\r\n    print(\"set xrange [-10.0:10.0]\", file = aristas)\r\n    print(\"set yrange [-10.0:10.0]\", file = aristas)\r\n    print('set size square', file = aristas) \r\n    print('set key off', file = aristas)\r\n\r\ndef pie(destino, aristas):\r\n    print(\"plot '{:s}' using 1:2 with points pt 5\".format(destino), file = aristas)\r\n\r\nclass Grafo:\r\n    \r\n    def __init__(self):\r\n        self.n = None # the attributes are created here but not yet initialised\r\n        self.x = dict()\r\n        self.y = dict()\r\n        self.E = []\r\n        self.destino = None\r\n\r\n    def creaNodos(self, orden): # creating the nodes\r\n        self.n = orden\r\n        for nodo in range(self.n):\r\n            self.x[nodo] = random() * 10\r\n            self.y[nodo] = random() * 10\r\n\r\n    def imprimir(self, dest): # saving the X and Y pairs to a file\r\n        self.destino = dest\r\n        with open(self.destino , \"w\") as archivo:\r\n            for nodo in range(self.n):\r\n                print(self.x[nodo], self.y[nodo], file=archivo)\r\n        print(self.destino)\r\n\r\n    def conecta(self, prob):\r\n        for nodo in range(self.n - 1):\r\n            for otro in range(nodo + 1, self.n): \r\n                if random() < prob:\r\n                    color = choice(colores)\r\n                    self.E.append((nodo, otro, color))\r\n        print(len(self.E))\r\n\r\n    def grafica(self, plot): # printing the graph with its edges\r\n        assert self.destino is not None\r\n        with open(plot, \"w\") as aristas:\r\n            cabecera(aristas)\r\n            num = 1\r\n            for (v, w, c) in self.E:\r\n                x1 = self.x[v]\r\n                x2 = self.x[w]\r\n                y1 = self.y[v]\r\n                y2 = self.y[w]\r\n                flecha = \"set arrow {:d} from {:f}, {:f} to {:f}, {:f} lw 2 lt 5 lc rgb '{:s}' nohead\".format(num,x1,y1,x2,y2,c)\r\n                print(flecha, file=aristas)\r\n                num += 1 \r\n            pie(self.destino, aristas)\r\n\r\n\r\n","sub_path":"Tarea2/Version1/grafo2.py","file_name":"grafo2.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"179537593","text":"\nclass employee():\n\tdef __init__(self,name,age,position):\n\t\tself.name=name\n\t\tself.age=age\n\t\tself.position=position\n\tdef isaged(self):\n\t\tif self.age >= 25:\n\t\t\tprint(\"he is aged \")\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"not aged 25\")\n\t\t\treturn False\n\n\tdef show(self):\n\t\tprint(\"Name : \"+self.name)\n\t\tprint(\"Age : \"+str(self.age))\n\t\tprint(\"Position : 
\"+self.position)\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"408388384","text":"import sys\n\nf = open(sys.argv[1], 'r')\n\nfor line in f:\n\tline = line.split(',')\n\tline[1] = line[1].strip()\n\tans = len(line[0]) -1\n\tfor i in line[0][::-1]:\n\t\tif i == line[1]:\n\t\t\tbreak\n\t\telse:\n\t\t\tans -= 1\n\tprint(ans)\n\nf.close()","sub_path":"EasyChallenges/rightChar/rightChar1.py","file_name":"rightChar1.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164433867","text":"\"\"\"\nPreparing the data for both the stanford parser\nJava implementations and the Jython implementations.\n\n\"\"\"\nimport pandas as pd\n\n\nclass PrepareStanfordParser:\n \"\"\"Useful functions for pipeline to iterate towards the use of the\n Stanford parser\"\"\"\n\n def __init__(self):\n self.genes = self.get_genes()\n self.drugs = self.get_drugs()\n\n def get_genes(\n self,\n path=\"/Users/mtaruno/Documents/DevZone/Stem-Away-group-5/data/genes/genes.tsv\",\n ):\n return list(pd.read_csv(path, delimiter=\"\\t\")[\"Name\"])\n\n def get_drugs(\n self,\n path=\"/Users/mtaruno/Documents/DevZone/Stem-Away-group-5/data/drugs/drugs.tsv\",\n ):\n return list(pd.read_csv(path, delimiter=\"\\t\")[\"Name\"])\n\n def tag_drug_gene(self, sent: str) -> tuple:\n \"\"\"\n Parameters\n =====\n str: biomedical sentence\n\n Returns\n ========\n (sent, i, j) where i is the set of drug indices and j is the set of gene indices\n \"\"\"\n\n tokenized = sent.split(\" \")\n\n drug_indices = []\n gene_indices = []\n\n for i, token in enumerate(tokenized):\n if token in self.drugs:\n drug_indices.append(i)\n if token in self.genes:\n gene_indices.append(i)\n\n return (sent, drug_indices, gene_indices)\n\n def apply_to_all_sentences(self, biomedical_sentences: list) -> list:\n \"\"\"Getting all the drug indices and tag indices of each of the pubmed sentences\"\"\"\n return [self.tag_drug_gene(sent) for sent in biomedical_sentences]\n\n def parse_to_stanford(self) -> None:\n \"\"\"Take the input of biomedical sentences CSV and parse\n them in a format that is readable for the Java Stanford Parser\n implementation (as a text file).\n \"\"\"\n biomedical = pd.read_csv(\"./data/biomedical_sentences.csv\")\n biomedical_data = \"\"\n for i in biomedical[\"Text\"]:\n biomedical_data += \"\\n\" + i\n\n # getting a sample\n sample = biomedical.sample(100)\n sample_data = \"\"\n for i in sample[\"Text\"]:\n sample_data += \"\\n\" + i\n\n # Converting the input to txt\n with open(\"biomedical_output.txt\", \"w\") as text_file:\n text_file.write(biomedical_data)\n\n with open(\"sample_data.txt\", \"w\") as text_file:\n text_file.write(sample_data)\n","sub_path":"module2/prepare_pubmed_data.py","file_name":"prepare_pubmed_data.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161610675","text":"# -*- coding: utf-8 -*-\n\n\nclass Uml(object):\n def __init__(self, _class=None, interface=None, package=None, association=None, uml=None):\n self._class = _class\n self.interface = interface\n self.association = association\n self.package = package\n self.uml = 
uml\n","sub_path":"umlml/definitions/uml_definition.py","file_name":"uml_definition.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"429157723","text":"# Escreva um programa que leia a velocidade de um carro. Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo que ele foi multado.\n# A multa vai custar R$7,00 por cada Km acima do limite.\n\ncores = {\n 'amarelo': '\\033[33m',\n 'azul': '\\033[34m',\n 'roxa': '\\033[35m',\n 'cinza': '\\033[36m',\n 'vermelho': '\\033[31m',\n 'verde': '\\033[32m'\n }\n\nvelocidade = float(input(\"Qual a velocidade atual o carro?\"))\nif velocidade > 80:\n print(\"MULTADO! Você excedeu o limite permitido que é de 80 km/h\")\n multa = (velocidade - 80) * 7\n print(\"{}Você deve pagar uma multa de R${:.2f} \".format(cores['vermelho'], multa))\nprint(\"{}Tenha um bom dia! Dirija com segurança\".format(cores['verde']))\n","sub_path":"Curso em Vídeo/Python3/Mundo 1/ex029.py","file_name":"ex029.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483091481","text":"nums = [1,2,2,3,3,5,5,5,25,75,75,75,97,97,97,97]\n\n\ndef left(nums,target):\n\tres = None\n\tl = 0\n\tr = len(nums)-1\n\twhile l<=r:\n\t\tm = r-(r-l)//2\n\t\tif nums[m]target:\n\t\t\tr = m - 1\n\t\telse:\n\t\t\tif nums[m]==target:\n\t\t\t\tres = m\n\t\t\tl = m + 1\n\n\treturn res\n\ndef count_ele(nums,target):\n\tl = left(nums,target)\n\tr = right(nums,target)\n\treturn r-l+1 if l!=None else 0\n\ndef sum_target(nums):\n\tfor i in range(len(nums)):\n\t\thash = {}\n\t\tfor j in range(len(nums)):\n\t\t\tif nums[i]-nums[j] in hash and hash[nums[i]-nums[j]]!=i:\n\t\t\t\treturn (i,j,hash[nums[i]-nums[j]])\n\t\t\thash[nums[j]] = j\n\treturn None\ndef diff_target(nums,k):\n\thash = {}\n\tfor j in range(len(nums)):\n\t\tif nums[j]-k in hash:\n\t\t\treturn (j,hash[nums[j]-k])\n\t\thash[nums[j]] = j\n\treturn None\nprint(sum_target(nums),diff_target(nums,2))\n","sub_path":"week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100181273","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 18:27:09 2019\n\n@author: Jacky\n\"\"\"\n\nimport cv2\nimport datetime as dt\nimport h5py\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as plb\nimport numpy as np\nimport os\nimport pandas as pd\nfrom glob import glob\n\ndef proc_images(img_path='dt_cat', img_name='cat', \n img_ext='png', out_file=\"data.h5\",\n start_index=1, img_label=0):\n \"\"\"\n Saves compressed, resized images as HDF5 datsets\n Returns\n data.h5, where each dataset is an image or class label\n e.g. 
X23,y23 = image and corresponding class label\n \"\"\"\n start = dt.datetime.now()\n # ../input/\n #PATH = os.path.abspath(os.path.join('..', 'input'))\n # ../input/sample/images/\n #SOURCE_IMAGES = os.path.join(PATH, \"sample\", \"images\")\n # ../input/sample/images/*.png\n #images = glob(os.path.join(SOURCE_IMAGES, \"*.png\"))\n images = glob(os.path.join(img_path, \"*\" + img_ext))\n \n # Load labels\n #labels = pd.read_csv('../input/sample_labels.csv')\n # Get all image files\n img_files = [f for f in os.listdir(img_path) if os.path.isfile(os.path.join(img_path, f))]\n \n # Size of data\n NUM_IMAGES = len(images)\n HEIGHT = 128\n WIDTH = 128\n CHANNELS = 3\n SHAPE = (HEIGHT, WIDTH, CHANNELS)\n \n with h5py.File(out_file, 'a') as hf:\n img_index = start_index\n img_end_index = start_index\n \n for i,img in enumerate(images):\n if img_index > start_index:\n img_end_index = img_index\n \n # Images\n image = cv2.imread(img)\n image = cv2.resize(image, (WIDTH,HEIGHT), interpolation=cv2.INTER_CUBIC)\n Xset = hf.create_dataset(\n name='X'+str(img_index),\n data=image,\n shape=(HEIGHT, WIDTH, CHANNELS),\n maxshape=(HEIGHT, WIDTH, CHANNELS),\n compression=\"gzip\",\n compression_opts=9)\n yset = hf.create_dataset(\n name='y'+str(img_index),\n data=img_label,\n shape=(1,),\n maxshape=(None,),\n compression=\"gzip\",\n compression_opts=9)\n img_id = '{0}_{1}'.format(img_name, os.path.basename(img))\n idset = hf.create_dataset(\n name='id'+str(img_index),\n data=img_id,\n shape=(1,),\n maxshape=(None,),\n compression=\"gzip\",\n compression_opts=9)\n end=dt.datetime.now()\n \n if img_index % 100 == 0:\n print(img_index, \": \", img_name, \", \", (end-start).seconds, \"seconds\")\n img_index += 1\n \n return img_end_index\n\ndef store_total_img_indexes(out_file='data.h5', start_index=0, end_index=0):\n with h5py.File(out_file, 'a') as hf:\n hf.create_dataset(\n name='start_index',\n data=str(start_index),\n shape=(1,),\n maxshape=(None,),\n compression=\"gzip\",\n compression_opts=9)\n print('Store start index', start_index)\n\n hf.create_dataset(\n name='end_index',\n data=str(end_index),\n shape=(1,),\n maxshape=(None,),\n compression=\"gzip\",\n compression_opts=9)\n print('Store end index', end_index)\n\n\nimg_start_index=0\nimg_end_index=0\n\nh5_file = 'img_128.h5'\nstart_processing = dt.datetime.now()\n\nif os.path.exists(h5_file):\n os.remove(h5_file)\n\nimg_end_index = proc_images(img_path='..\\\\dt_cat', img_name='cat', img_ext='jpg', \n out_file=h5_file, start_index=img_start_index, img_label=0)\nprint('----------------')\nprint('start: {}, end: {}'.format(img_start_index, img_end_index))\nprint('----------------')\n\nimg_start_index = img_end_index + 1\n\nimg_end_index = proc_images(img_path='..\\\\dt_bird', img_name='bird', img_ext='jpg', \n out_file=h5_file, start_index=img_start_index, img_label=1)\nprint('----------------')\nprint('start: {}, end: {}'.format(img_start_index, img_end_index))\nprint('----------------')\n\nimg_start_index = img_end_index + 1\nimg_end_index = proc_images(img_path='..\\\\dt_dog', img_name='dog', img_ext='jpg', \n out_file=h5_file, start_index=img_start_index, img_label=2)\n\nprint('----------------')\nprint('start: {}, end: {}'.format(img_start_index, img_end_index))\nprint('----------------')\n\nstore_total_img_indexes(out_file=h5_file, start_index=0, end_index=img_end_index)\n\nend_processing = dt.datetime.now()\nprint('Total spent: ', (end_processing - start_processing))\n\n#!ls -lha\n\nwith h5py.File(h5_file, 'r') as hf:\n plb.imshow(hf[\"X2383\"])\n 
print(hf[\"y2383\"].value)\n print(hf[\"id2383\"].value)\n print(hf['start_index'].value)\n print(hf['end_index'].value)\n\n\n","sub_path":"optimize_img/HDF5_Convertor_optimize_img.py","file_name":"HDF5_Convertor_optimize_img.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500977591","text":"import argparse\n\nfrom .log import BookLog\nfrom .prompt import Prompt\n\ndef main():\n parser = argparse.ArgumentParser(prog='bookstats')\n parser.add_argument('file', help='Log file')\n parser.add_argument('--all', action='store_true', help='Print all authors and books')\n parser.add_argument('--year', action='store', help='Print books from year')\n parser.add_argument('--authors', action='store_true', help='Print all authors')\n parser.add_argument('--most-read', action='store_true', help='Print most read authors')\n parser.add_argument('--unique-authors', action='store_true', help='Print number of unique authors')\n parser.add_argument('--search-authors', nargs='*', metavar='term', help='Print authors found for [term]')\n parser.add_argument('--search-books', nargs='*', metavar='term', help='Print books found for [term]')\n parser.add_argument('--yearly-tally', action='store_true', help='Print yearly tally')\n parser.add_argument('--all-tally', action='store_true', help='Print yearly tally')\n args = parser.parse_args()\n\n log = BookLog(args.file)\n\n# if args.interactive:\n# from textwrap import dedent\n# title = '''\n# . __ __ __ ___ ___ __\n# | / \\ / _` /__` | /\\ | /__`\n# |___ \\__/ \\__> .__/ | /~~\\ | .__/\n#\n# '''\n# print(dedent(title))\n#\n# prompt = Prompt(log)\n# prompt.prompt_help()\n# try:\n# prompt()\n# except (KeyboardInterrupt, EOFError):\n# exit('')\n\n if args.all:\n log.print_author_dict()\n\n if args.authors:\n for author in sorted(log.author_dict.keys()):\n print(author)\n\n if args.most_read:\n print('Most read authors:')\n for k, v in log.most_read_authors():\n author = ' '.join(reversed(k.split(',')))\n print(f'{v:4} {author}')\n\n if args.search_authors:\n print(f'Author Search [{\" \".join(args.search_authors)}]:')\n log.print_author_dict(' '.join(args.search_authors))\n\n if args.search_books:\n print(f'Book Search [{\" \".join(args.search_books)}]:')\n books = log.search_books(' '.join(args.search_books))\n for book in books:\n print(book)\n\n if args.unique_authors:\n print(f'Unique authors: {len(log.author_dict.keys())}')\n\n if args.year:\n log.print_books_for_year(args.year)\n\n if args.yearly_tally:\n log.print_yearly_tally()\n\n if args.all_tally:\n log.print_all_and_tally()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"bookstats/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"366864811","text":"#!/usr/bin/env python3\n# pylint: disable=C0103\n\n# TODO Audit this code later for cleanup.\n\nimport html\nimport random\n\nimport asyncio\nimport discord\nfrom discord.ext import commands\n\nURL_NUMBERS_API = \"http://numbersapi.com/{0}/{1}\"\nOPTIONS_NUMBERS_API = [\"math\", \"trivia\"]\nURL_TRIVIA_API = \"https://opentdb.com/api.php?amount=1\"\n\nsystemrandom = random.SystemRandom()\n\n\nasync def input_number(ctx: commands.Context,\n message: str=\"Please enter a number within 10 seconds.\",\n *, timeout: int=10, min_value: int=None, max_value: int=None):\n \"\"\"Number input helper, with timeout.\n\n * ctx - The 
context in which the question is being asked.\n * message - Optional messsage that the question should ask.\n * timeout - Timeout, in seconds, before automatically failing. Defaults to 10.\n * min_value - Minimum accepted value for the input. Defaults to None.\n * max_value - Maximum accepted value for the input. Defaults to None.\n \"\"\"\n await ctx.send(message)\n\n def check(message):\n \"\"\"The check function used in bot.wait_for().\"\"\"\n if message.author != ctx.message.author or not message.clean_content.isdecimal():\n return False\n\n number = int(message.clean_content)\n\n if (min_value and number < min_value) or (max_value and number > max_value):\n return False\n\n return True\n\n try:\n message = await ctx.bot.wait_for(\"message\", timeout=timeout, check=check)\n\n except asyncio.TimeoutError:\n raise commands.UserInputError(\"Timed out waiting.\")\n\n return int(message.clean_content)\n\n\nclass Trivia:\n \"\"\"Trivia commands.\"\"\"\n\n @commands.command(aliases=[\"numberfact\", \"number\"])\n @commands.cooldown(12, 12, commands.BucketType.channel)\n async def numfact(self, ctx, number: int):\n \"\"\"Display a random fact about a number.\"\"\"\n kind = systemrandom.choice(OPTIONS_NUMBERS_API)\n url = URL_NUMBERS_API.format(number, kind)\n async with ctx.bot.session.get(url) as response:\n if response.status == 200:\n data = await response.text()\n await ctx.send(data)\n else:\n await ctx.send(\"Could not fetch fact. :<\")\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def trivia(self, ctx):\n \"\"\"Ask a random trivia question.\"\"\"\n async with ctx.bot.session.get(URL_TRIVIA_API) as response:\n if response.status == 200:\n data = await response.json()\n\n trivia = data[\"results\"][0]\n\n correct_answer = html.unescape(trivia[\"correct_answer\"])\n incorrect_answers = []\n for answer in trivia[\"incorrect_answers\"]:\n incorrect_answers.append(html.unescape(answer))\n\n choices = [correct_answer] + incorrect_answers\n\n systemrandom.shuffle(choices)\n\n embed = discord.Embed()\n embed.title = html.unescape(trivia[\"category\"])\n embed.description = html.unescape(trivia[\"question\"])\n\n difficulty = html.unescape(trivia[\"difficulty\"]).capitalize()\n footer_text = f\"Powered by Open Trivia DB | Difficulty: {difficulty}\"\n\n embed.set_footer(text=footer_text)\n\n paginator = commands.Paginator(prefix=\"```markdown\")\n\n for index in range(len(choices)):\n paginator.add_line(f\"{index+1}. {choices[index]}\")\n\n embed.add_field(name=\"Options\", value=paginator.pages[0])\n\n await ctx.send(ctx.author.mention, embed=embed)\n choice = await input_number(ctx, \"Answer by number in 15 seconds.\",\n timeout=15, min_value=1,\n max_value=len(choices))\n\n if choices[choice-1] == correct_answer:\n await ctx.send(\"Correct! :3\")\n\n else:\n await ctx.send(f\"Nope, the correct answer is {correct_answer}. :<\")\n\n else:\n await ctx.send(\"Could not fetch trivia. 
:<\")\n\n\ndef setup(bot):\n \"\"\"Set up the extension.\"\"\"\n bot.add_cog(Trivia())\n","sub_path":"cogs/fun/trivia.py","file_name":"trivia.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"258540859","text":"# -*- coding: UTF-8 -*-\n# @yasinkuyu\n\nimport os\nimport sys\nimport time\nimport config\nimport argparse\n\nfrom BinanceAPI import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--quantity\", type=float, help=\"Buy/Sell Quantity\", default=6)\nparser.add_argument(\"--symbol\", type=str, help=\"Market Symbol (Ex: IOTABTC)\", default='IOTABTC')\nparser.add_argument(\"--profit\", type=float, help=\"Target Profit\", default=1.3)\nparser.add_argument(\"--orderid\", type=int, help=\"Target Order Id\", default=0)\nparser.add_argument(\"--testmode\", type=bool, help=\"Test Mode True/False\", default=False)\nparser.add_argument(\"--wait_time\", type=int, help=\"Wait Time (seconds)\", default=3)\nparser.add_argument(\"--increasing\", type=float, help=\"Buy Price +Increasing (0.00000001)\", default=0.00000001)\nparser.add_argument(\"--decreasing\", type=float, help=\"Sell Price -Decreasing (0.00000001)\", default=0.00000001)\n\noption = parser.parse_args()\n\nTEST_MODE = option.testmode\n\nPROFIT = option.profit\nORDER_ID = option.orderid\nQUANTITY = option.quantity\nWAIT_TIME = option.wait_time # seconds\nWAIT_TIME_SELL = 4 # seconds\n\nclient = BinanceAPI(config.api_key, config.api_secret)\n\ndef write(data):\n file = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ORDER'), 'w')\n file.write(data)\n \ndef buy_limit(symbol, quantity, buyPrice):\n global TEST_MODE\n \n if TEST_MODE:\n return \"100000\"\n \n ret = client.buy_limit(symbol, quantity, buyPrice)\n if 'msg' in ret:\n message(ret['msg'])\n \n #Order created.\n orderId = ret['orderId']\n \n write(\"%s,%d,%lf,%lf\" % (symbol, orderId, quantity, buyPrice))\n \n print ('Order Id: %d' % orderId)\n\n return orderId\n\ndef sell_limit(symbol, orderId, lastPrice, sell_price):\n global TEST_MODE\n\n if TEST_MODE:\n print (\"Test sell price: %.8f \" % sell_price)\n return 0\n\n # Get active order info\n order = get_order(symbol, orderId)\n \n # Sell price\n price = float(order['price'])\n \n filled_qty = float(order['executedQty'])\n \n # Todo: check filled or remaining qty.\n quantity = float(order['origQty']) #executedQty\n\n print (\"Order(buy): %s %d: %.8f\" % (symbol, order['orderId'], price))\n\n #Wait 4 seconds to be sold.\n time.sleep(WAIT_TIME_SELL)\n \n # Did profit get caught\n if sell_price >= lastPrice:\n\n if filled_qty > 0:\n ret = client.sell_limit(symbol, filled_qty, sell_price)\n \n print (\"Sales were made at %.8f price.\" % (sell_price))\n\n if 'msg' in ret:\n message(ret['msg'])\n \n print (\"symbol: %.8f executedQty: %.8f origQty: %.8f\" % (ret['symbol'], ret['executedQty'], ret['origQty']))\n \n else:\n \n print (\"Wait fill/partial fill. 
filledQty: %s \" % (filled_qty))\n \n else:\n\n cancel_order(symbol, ORDER_ID)\n\n # Reset order id\n ORDER_ID = 0\n \n # Empty ORDER file\n write(\" \") \n \ndef cancel_order(symbol, orderId):\n\n ret = client.cancel(symbol, orderId)\n if 'msg' in ret:\n message(ret['msg'])\n\ndef get_order(symbol, orderId):\n\n ret = client.query_order(symbol, orderId)\n if 'msg' in ret:\n message(ret['msg'])\n return False\n\n # Canceled #Filled #Partial Fill\n if ret['status'] != \"CANCELED\":\n return ret\n \ndef get_ticker(symbol):\n ret = client.get_ticker(symbol)\n return float(ret[\"lastPrice\"])\n\ndef message(msg):\n print (\"Error: \" + msg)\n exit(1)\n \ndef calc(lastBid):\n return lastBid + (lastBid * PROFIT / 100)\n \ndef action(symbol):\n \n global ORDER_ID\n \n # Order amount\n quantity = option.quantity\n\n lastPrice = get_ticker(symbol)\n btcPrice = get_ticker(\"BTCUSDT\")\n \n ret = client.get_orderbooks(symbol, 5)\n lastBid = float(ret['bids'][0][0]) #last buy price (bid)\n lastAsk = float(ret['asks'][0][0]) #last sell price (ask)\n \n buyPrice = lastBid + option.increasing #target buy price\n sellPrice = lastAsk - option.decreasing #target sell price\n \n # Spread \n profitableSellingPrice = calc(lastBid)\n earnTotal = profitableSellingPrice - buyPrice\n \n if ORDER_ID is 0:\n\n print ('price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f BTC:$%.1f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, btcPrice))\n\n # Did profit get caught\n if lastAsk >= profitableSellingPrice:\n \n try:\n\n ORDER_ID = buy_limit(symbol, quantity, buyPrice)\n\n print (\"Percentage of %s profit. Order created from %.8f. Earn: %.8f satoshi\" % (PROFIT, buyPrice, earnTotal))\n\n except:\n print (\"... buy try again...\")\n\n else:\n \n try:\n \n # Order information will be kept on file\n file = open(\"ORDER\", \"r\") \n data = file.read().split(',')\n \n profitableSellingPrice_file = calc(data[3]) #stored buyPrice\n \n # If the order is complete, try to sell it.\n ORDER_ID = sell_limit(symbol, ORDER_ID, lastPrice, profitableSellingPrice_file)\n\n print (\"Profit is lost, order canceled %s\" % (ORDER_ID))\n\n except:\n print (\"... sell try again...\")\n \ndef main():\n \n symbol = option.symbol\n\n print (\"@yasinkuyu, 2017\")\n print (\"Auto Trading for Binance.com (Beta). Enter your symbol. 
Ex: %s\" % symbol)\n \n name = raw_input()\n \n if name != \"\":\n symbol = name\n \n print (\"trader.py --quantity %s --symbol %s --profit %s --wait_time %s --orderid %s \\n\" % (option.quantity, symbol, option.profit, option.wait_time, option.orderid))\n \n print ('%%%s profit scanning for %s \\n' % (PROFIT, symbol))\n \n if TEST_MODE:\n print (\"Test mode active\")\n \n while True:\n \n startTime = time.time()\n action(symbol)\n endTime = time.time()\n\n if endTime - startTime < WAIT_TIME:\n time.sleep(WAIT_TIME - (endTime - startTime))\n \nif __name__ == \"__main__\":\n main()","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"346691318","text":"'''\n@file : get_label.py\n@brief : get label from split files and then stored with npy file\n@author: Lee\n@date : 12/1/2018\n@history:\n'''\n\nimport csv\nimport numpy as np\n\n'''\n@input : file(a str for file path); l(a list will be stored to the file)\n@output: None\n@brief: file has already been opened\n@history: it is not used in this file\n\n'''\n\n'''\n@brief: main function process\n'''\n\nbase_name = '../data/split_'\nsuffix_name = '.csv'\nlabel_list_save = []\nf_label = file('../data/label.npy', 'wb')\npre_fileid = 0\nfor file_num in xrange(1, 10):\n filename = base_name + str(file_num) + suffix_name # a better expression\n with open(filename) as f:\n print('\\n\\n handing %s' % filename)\n reader = csv.reader(f)\n try:\n for row in reader:\n # get fileid and tid\n fileid = int(row[0])\n label = int(row[1])\n\n # save one label for each kind of fileid\n if pre_fileid != fileid:\n label_list_save.append(label)\n print(fileid, label)\n\n pre_fileid = fileid # update pre_fileid\n except:\n print('Error')\n exit(0)\n\n# save label information to npy file\nnp.save(f_label, label_list_save)\nprint('total label: %d' % len(label_list_save))\nf_label.close()\n","sub_path":"utils/get_label.py","file_name":"get_label.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"249904958","text":"from django.test import TestCase\n\nfrom hc.api.models import Check\n\n\nclass BasicsTestCase(TestCase):\n\n def test_it_shows_welcome(self):\n response = self.client.get(\"/\")\n self.assertContains(response, \"Get Notified\", status_code=200)\n\n def test_welcome_code(self):\n response = self.client.get(\"/\")\n code = self.client.session[\"welcome_code\"]\n assert Check.objects.filter(code=code).exists()\n\n self.client.session[\"welcome_code\"] = \"x\"\n response = self.client.get(\"/\")\n code = self.client.session[\"welcome_code\"]\n assert response.status_code == 200\n assert code != \"x\"\n assert Check.objects.filter(code=code).exists()\n","sub_path":"hc/front/tests/test_basics.py","file_name":"test_basics.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"431757595","text":"# Code by Infiltrat8r\r\n\r\n\r\n# We will be declaring variables C language style :)\r\n\r\ncounting_list = ['first', 'second', 'third', 'fourth', 'fifth'] # A list for having some coding magic in inputs.\r\nlis_subjects = []\r\nlis_marks = []\r\nlis_subject_grades = []\r\ntotal_marks = 0\r\n\r\nname = input('Enter your name: ')\r\nfather_name = input('Enter your father\\'s name: ')\r\nrollnumber = input('Enter your roll number: ')\r\n\r\ncounter = 
0 # The counter for the while loop below.\r\nwhile counter != 5: # This loop runs five times and creates a list of subjects and marks.\r\n subject_input = input('Enter the name of the {} subject: '.format(counting_list[counter]))\r\n marks_input = int(input('Enter your marks in {} : '.format(subject_input)))\r\n lis_subjects.append(subject_input)\r\n lis_marks.append(marks_input)\r\n counter = counter + 1\r\n\r\n\r\ndef compute_subject_grade(marks): # Simple function to compute grade\r\n if marks < 0:\r\n return 'Inputted marks cannot be graded'\r\n elif marks < 50:\r\n return 'Fail'\r\n elif 50 <= marks < 60:\r\n return 'C'\r\n elif 60 <= marks < 70:\r\n return 'B'\r\n elif 70 <= marks < 80:\r\n return 'A'\r\n elif 80 <= marks <= 100:\r\n return 'A+'\r\n else:\r\n return 'Inputted marks cannot be graded'\r\n\r\n\r\nfor sub_marks in lis_marks: # This for loop creates a list containing the grades of respective subjects\r\n sub_grade = compute_subject_grade(sub_marks)\r\n lis_subject_grades.append(sub_grade)\r\n\r\nfor sub_marks in lis_marks : # Computing Overall Total Percentage\r\n total_marks = total_marks + sub_marks\r\n percentage = ((total_marks/500)*100)\r\n\r\ntotal_grade = compute_subject_grade(int(percentage))\r\n\r\nprint('\\n\\n********* Result Card *********\\n\\n')\r\nprint('Student Name: {}\\nFather\\'s Name: {}\\nRoll no: {}'.format(name, father_name, rollnumber))\r\nprint('****************************************************')\r\nprint('*Name of Subject * Obtained Marks * Obtained Grade *')\r\ncounter_2 = 0\r\nwhile counter_2 != 5:\r\n print('****************************************************')\r\n print('* {:13} * {:12} * {:12} *'.format(lis_subjects[counter_2], str(lis_marks[counter_2]), lis_subject_grades[counter_2]))\r\n counter_2 = counter_2 + 1\r\nprint('*************************************************************************')\r\nprint('* Total Obtained * Total Marks * Obtained Percentage * Obtained Grade*')\r\nprint('*************************************************************************')\r\nprint('* {:11} * {:14} * {:17} * {:9} *'.format(str(total_marks), '500', str(percentage), total_grade))\r\nprint('*************************************************************************')","sub_path":"Week 4/(4)Grade_Calculator.py","file_name":"(4)Grade_Calculator.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"261662364","text":"# coding: utf-8\nfrom __future__ import division, absolute_import\nfrom __future__ import print_function, unicode_literals\n\nimport numpy as np\nimport random\nimport gym\nimport deeprl_hw1.lake_envs as lake_env\nimport datetime\n\n\ndef evaluate_policy(env, gamma, policy, max_iterations=int(1e3), tol=1e-3):\n \"\"\"Evaluate the value of a policy.\n\n See page 87 (pg 105 pdf) of the Sutton and Barto Second Edition\n book.\n\n http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n Parameters\n ----------\n env: gym.core.Environment\n The environment to compute value iteration for. Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n policy: np.array\n The policy to evaluate. 
Maps states to actions.\n max_iterations: int\n The maximum number of iterations to run before stopping.\n tol: float\n Determines when value function has converged.\n\n Returns\n -------\n np.ndarray, int\n The value for the given policy and the number of iterations till\n the value function converged.\n \"\"\"\n prob = 0\n reward = 2\n next_state = 1\n value_function = []\n for s in range(env.nS):\n value_function.append(random.uniform(0, 3))\n k = 0\n delta = 1.0\n while (delta >= tol) & (k <= max_iterations):\n delta = 0.0\n value_temporary = [0] * env.nS\n for s in range(env.nS):\n value_temporary[s] = value_function[s]\n action = policy[s]\n result = env.P[s][action]\n value_function[s] = 0\n for i, j in enumerate(result):\n value_function[s] += result[i][prob] * (\n result[i][reward] + gamma * value_function[result[i][next_state]])\n delta = max(delta, abs(value_temporary[s] - value_function[s]))\n k = k + 1\n return value_function, k\n\n\ndef value_function_to_policy(env, gamma, value_function):\n \"\"\"Output action numbers for each state in value_function.\n\n Parameters\n ----------\n env: gym.core.Environment\n Environment to compute policy for. Must have nS, nA, and P as\n attributes.\n gamma: float\n Discount factor. Number in range [0, 1)\n value_function: np.ndarray\n Value of each state.\n\n Returns\n -------\n np.ndarray\n An array of integers. Each integer is the optimal action to take\n in that state according to the environment dynamics and the\n given value function.\n \"\"\"\n prob = 0\n reward = 2\n next_state = 1\n policy = []\n for s in range(env.nS):\n policy.append(0)\n for s in range(env.nS):\n value_action_pairs = [0, 0, 0, 0]\n for action in range(env.nA):\n result = env.P[s][action]\n for i, j in enumerate(result):\n value_action_pairs[action] += result[i][prob] * (\n result[i][reward] + gamma * value_function[result[i][next_state]])\n policy[s] = np.argmax(value_action_pairs)\n return policy\n\n\ndef improve_policy(env, gamma, value_func, policy):\n \"\"\"Given a policy and value function improve the policy.\n\n See page 87 (pg 105 pdf) of the Sutton and Barto Second Edition\n book.\n\n http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n Parameters\n ----------\n env: gym.core.Environment\n The environment to compute value iteration for. Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n value_func: np.ndarray\n Value function for the given policy.\n policy: dict or np.array\n The policy to improve. Maps states to actions.\n\n Returns\n -------\n bool, np.ndarray\n Returns true if policy changed. 
Also returns the new policy.\n    \"\"\"\n    prob = 0\n    reward = 2\n    next_state = 1\n    old_policy = []\n    for s in range(env.nS):\n        old_policy.append(0)\n    for s in range(env.nS):\n        old_policy[s] = policy[s]\n        value_action_pairs = [0, 0, 0, 0]\n        for action in range(env.nA):\n            result = env.P[s][action]\n            for i, j in enumerate(result):\n                value_action_pairs[action] += result[i][prob] * (\n                    result[i][reward] + gamma * value_func[result[i][next_state]])\n        policy[s] = np.argmax(value_action_pairs)\n    return (policy == old_policy), policy\n\n\ndef policy_iteration(env, gamma, max_iterations=int(1e3), tol=1e-3):\n    \"\"\"Runs policy iteration.\n\n    See page 87 (pg 105 pdf) of the Sutton and Barto Second Edition\n    book.\n\n    http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n    You should use the improve_policy and evaluate_policy methods to\n    implement this method.\n\n    Parameters\n    ----------\n    env: gym.core.Environment\n        The environment to compute value iteration for. Must have nS,\n        nA, and P as attributes.\n    gamma: float\n        Discount factor, must be in range [0, 1)\n    max_iterations: int\n        The maximum number of iterations to run before stopping.\n    tol: float\n        Determines when value function has converged.\n\n    Returns\n    -------\n    (np.ndarray, np.ndarray, int, int)\n        Returns optimal policy, value function, number of policy\n        improvement iterations, and number of value iterations.\n    \"\"\"\n    policy = []\n    for s in range(env.nS):\n        policy.append(env.action_space.sample())\n    policy_stable = False\n    j = 0\n    k_total = 0\n    while (not policy_stable) & (j <= max_iterations):\n        value_function, k = evaluate_policy(env, gamma, policy, max_iterations, tol)\n        k_total += k\n        policy_stable, policy = improve_policy(env, gamma, value_function, policy)\n        j = j + 1\n    return policy, value_function, j, k_total\n\n\ndef value_iteration(env, gamma, max_iterations=int(1e3), tol=1e-3):\n    \"\"\"Runs value iteration for a given gamma and environment.\n\n    See page 90 (pg 108 pdf) of the Sutton and Barto Second Edition\n    book.\n\n    http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n    Parameters\n    ----------\n    env: gym.core.Environment\n        The environment to compute value iteration for. 
Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n max_iterations: int\n The maximum number of iterations to run before stopping.\n tol: float\n Determines when value function has converged.\n\n Returns\n -------\n np.ndarray, iteration\n The value function and the number of iterations it took to converge.\n \"\"\"\n prob = 0\n reward = 2\n next_state = 1\n value_function = []\n for s in range(env.nS):\n value_function.append(random.uniform(0, 3))\n delta = 1.0\n k = 0\n while (delta >= tol) & (k <= max_iterations):\n delta = 0.0\n value_temporary = [0] * env.nS\n for s in range(env.nS):\n value_temporary[s] = value_function[s]\n value_action_pairs = [0, 0, 0, 0]\n for action in range(env.nA):\n result = env.P[s][action]\n value_function[s] = 0\n for i, j in enumerate(result):\n value_action_pairs[action] += result[i][prob] * (\n result[i][reward] + gamma * value_function[result[i][next_state]])\n value_function[s] = np.max(value_action_pairs)\n delta = max(delta, abs(value_temporary[s] - value_function[s]))\n k = k+1\n return value_function, k\n\n\ndef print_policy(policy, action_names):\n \"\"\"Print the policy in human-readable format.\n\n Parameters\n ----------\n policy: np.ndarray\n Array of state to action number mappings\n action_names: dict\n Mapping of action numbers to characters representing the action.\n \"\"\"\n str_policy = policy.astype('str')\n for action_num, action_name in action_names.items():\n np.place(str_policy, policy == action_num, action_name)\n print(str_policy)\n\ndef main():\n env = gym.make('Deterministic-4x4-FrozenLake-v0')\n begin_policy_iteration = datetime.datetime.now()\n policy_policy_iteration, value_function_policy_iteration, j_policy_iteration, k_policy_iteration = policy_iteration(env, 0.9, int(1e3), 1e-3)\n end_policy_iteration = datetime.datetime.now()\n begin_value_iteration = datetime.datetime.now()\n value_function_value_iteration, k_value_iteration = value_iteration(env, 0.9, int(1e3), 1e-3)\n end_value_iteration = datetime.datetime.now()\n policy_value_iteration = value_function_to_policy(env, 0.9, value_function_value_iteration)\n print('policy iteration cost : %s' % (end_policy_iteration - begin_policy_iteration))\n print('value iteration cost : %s' % (end_value_iteration - begin_value_iteration))\n action_names = {0: 'L', 1: 'D', 2: 'R', 3: 'U'}\n print('policy of policy iteration:')\n print_policy(np.array(policy_policy_iteration), action_names)\n print('policy of value iteration:')\n print_policy(np.array(policy_value_iteration), action_names)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"deeprl_hw1/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"142843432","text":"import json\nimport os\nimport sys\n\nimport requests\n\nAPI = \"https://api.districtr.org\"\nTOKEN = os.environ.get(\"DISTRICTR_API_TOKEN\")\nHEADERS = {\"Authorization\": \"Bearer \" + TOKEN}\n\n\ndef main(path):\n with open(path) as f:\n places = json.load(f)\n\n for place in places:\n serialized_place = {\n \"name\": place[\"name\"],\n \"description\": place.get(\"description\", \"\"),\n }\n response = requests.post(\n API + \"/places/\", headers=HEADERS, json=serialized_place\n )\n print(response)\n if response.status_code >= 200 and response.status_code < 400:\n print(response.json())\n else:\n print(response.text)\n break\n\n\nif __name__ == \"__main__\":\n 
main(sys.argv[1])\n","sub_path":"scripts/add_places.py","file_name":"add_places.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613864055","text":"# -*- coding: utf-8 -*-\n# Django\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.db.models import Count\nfrom django.forms import modelformset_factory\n# REST\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework.renderers import JSONRenderer\n# Tarteel\nfrom evaluation.models import TajweedEvaluation, Evaluation\nfrom evaluation.serializers import TajweedEvaluationSerializer, EvaluationSerializer\nfrom restapi.models import AnnotatedRecording\n# Python\nimport io\nimport json\nimport os\nimport random\n\n# =============================================== #\n# Constant Global Definitions #\n# =============================================== #\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# ===================================== #\n# Utility Functions #\n# ===================================== #\n\n\ndef get_tajweed_rule(surah_num=0, ayah_num=0, random_rule=False):\n \"\"\"If random_rule is true then we get a random tajweed rule. Otherwise returns a\n specific rule. Both options return the text and word index.\n :return: A tuple with the surah & ayah number, text, rule, and word position\n :rtype: tuple(int, int, str, str, int) or tuple(str, str, int)\n \"\"\"\n TAJWEED_FILE = os.path.join(BASE_DIR, 'utils/data-rules.json')\n with io.open(TAJWEED_FILE) as file:\n tajweed_rules = json.load(file)\n tajweed_rules = tajweed_rules['quran']\n file.close()\n\n UTHMANI_FILE = os.path.join(BASE_DIR, 'utils/data-uthmani.json')\n with io.open(UTHMANI_FILE, 'r', encoding=\"utf-8-sig\") as file:\n uthmani_q = json.load(file)\n uthmani_q = uthmani_q['quran']\n file.close()\n\n if random_rule:\n random_surah = random.choice(tajweed_rules['surahs'])\n surah_num = random_surah['num']\n random_ayah = random.choice(random_surah['ayahs'])\n ayah_num = random_ayah['num']\n rule_dict = random.choice(random_ayah['rules'])\n else:\n rule_dict = tajweed_rules['surah'][surah_num - 1]['ayahs'][ayah_num - 1]\n rule = rule_dict['rule']\n rule_start = rule_dict['start']\n rule_end = rule_dict['end']\n\n # 1-indexed\n ayah_text = uthmani_q['surahs'][surah_num - 1]['ayahs'][ayah_num - 1]['text']\n ayah_text_list = ayah_text.split(\" \")\n # Get the index of the word we're looking for\n position = 0\n curr_word_ind = 0\n for i, word in enumerate(ayah_text_list):\n position += len(word)\n if position >= rule_start:\n curr_word_ind = i\n break\n\n if random_rule:\n return surah_num, ayah_num, ayah_text, rule, curr_word_ind\n\n return ayah_text, rule, curr_word_ind\n\n\ndef is_evaluator(user):\n if user:\n return user.groups.filter(name='evaluator').exists()\n return False\n\ndef get_low_evaluation_count():\n \"\"\"Finds a recording with the lowest number of evaluations\n :returns: A random AnnotatedRecording object which has the minimum evaluations\n :rtype: AnnotatedRecording\n \"\"\"\n\n recording_evals = AnnotatedRecording.objects.annotate(total=Count('evaluation'))\n recording_evals_dict = {entry : entry.total for entry in recording_evals}\n\n min_evals = min(recording_evals_dict.values())\n min_evals_recordings = [k 
for k, v in recording_evals_dict.items() if v==min_evals]\n\n return random.choice(min_evals_recordings)\n\n# ================================= #\n# API Functions #\n# ================================= #\n\n\nclass TajweedEvaluationList(APIView):\n \"\"\"API Endpoint that allows tajweed evaluations to be posted or\n retrieved \"\"\"\n\n def get(self, request, format=None):\n evaluations = TajweedEvaluation.objects.all().order_by('-timestamp')\n tajweed_serializer = TajweedEvaluationSerializer(evaluations, many=True)\n return Response(tajweed_serializer.data)\n\n def post(self, request, *args, **kwargs):\n print(\"EVALUATOR: Received a tajweed evaluation:\\n{}\".format(request.data))\n new_evaluation = TajweedEvaluationSerializer(data=request.data)\n if new_evaluation.is_valid(raise_exception=True):\n new_evaluation.save()\n return Response(new_evaluation.data, status=status.HTTP_201_CREATED)\n return Response(new_evaluation.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# ===================================== #\n# Static Page Views #\n# ===================================== #\n\n\ndef evaluator(request):\n \"\"\"Returns a random ayah for an expert to evaluate for any mistakes.\n :param request: rest API request object.\n :type request: Request\n :return: Rendered view of evaluator page with ayah and audio url\n :rtype: HttpResponse\n \"\"\"\n if not request.session.session_key:\n request.session.create()\n session_key = request.session.session_key\n\n random_recording = get_low_evaluation_count()\n # Load the Arabic Quran from JSON\n file_name = os.path.join(BASE_DIR, 'utils/data-uthmani.json')\n with io.open(file_name, 'r', encoding='utf-8-sig') as file:\n uthmani_quran = json.load(file)\n uthmani_quran = uthmani_quran[\"quran\"]\n\n # Fields\n surah_num = random_recording.surah_num\n ayah_num = random_recording.ayah_num\n audio_url = random_recording.file.url\n ayah_text = uthmani_quran[\"surahs\"][surah_num - 1][\"ayahs\"][ayah_num - 1][\"text\"]\n recording_id = random_recording.id\n\n # Create a form to have user input degree/category of mistake\n degree_cat_form = modelformset_factory(TajweedEvaluation,\n fields=('degree', 'category'))()\n evaluation_count = Evaluation.objects.all().count()\n recording_count = AnnotatedRecording.objects.filter(\n file__gt='', file__isnull=False).count()\n context = {'degree_category_form': degree_cat_form,\n 'surah_num': surah_num,\n 'ayah_num': ayah_num,\n 'ayah_text': ayah_text,\n 'audio_url': audio_url,\n 'session_key': session_key,\n 'recording_id': recording_id,\n 'evaluation_count': evaluation_count,\n 'recording_count': recording_count}\n return render(request, 'evaluation/evaluator.html', context)\n\n\ndef evaluator_help(request):\n \"\"\"Returns a simple static page with evaluation instructions.\n :param request: rest API request object.\n :type request: Request\n :return: Rendered view of evaluator page with ayah and audio url\n :rtype: HttpResponse\n \"\"\"\n return render(request, 'evaluation/help.html', {})\n\n\n@api_view(('GET',))\n@renderer_classes((JSONRenderer,))\ndef get_evaluations_count(request, format=None):\n evaluations = Evaluation.objects.all().count()\n res = {\n \"count\": evaluations\n }\n return Response(res)\n\n\n@login_required\n@user_passes_test(is_evaluator, login_url='/')\ndef tajweed_evaluator(request):\n \"\"\"Returns a random ayah for an expert to evaluate for any mistakes.\n\n :param request: rest API request object.\n :type request: Request\n :return: Rendered view of evaluator page with form, ayah info, and 
URL.\n :rtype: HttpResponse\n \"\"\"\n # User tracking - Ensure there is always a session key.\n if not request.session.session_key:\n request.session.create()\n session_key = request.session.session_key\n\n # Get a random tajweed rule and make sure we have something to display\n recordings = None\n while not recordings:\n surah_num, ayah_num, ayah_text, rule, word_index = get_tajweed_rule(random_rule=True)\n recordings = AnnotatedRecording.objects.filter(file__gt='', file__isnull=False,\n surah_num=surah_num,\n ayah_num=ayah_num)\n random_recording = random.choice(recordings)\n\n # Make sure we avoid negative count\n prev_word_ind = word_index - 1 if word_index > 0 else None\n # Make sure we avoid overflow\n ayah_text_list = ayah_text.split(\" \")\n next_word_ind = word_index + 1 if word_index + 1 < len(ayah_text_list) else None\n # Fields\n audio_url = random_recording.file.url\n recording_id = random_recording.id\n\n # Get text rep of rule\n category_dict = dict(TajweedEvaluation.CATEGORY_CHOICES)\n rule_text = category_dict[rule]\n\n return render(request, 'evaluation/tajweed_evaluator.html',\n {'session_key': session_key,\n 'rule_text': rule_text,\n 'rule_id': rule,\n 'surah_num': surah_num,\n 'ayah_num': ayah_num,\n 'ayah_text': ayah_text_list,\n 'word_index': word_index,\n 'prev_word_index': prev_word_ind,\n 'next_word_index': next_word_ind,\n 'audio_url': audio_url,\n 'recording_id': recording_id})\n\n\nclass EvaluationList(APIView):\n def get(self, request, *args, **kwargs):\n random_recording = get_low_evaluation_count()\n # Load the Arabic Quran from JSON\n file_name = os.path.join(BASE_DIR, 'utils/data-uthmani.json')\n with io.open(file_name, 'r', encoding='utf-8-sig') as file:\n uthmani_quran = json.load(file)\n uthmani_quran = uthmani_quran[\"quran\"]\n\n # Fields\n surah_num = random_recording.surah_num\n ayah_num = random_recording.ayah_num\n audio_url = random_recording.file.url\n ayah_text = uthmani_quran[\"surahs\"][surah_num - 1][\"ayahs\"][ayah_num - 1][\"text\"]\n recording_id = random_recording.id\n res = {\n \"audio_url\": audio_url,\n \"ayah_text\": ayah_text,\n \"recording_id\": recording_id,\n \"surah_num\": surah_num,\n \"ayah_num\": ayah_num\n }\n return Response(res)\n\n def post(self, request, *args, **kwargs):\n session_key = request.session.session_key or request.data[\"session_id\"]\n data = {\n \"session_id\": session_key\n }\n ayah = request.data[\"ayah\"]\n data[\"associated_recording\"] = ayah[\"recording_id\"]\n data[\"evaluation\"] = ayah[\"evaluation\"]\n new_evaluation = EvaluationSerializer(data=data)\n\n if new_evaluation.is_valid(raise_exception=True):\n new_evaluation.save()\n return Response(status=status.HTTP_201_CREATED)\n return Response(\"Invalid hash or timed out request\",\n status=status.HTTP_400_BAD_REQUEST)\n\n","sub_path":"evaluation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60960647","text":"from django.test import TestCase\n\nfrom users.models import User\nfrom recipes.models import Recipe\n\n\nclass RecipeTestCase(TestCase):\n def setUp(self):\n self.password1 = 'testpass1'\n self.password2 = 'testpass2'\n\n self.user1 = User.objects.create_user(\n 'test1',\n 'test1@test.com',\n self.password1)\n self.user2 = User.objects.create_user(\n 'test2',\n 'test2@test.com',\n self.password2)\n\n self.recipe1 = Recipe.objects.create(\n name='apple',\n user=self.user1,\n rating=Recipe.NOT_RATED,\n 
url='',\n description='')\n self.recipe1.tags.add('tag1', 'tag2')\n\n self.recipe2 = Recipe.objects.create(\n name='cake',\n user=self.user1,\n rating=Recipe.AWFUL,\n url='http://www.google.com/',\n description='')\n self.recipe2.tags.add('tag2', 'tag3')\n\n self.recipe3 = Recipe.objects.create(\n name='banana',\n user=self.user1,\n rating=Recipe.GOOD,\n url='',\n description='a test recipe')\n self.recipe3.tags.add('tag1', 'tag3')\n\n self.recipe4 = Recipe.objects.create(\n name='pie',\n user=self.user2,\n rating=Recipe.BAD,\n url='http://www.test.com/',\n description='')\n self.recipe4.tags.add('test', 'bad')\n\n self.recipe5 = Recipe.objects.create(\n name='chocolate',\n user=self.user2,\n rating=Recipe.AVERAGE,\n url='',\n description='an average recipe')\n self.recipe5.tags.add('avg')\n\n self.recipe6 = Recipe.objects.create(\n name='rice',\n user=self.user2,\n rating=Recipe.NOT_RATED,\n url='',\n description='')\n\n self.recipe7 = Recipe.objects.create(\n name='pasta',\n user=self.user2,\n rating=Recipe.GREAT,\n url='',\n description='great recipe!')\n self.recipe7.tags.add('dinner', 'great')\n\n self.recipe8 = Recipe.objects.create(\n name='ice cream',\n user=self.user2,\n rating=Recipe.NOT_RATED,\n url='',\n description='need to try')\n self.recipe8.tags.add('todo')\n\n def test_login_required(self):\n response = self.client.get('/recipes/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/', 302))\n\n response = self.client.get(\n '/recipes/view/' + self.recipe1.slug + '/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/view/' +\n self.recipe1.slug + '/', 302))\n\n response = self.client.get('/recipes/add/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/add/', 302))\n\n response = self.client.post('/recipes/add/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/add/', 302))\n\n response = self.client.get(\n '/recipes/edit/' + self.recipe2.slug + '/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/edit/' +\n self.recipe2.slug + '/', 302))\n\n response = self.client.post(\n '/recipes/edit/' + self.recipe2.slug + '/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/edit/' +\n self.recipe2.slug + '/', 302))\n\n response = self.client.get(\n '/recipes/delete/' + self.recipe2.slug + '/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/delete/' +\n self.recipe2.slug + '/', 302))\n\n response = self.client.post(\n '/recipes/delete/' + self.recipe2.slug + '/', follow=True)\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.redirect_chain), 1)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/accounts/login/?next=/recipes/delete/' +\n self.recipe2.slug + '/', 302))\n\n def test_index_auth(self):\n self.assertEqual(\n self.client.login(\n username=self.user1.username,\n password=self.password1),\n True)\n response = self.client.get('/recipes/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['recipes']), 3)\n self.assertEqual(response.context['recipes'][0].name, 'apple')\n self.assertEqual(response.context['recipes'][1].name, 'banana')\n self.assertEqual(response.context['recipes'][2].name, 'cake')\n self.client.logout()\n\n def test_view(self):\n self.client.login(\n username=self.user2.username, password=self.password2)\n response = self.client.get('/recipes/view/' + self.recipe4.slug + '/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['recipe'].name, self.recipe4.name)\n self.assertEqual(\n set(response.context['recipe'].tags.names()),\n set(self.recipe4.tags.names()))\n self.client.logout()\n\n def test_update(self):\n self.client.login(\n username=self.user2.username, password=self.password2)\n response = self.client.get('/recipes/edit/' + self.recipe4.slug + '/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['object'].name, self.recipe4.name)\n self.assertEqual(\n set(response.context['object'].tags.names()),\n set(self.recipe4.tags.names()))\n response = self.client.post(\n '/recipes/edit/' + self.recipe4.slug + '/',\n {'rating': Recipe.AVERAGE, 'url': '', 'description': 'new',\n 'tags': 'newtag1, newtag2'}, follow=True)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/recipes/', 302))\n updated_recipe = Recipe.objects.get(slug=self.recipe4.slug)\n self.assertEqual(updated_recipe.rating, Recipe.AVERAGE)\n self.assertEqual(updated_recipe.url, '')\n self.assertEqual(updated_recipe.description, 'new')\n self.assertEqual(\n set(updated_recipe.tags.names()),\n set(['newtag1', 'newtag2']))\n self.client.logout()\n\n def test_create(self):\n self.client.login(\n username=self.user1.username, password=self.password1)\n response = self.client.get('/recipes/add/')\n self.assertEqual(response.status_code, 200)\n response = self.client.post(\n '/recipes/add/',\n {'name': 'another test recipe', 'rating': Recipe.GREAT,\n 'url': 'http://www.wonkabar.com/', 'description': 'foobar',\n 'tags': 'scrumdiddlyumptious'}, follow=True)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/recipes/', 302))\n created_recipe = Recipe.objects.get(slug='another-test-recipe')\n self.assertEqual(created_recipe.rating, Recipe.GREAT)\n self.assertEqual(created_recipe.url, 'http://www.wonkabar.com/')\n self.assertEqual(created_recipe.description, 'foobar')\n self.assertEqual(\n set(created_recipe.tags.names()),\n set(['scrumdiddlyumptious']))\n self.client.logout()\n\n def test_delete(self):\n self.client.login(\n username=self.user1.username, password=self.password1)\n response = self.client.get(\n '/recipes/delete/' + self.recipe1.slug + '/')\n self.assertEqual(response.status_code, 200)\n response = self.client.post(\n '/recipes/delete/' + self.recipe1.slug + '/',\n follow=True)\n self.assertEqual(\n response.redirect_chain[0],\n (u'http://testserver/recipes/', 302))\n with self.assertRaises(Recipe.DoesNotExist):\n Recipe.objects.get(slug=self.recipe1.slug)\n 
self.client.logout()\n","sub_path":"meal_planner/recipes/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612190136","text":"\nimport json \n\nimport requests as rq \nimport urllib\n\nclass Response:\n \"\"\" \n Response Class Doc\n ===================\n\n \"\"\"\n def __init__(self, code='', desc=''):\n self.params = {\"responseCode\": code, \n 'responseDesc': desc, \"data\": []}\n\n\n def add_params(self, key, value):\n self.params['data'].append({key: value})\n\n def mode(self, indicator):\n self.params['validation'] = indicator\n\n def add_message(self, message):\n self.params[\"responseDesc\"] = message\n\n def success(self):\n self.params['responseCode'] = \"0\"\n\n def failed(self):\n self.params['responseCode'] = \"1\"\n\n def get_body(self):\n print('API Response', self.params, '\\n\\n')\n return json.dumps(self.params)\n\n def api_response_format(self, obj_resp):\n\n # {'statusCode': '00',\n # 'statusDescription': 'Login Successful',\n # 'mac_address': '02:00:00:44:55:66',\n # 'transaction_ref': '$2y$10$aAMtVP5QGeXt1vNMJwipO.hkwRGle.UrUk3L8Y14aHJ3336Tnd5X.'}\n\n for key, value in obj_resp.items():\n\n if key == \"statusCode\":\n if value == \"00\":\n self.success()\n else: \n self.failed()\n\n continue\n\n if key == \"statusDescription\":\n msg = value\n if isinstance(value, dict):\n for key, val in value.items():\n if isinstance(val, list):\n msg = val[0]\n else:\n msg = val\n \n break\n\n self.add_message(msg)\n continue\n\n elif key == 'statusMessage':\n \n msg = value\n if isinstance(value, dict):\n for key, val in value.items():\n if isinstance(val, list):\n msg = val[0]\n else:\n msg = val\n \n break\n\n\n self.add_message(msg)\n \n continue\n\n\n self.add_params(key, value)\n\n\n def status(self):\n return self.params['responseCode'] == \"0\"\n\n\n\nclass RequestHandler:\n\n def __init__(self, url, method=0, data={}, headers={\"Content-Type\": \"application/json\"}):\n\n self.method = \"GET\" if method == 0 else \"POST\"\n self.url = url\n self.data = data \n self.headers = headers \n\n def send(self):\n\n if self.method == \"POST\":\n assert self.data , \"Data parameter is missing for post method\"\n\n print(\"=== Request Data ===\", self.data, '\\n')\n if self.method == \"GET\":\n self.format_get_params()\n output = rq.get(self.url, headers=self.headers)\n else:\n output = rq.post(self.url, data=json.dumps(self.data), headers=self.headers)\n\n try:\n resp = output.status_code, output.json() \n except Exception as e:\n resp = output.status_code, {} \n # log this exception \n \n print(\"=== Response Data ===\", resp)\n return resp \n\n def format_get_params(self):\n if self.data:\n self.url = self.url + \"?\" + urllib.parse.urlencode(self.data)\n\n\n\nclass FormHandler:\n\n def __init__(self, form, exclude_data=[], exclude_field=[], readonly_field=[]):\n self.form = form\n self.fields = []\n self.exclude_data = exclude_data\n self.exclude_field = exclude_field\n self.readonly_field = readonly_field\n\n def render(self):\n\n prev = None\n total = len(self.form._fields) - len(self.exclude_field)\n count = 1\n field_names = []\n\n self.readonly_field = self.form.__readonlyfields__\n \n # self.form.init_func() ## to run inits before displaying the form\n\n \n for x in self.form:\n if x.name in self.exclude_field:\n continue\n\n field_names.append(x.name)\n\n\n for field, obj in self.form._fields.items():\n if field in self.exclude_field:\n 
continue\n\n if field == 'customerDtNumber':\n print('\\n\\n',obj.type, '\\n\\n')\n\n _f = {\n \"name\" : obj.label.text,\n \"field\": field,\n \"value\": self.get_data(field, obj.data),\n \"retkey\" : \"next\" if count < total else \"done\" ,\n \"error\" : obj.errors[0] if obj.errors else None,\n \"nextfield\" : field_names[count] if count < total else None,\n \"keytype\": self.set_keytype(obj.type),\n \"encrypt\": True if obj.type == 'PasswordField' else False,\n \"is_editable\": False if field in self.readonly_field else True,\n \"is_hidden\": True if obj.type == 'HiddenField' else False,\n \"type\": self.get_type(obj.type),\n \"choices\": self.get_field_choices(obj)\n }\n\n self.fields.append(_f)\n count += 1 \n\n return self.fields\n\n def get_field_choices(self, obj):\n\n if not hasattr(obj, \"choices\"):\n return []\n\n return obj.choices \n\n def get_type(self, _type):\n\n if _type == 'SelectField':\n return \"Picker\"\n\n elif _type == 'HiddenField':\n return 'Hidden'\n\n elif _type == 'BooleanField':\n return 'Checkbox'\n\n elif _type == 'ButtonField':\n return \"Button\"\n\n\n return \"TextInput\"\n\n\n def get_data(self, name, value):\n\n if name not in self.exclude_data:\n if value:\n return str(value)\n\n return None\n\n\n\n def set_keytype(self, _type):\n\n if _type =='IntegerField':\n return 'numeric'\n \n if _type == \"BooleanField\":\n return \"checkbox\"\n \n # if _type == 'HiddenField':\n # return 'hidden'\n \n return 'default'\n\n\n def check_ishidden(self, _type):\n\n if _type == 'HiddenField':\n return {\"display\": None}\n\n\n def get_errormsg(self): \n\n for fld, obj in self.form.errors.items():\n # return \"field {} {}\".format(fld, obj[0])\n return obj[0]\n\n\n def is_validate(self): \n return self.form.validate()\n\n\n def get_fields(self):\n \n retv = {}\n\n for key, obj in self.form._fields.items():\n retv[key] = obj.data\n\n return retv\n\n\n ","sub_path":"applib/api/resp_handler.py","file_name":"resp_handler.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"162384633","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io.wavfile as wav\nimport os.path\nfrom fdicapy import fdica\n\nRATE = 44100\ncomponents=2\nframe_length = 1024\n\n#input voice data\nfilenames = [('mixed' + str(i+1) + '.wav') for i in range(components)]\nlength_array = [(os.path.getsize(filenames[i])-44)/2 for i in range(components)]\ndata_length = min(length_array)\ninput = np.zeros([components,data_length])\n\nfor i in range(components):\n input[i] = wav.read(filenames[i])[1][0:data_length]\n\noutput = fdica(input, frame_length)\n\n\n#volume up = normalization\noutput = output/np.max(np.abs(output[:,0:data_length-RATE]))*32767\noutput = output.astype(np.int16)\n\nfilenames = [('output' + str(i+1) + '.wav') for i in range(components)]\nfor i in range(components):\n wav.write(filenames[i],RATE,output[i][0:data_length-RATE])\n\n\n","sub_path":"cocktail/FDICA_real2/fdica_test.py","file_name":"fdica_test.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633858734","text":"#!/home/user/anaconda3/bin/python\nimport json\nimport requests\nimport mysql.connector\nimport datetime\n\nheader = {'Content-Type': 'application/json', \\\n 'Accept': 'application/json'}\n\nimport pandas as pd\nfrom influxdb import InfluxDBClient, DataFrameClient\n\n\n\nclass Link():\n\n def __init__(self, 
host, port, user, password, db_name, type_of_link):\n\n        self.host = host\n\n        self.port = port\n\n        self.user = user\n\n        self.password = password\n\n        self.db_name = db_name\n\n        self.type_of_link = type_of_link\n\n\n\n        if (type_of_link == \"influxdb\") :\n\n            self.client = InfluxDBClient(self.host, self.port, self.user, self.password, self.db_name)\n\n        if (type_of_link == \"mysql\") :\n\n            self.client = mysql.connector.connect(user=self.user, password=self.password, host=self.host,\n                                       database=self.db_name)\n\n    ################################# QUERY ###############################################\n\n    def query(self, query):\n\n\n        if (self.type_of_link == \"influxdb\"):\n\n            self.influx_get_data_as_list(query)\n\n            return self.influx_get_data_as_df()\n\n        if (self.type_of_link == \"mysql\"):\n\n            self.mysql_get_data(query)\n\n            return self.influx_get_data_as_df() # <<<<<<<<<<<< CHANGE CHANGE\n\n\n\n\n\n################ MYSQL queries\n\n    def mysql_get_data(self, query_body):\n\n        cursor = self.client.cursor()\n\n        cursor.execute(query_body)\n\n\n\n################ Influx queries\n    def influx_get_data_as_list(self, query_body):\n\n        self.rs_tag = self.client.query(query_body)\n\n        self.data = list(self.rs_tag.get_points())\n\n\n\n    def influx_convert_to_df_second(self, data):\n        main_d = dict()\n        for i in range(len(data)):\n            main_d[list(data[i].values())[1]] = list(data[i].values())[0]\n        data1 = pd.Series(main_d)\n        #print(data1)\n        df = data1.to_frame()\n        df = df.reset_index()\n        df.columns = ['date', 'value']\n        df['date'] = pd.to_datetime(df['date'])\n        df = df.set_index('date')\n        return df\n\n\n    def influx_convert_to_df_first(self, data):\n        main_d = dict()\n        for i in range(len(data)):\n            main_d[list(data[i].values())[0]] = list(data[i].values())[1]\n        data1 = pd.Series(main_d)\n        # print(data1)\n        df = data1.to_frame()\n        df = df.reset_index()\n        df.columns = ['date', 'value']\n        df['date'] = pd.to_datetime(df['date'])\n        df = df.set_index('date')\n        return df\n\n\n    def influx_get_data_as_df(self):\n\n        data_to_df = self.data\n\n        if list(data_to_df[0].keys())[1] == \"time\":\n            df_tag = self.influx_convert_to_df_second(data_to_df)\n        else:\n            df_tag = self.influx_convert_to_df_first(data_to_df)\n\n        self.current_time = pd.to_datetime(df_tag.index.values[-1])\n\n        return df_tag\n\n    def add_ten_seconds(self, needed_time):\n        needed_time_1 = pd.to_datetime(needed_time)\n        needed_time_2 = needed_time_1 + datetime.timedelta(0, 10)\n        return pd.Timestamp(needed_time_2)\n\n    ################################# Write data ###############################################\n\n\n    def write_data_to_influx(self, putting_data_in_list, measurement, ch_name):\n\n        time_calc = [self.current_time]\n\n        for i in range(len(putting_data_in_list) - 1):\n            time_calc.append(self.add_ten_seconds(time_calc[i]))\n\n\n\n        for i in range(len(putting_data_in_list)):\n\n            json_influx = [\n                {\n                    \"measurement\": measurement,\n                    \"time\": time_calc[i],\n                    \"fields\": {\n                        str(ch_name + \"_low_limit\"): putting_data_in_list[i] * 0.95,\n                        str(ch_name + \"_high_limit\"): putting_data_in_list[i] * 1.05,\n                        str(ch_name + \"_predicted\"): putting_data_in_list[i]\n                    }\n                }\n            ]\n\n            self.client.write_points(json_influx)\n\n\n\n\n\n\n","sub_path":"17October/LinkClass.py","file_name":"LinkClass.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"200094460","text":"#analysis: Run poisson regression models\n\n# import mysql.connector\nimport pandas as pd\nimport numpy as np \nimport statsmodels.api as sm
\nimport math\nimport sys\nimport os, subprocess, re\nimport json\nfrom sqlalchemy import create_engine \nfrom patsy import dmatrices\nfrom urllib.parse import quote_plus as urlquote\n\n# cnx = mysql.connector.connect(user='ethgas', password='station', host='127.0.0.1', database='tx')\n# cursor = cnx.cursor()\n# 'mysql+mysqlconnector://ethgas:station@127.0.0.1:3306/tx', echo=False)\nengine = create_engine('postgresql://' + os.environ['DATABASE_USERNAME'] + ':' + urlquote(os.environ['DATABASE_PASSWORD']) + '@' + os.environ['DATABASE_HOSTNAME'] + ':' + os.environ['DATABASE_PORT'] + '/' + os.environ['DATABASE_NAME'], echo=False)\n# query = (\"SELECT * FROM minedtx2\")\n# cursor.execute(query)\n# head = cursor.column_names\n# predictData = pd.DataFrame(cursor.fetchall())\npredictData = pd.read_sql(\"SELECT * FROM minedtx2\", con=engine)\npredictData.columns = list(predictData)\n# cursor.close()\n\n\n#predictData = predictData.combine_first(postedData)\npredictData['confirmTime'] = predictData['block_mined']-predictData['block_posted']\nprint('num with confirm times')\nprint (predictData['confirmTime'].count())\nprint ('neg confirm time')\nprint (len(predictData.loc[predictData['confirmTime']<0]))\nprint ('zero confirm time')\nprint (len(predictData.loc[predictData['confirmTime']==0]))\nprint('pre-chained ' + str(len(predictData)))\npredictData.loc[predictData['chained']==1, 'confirmTime']=np.nan\npredictData = predictData.dropna(subset=['confirmTime'])\nprint('post-chained ' + str(len(predictData)))\npredictData = predictData.loc[predictData['confirmTime']>0]\npredictData = predictData.loc[predictData['tx_atabove']>0]\nprint ('cleaned transactions: ')\nprint (len(predictData))\n\nprint('gas offered data')\nmax_gasoffered = predictData['gas_offered'].max()\nprint('max :'+str(predictData['gas_offered'].max()))\nprint('delay at max')\nprint(predictData.loc[predictData['gas_offered'] == max_gasoffered, 'confirmTime'].values[0])\nquantiles= predictData['gas_offered'].quantile([.5, .75, .95, .99])\nprint(quantiles)\n\n#dep['gasCat1'] = (txData2['gasused'] == 21000).astype(int)\npredictData['gasCat1'] = ((predictData['gas_offered']<=quantiles[.5])).astype(int)\npredictData['gasCat2'] = ((predictData['gas_offered']>quantiles[.5]) & (predictData['gas_offered']<=quantiles[.75])).astype(int)\npredictData['gasCat3'] = ((predictData['gas_offered']>quantiles[.75]) & (predictData['gas_offered']<=quantiles[.95])).astype(int)\npredictData['gasCat4'] = ((predictData['gas_offered']>quantiles[.95]) & (predictData['gas_offered']<quantiles[.99])).astype(int)\npredictData['highgas2'] = (predictData['gas_offered']>=quantiles[.99]).astype(int)\n\n\n\npredictData['hpa2'] = predictData['hashpower_accepting']*predictData['hashpower_accepting']\n\n\n\ny, X = dmatrices('confirmTime ~ hashpower_accepting + highgas2 + tx_atabove', data = predictData, return_type = 'dataframe')\n\nprint(y[:5])\nprint(X[:5])\n\nmodel = sm.GLM(y, X, family=sm.families.Poisson())\nresults = model.fit()\nprint (results.summary())\n\n\ny['predict'] = results.predict()\ny['round_gp_10gwei'] = predictData['round_gp_10gwei']\ny['hashpower_accepting'] = predictData['hashpower_accepting']\ny['tx_atabove'] = predictData['tx_atabove']\ny['tx_unchained'] = predictData['tx_unchained']\ny['highgas2'] = predictData['highgas2']\n\n\nprint(y)\n","sub_path":"model_gas.py","file_name":"model_gas.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"455184666","text":"import requests\nimport datetime\nfrom realtime.models import Fire\nfrom 
django.contrib.gis.geos import Point\nfrom dateutil import tz\nfrom django.utils.timezone import localtime\nfrom bipad.settings import TIME_ZONE\n\n\nICIMOD_FIRE_QUERY_URL = \"http://geoapps.icimod.org/arcgis/rest/services/Nepal/NepalActiveFire/MapServer/0/query\"\n\n\ndef fetch_fire():\n date_to = localtime().strftime('%s')\n date_from = (localtime() + datetime.timedelta(-30)).strftime('%s')\n\n params = {\n 'time': date_from+','+date_to,\n 'outFields': 'SCAN,ACQ_DATE,ACQ_TIME,CONFIDENCE,LANDCOVER,LATITUDE,LONGITUDE,BRIGHTNESS',\n 'f': 'pjson'\n }\n\n fire_data_url = requests.get(ICIMOD_FIRE_QUERY_URL, params=params)\n fire_data = fire_data_url.json()\n fires = []\n for data in fire_data['features']:\n\n fire = Fire(\n point=Point(float(data['attributes']['LONGITUDE']), float(data['attributes']['LATITUDE'])),\n scan=data['attributes']['SCAN'],\n event_on=datetime.datetime.fromtimestamp(\n data['attributes']['ACQ_DATE'] / 1000).astimezone(tz.gettz(TIME_ZONE)),\n brightness=data['attributes']['BRIGHTNESS'],\n confidence=data['attributes']['CONFIDENCE'],\n land_cover=data['attributes']['LANDCOVER'],\n )\n fires.append(fire)\n\n Fire.objects.all().delete()\n Fire.objects.bulk_create(fires)\n","sub_path":"realtime/scripts/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588818154","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\n# auth : https://blog.naver.com/hdh0926\n\nimport socket,time\n\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\nPORT = 7777 # Port to listen on (non-privileged ports are > 1023)\n\nprint('server start port by', PORT)\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen()\n conn, addr = s.accept()\n with conn:\n print('Connected by', addr)\n time.sleep(0.5)\n # while True:\n # data = conn.recv(1024)\n # if not data:\n # break\n # conn.sendall(data)","sub_path":"python/pro/class_03_server.py","file_name":"class_03_server.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"224485939","text":"__author__ = \"Timber\"\n\n# Implement a basic parse tree #\n# Based on stack and binaryTree method #\n# August 12th, 2015 #\n\n# Import basic data structures\nfrom BasicDataStructure.stackBasedStringReverse import Stack\nfrom Tree.nodeAndReference import binaryTree\nimport operator\n\n# Build a parse tree\ndef buildParseTree(fpexp):\n fplist = fpexp.split() ## split expression into elements\n pStack = Stack() ## create an empty stack\n eTree = binaryTree(\"\") ## create an empty tree\n\n ## check element one by one and build the parse tree\n pStack.push(binaryTree)\n currentnode = eTree\n for ch in fplist:\n if ch == \"(\":\n currentnode.insertLeft('')\n pStack.push(currentnode)\n currentnode = currentnode.getLeftChild()\n elif ch not in ['+', '-', '*', '/', ')']:\n currentnode.setNodeVal(int(ch))\n currentnode = pStack.pop()\n elif ch in ['+', '-', '*', '/']:\n currentnode.setNodeVal(ch)\n currentnode.insertRight('')\n pStack.push(currentnode)\n currentnode = currentnode.getRightChild()\n elif ch == \")\":\n currentnode = pStack.pop()\n else:\n return ValueError\n return eTree\n\n# Build an evaluation function to test the parse tree\n# implemented based on recursive strategy\ndef evaluateParseTree(parsetree):\n opers = {\"+\":operator.add, \"-\":operator.sub, \"*\":operator.mul, \\\n 
\"/\":operator.truediv}\n leftchild = parsetree.getLeftChild()\n rightchild = parsetree.getRightChild()\n ## recursively check left and right child, return value of leaf node\n if leftchild and rightchild: ## this is an operator node\n fn = opers[parsetree.getNodeVal()]\n return fn(evaluateParseTree(leftchild), evaluateParseTree(rightchild))\n else: ## this is a leaf node\n return parsetree.getNodeVal()\n\n# Unit test\n## here, we need to provide a completed formula\ntesTree = buildParseTree(\"( ( 3 + 6 ) * ( 10 - 5 ) )\")\nprint(evaluateParseTree(tesTree))\n","sub_path":"Tree/parseTree.py","file_name":"parseTree.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"84976156","text":"#!/usr/bin/env python3\nimport pyoscar\nimport argparse\n\ncmdLineParser = argparse.ArgumentParser(description=\"Does a debug query\")\ncmdLineParser.add_argument('-f', help='path to oscar data', dest='f', nargs=1, type=str, required=True)\n\nparsedArgs = cmdLineParser.parse_args()\n\nhdl = pyoscar.MainHandler()\nhdl.energize(parsedArgs.f[0])\nengine = hdl.engine()\n\n\nresult = engine.query(\"#Bamberg @admin_level:6 @railway:station\")\ncells = result.cells()\nitems = cells.items()\nstore = engine.store()\ngraph = store.graph()\nrelhelp = hdl.relationHelpers()\nfor itemId in items:\n\titem = store.at(itemId)\n\tif (item.hasKey(\"name\") and item.value(\"name\") == \"Bamberg\"):\n\t\tvalue=itemId\n\t\tprint(value)\n\tprint(str(item))\n\tprint(str(itemId) + \"Osm id is \" + str(item.osmId()))\n","sub_path":"examples/bamberg_mwe.py","file_name":"bamberg_mwe.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91623594","text":"'''\nCreated on 27.06.2017\n\n@author: Peer\n'''\n\nfrom bson.code import Code\nimport os\nimport pathlib\n\nimport pymongo\n\n\nTRANSLATION_TABLE = (('.', '\\uff0e'),\n ('$', '\\uff04'))\n\n\nclass GitExplorerBase(object):\n\n @staticmethod\n def get_gitexplorer_database():\n '''Returns the MongoDB for gitexplorer.\n\n The collections inside the database can be used as basis for specialized collections\n from which one can derive elevated statistics. 
Results can also be written into the\n database to be accessible by visualization routines.\n '''\n client = pymongo.MongoClient()\n return client.gitexplorer_database\n\n @staticmethod\n def _mongodb_escape(input_string):\n for translation in TRANSLATION_TABLE:\n input_string = input_string.replace(translation[0], translation[1])\n return input_string\n\n @staticmethod\n def _mongodb_unescape(input_string):\n for translation in TRANSLATION_TABLE:\n input_string = input_string.replace(translation[1], translation[0])\n return input_string\n\n @staticmethod\n def _get_code(file_name):\n current_working_directory = pathlib.Path(os.getcwd())\n\n with (current_working_directory / file_name).open(mode='r') as fid:\n code = fid.read()\n\n return Code(code)\n","sub_path":"gitexplorer/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343802241","text":"import hashlib\nimport time\n\nfrom qcloud_cos import CosConfig\nfrom qcloud_cos import CosS3Client\nimport logging\nimport os\nimport json\n\n# logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n\n# 计算某个文件的md5值\ndef md5sum(file_name):\n fo = open(file_name, 'rb')\n file_content = fo.read()\n fo.close()\n m = hashlib.md5(file_content)\n file_md5 = m.hexdigest()\n\n return file_md5\n\n\n# 获取基本配置\nuser_home = os.path.expanduser('~')\ncos_upload_config_filename = os.path.join(user_home, 'Documents', 'cos_upload_config.json')\ncos_upload_config_file = open(cos_upload_config_filename, 'r')\ncos_upload_config_str = cos_upload_config_file.read()\ncos_upload_config = json.loads(cos_upload_config_str)\n\nsecret_id = cos_upload_config['secret_id'] # 替换为用户的 secretId\nsecret_key = cos_upload_config['secret_key'] # 替换为用户的 secretKey\nregion = cos_upload_config['region'] # 替换为用户的 Region\nbucket = cos_upload_config['bucket']\nlocal_path = cos_upload_config['local_path']\ntoken = None # 使用临时密钥需要传入 Token,默认为空,可不填\nscheme = 'https' # 指定使用 http/https 协议来访问 COS,默认为 https,可不填\n\n# 获取客户端对象\nconfig = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme=scheme)\nclient = CosS3Client(config)\n\n# 查询存储桶下的全部对象列表\ncontents = []\nmarker = \"\"\nwhile True:\n response = client.list_objects(\n Bucket=bucket,\n Prefix='',\n Marker=marker\n )\n # 存储桶有对象时才获取\n if len(response) != 0:\n contents = contents + response['Contents']\n\n if response['IsTruncated'] == 'false':\n break\n marker = response['NextMarker']\n\n# 把对象元数据中key和etag生成map\ncos_filename_etag_map = {}\nfor content in contents:\n key = content['Key']\n etag = str(content['ETag'])\n etag = etag.strip('\"')\n cos_filename_etag_map[key] = etag\n\n# 获取本地文件夹下所有文件\nlocal_files = []\nif not os.path.exists(local_path):\n print(\"ERROR: 本地路径不存在\")\nfor root, dirs, names in os.walk(local_path):\n for filename in names:\n local_files.append(os.path.join(root, filename))\n\n# 通过比较md5判断文件是否有更新,上传本地已更新的文件和新添加的文件\nupload_files = []\nfor file in local_files:\n obs_path = file[len(local_path)+len(os.sep):]\n obs_path = obs_path.replace('\\\\', '/')\n if obs_path not in cos_filename_etag_map:\n upload_files.append(file)\n elif md5sum(file) != cos_filename_etag_map[obs_path]:\n upload_files.append(file)\n\n# 删除本地不存在的文件\ndelete_files = []\ncos_files = cos_filename_etag_map.keys()\nfor cos_file in cos_files:\n cos_file = str(cos_file)\n cos_file_path = cos_file.replace('/', '\\\\')\n abs_path = os.path.join(local_path, cos_file_path)\n if abs_path not in local_files:\n 
delete_files.append(cos_file)\n\n# 执行上传操作\nfor upload_file in upload_files:\n    obs_path = upload_file[len(local_path)+len(os.sep):]\n    obs_path = obs_path.replace('\\\\', \"/\")\n    client.upload_file(\n        Bucket=bucket,\n        Key=obs_path,\n        LocalFilePath=upload_file,\n        EnableMD5=False\n    )\n    print(upload_file + ' 上传成功')\n\n# 执行删除操作\nfor delete_file in delete_files:\n    client.delete_object(\n        Bucket=bucket,\n        Key=delete_file\n    )\n    print(delete_file + ' 删除成功')\n\nprint('执行完成!')\n","sub_path":"cosupload.py","file_name":"cosupload.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"240094430","text":"import pandas\n\n\ndef split_dates(df, column):\n    \n    # create a copy to prevent copy warnings\n    df = df.copy()\n\n    # convert the column to a date time format\n    df[column] = pandas.to_datetime(df[column], infer_datetime_format=True)\n\n    # create new columns for the month day and year of the given date column\n    df['Month'] = df[column].dt.month\n    df['Day'] = df[column].dt.day\n    df['Year'] = df[column].dt.year\n\n    return(df)","sub_path":"my_lambdata_tmbern_Unit3/split_dates.py","file_name":"split_dates.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"456925332","text":"\"\"\"\n61. KVSの検索\n\"\"\"\n\nimport plyvel\n\n\ndef main():\n    db_name = 'materials/artist.ldb'\n    # DBがなければ作る\n    db = plyvel.DB(db_name, create_if_missing=True)\n\n    artist_name = input('アーティスト名: ')\n    name = artist_name.encode()\n    area = db.get(name)\n    if area is not None:\n        if area != b'':  # db.get returns bytes, so compare against bytes\n            print('アーティスト名: [{0}], 活動場所: [{1}]'.format(name.decode(), area.decode()))\n        else:\n            print('アーティスト名: [{0}] の活動場所は登録されていません.'.format(name.decode()))\n    else:\n        print('アーティスト名: [{0}] はDBに登録されていません.'.format(name.decode()))\n\n    # 最後は閉じる\n    db.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"knock61.py","file_name":"knock61.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"55900546","text":"\"\"\"\nDecision trees with MapReduce\n\nFit phase\nThe decision trees algorithm builds one decision tree on a subset of the data and estimates all attributes in every tree node.\n\nPredict phase\nEach tree votes and the algorithm selects the prediction with the most votes.\n\nReference\nA similar algorithm is proposed in Gongqing Wu, Haiguang Li, Xuegang Hu, Yuanjun Bi, Jing Zhang, and Xindong Wu. MRec4.5: C4.
5 ensemble classification with mapreduce.\n\"\"\"\n\ndef simple_init(interface, params):\n\treturn params\n\ndef map_fit(interface, state, label, inp):\n\timport numpy as np\n\timport decision_tree, measures\n\t\n\tout = interface.output(0)\n\tx, y, y_mapping = [], [], []\n\tmapping = [[] for i in range(len(state[\"X_meta\"]))]\n\n\tfor row in inp:\n\t\trow = row.strip().split(state[\"delimiter\"])\n\t\tif len(row) > 1:\n\t\t\tnew_row = []\n\t\t\tfor i, j in enumerate(state[\"X_indices\"]):\n\t\t\t\tif state[\"X_meta\"][i] == \"c\":\n\t\t\t\t\tnew_row.append(float(row[j]))\n\t\t\t\telse:\n\t\t\t\t\tif row[j] not in mapping[i]:\n\t\t\t\t\t\tmapping[i].append(row[j])\n\t\t\t\t\tnew_row.append(mapping[i].index(row[j]))\n\t\t\tx.append(new_row)\n\t\t\t\n\t\t\tif row[state[\"y_index\"]] not in y_mapping:\n\t\t\t\ty_mapping.append(row[state[\"y_index\"]])\t\n\t\t\ty.append(y_mapping.index(row[state[\"y_index\"]]))\n\n\ttree = decision_tree.fit(\n\t\tx = np.array(x), \n\t\ty = np.array(y), \n\t\tt = state[\"X_meta\"], \n\t\trandomized = False, \n\t\tmax_tree_nodes = state[\"max_tree_nodes\"], \n\t\tleaf_min_inst = state[\"leaf_min_inst\"], \n\t\tclass_majority = state[\"class_majority\"],\n\t\tintervals = state[\"intervals\"], \n\t\tmeasure = measures.info_gain if state[\"measure\"] == \"info_gain\" else measures.mdl,\n\t\tsplit_fun = measures.equal_freq_splits if state[\"split_fun\"] == \"equal_freq\" else measures.random_splits)\n\n\ttree_mapped = {}\n\tfor k,v in tree.iteritems():\n\t\ttree_mapped[k] = [None for i in range(2)]\t\n\t\tfor i, node in enumerate(v):\n\t\t\tdist_map = dict([(y_mapping[label],freq) for label, freq in node[3].iteritems()])\n\t\t\tsplit_map = set([mapping[node[1]][int(s)] for s in list(node[2])]) if node[5] == \"d\" else node[2]\n\t\t\ttree_mapped[k][i] = (node[0], node[1], split_map, dist_map, node[4],node[5])\n\tout.add(\"tree\", tree_mapped)\n\t\n\ndef reduce_fit(interface, state, label, inp):\t\n\tout = interface.output(0)\n\tout.add(\"X_names\", state[\"X_names\"])\n\tfor i, (key, value) in enumerate(inp):\n\t\tout.add(key+\" \"+str(i+1), value)\n\ndef map_predict(interface, state, label, inp):\n\timport numpy as np\n\timport decision_tree\n\t\n\tout = interface.output(0)\n\thalf_ensemble = round(len(state[\"forest\"])/2.)\n\t\n\tfor row in inp:\n\t\trow = row.strip().split(state[\"delimiter\"])\n\t\tif len(row) > 1:\n\t\t\tx_id = \"\" if state[\"id_index\"] == -1 else row[state[\"id_index\"]]\n\t\t\tx = [(float(row[j]) if state[\"X_meta\"][i] == \"c\" else row[j]) for i,j in enumerate(state[\"X_indices\"])]\n\t\t\t\n\t\t\tpredictions = {}\n\t\t\tfor i, tree in enumerate(state[\"forest\"]):\n\t\t\t\tpred = decision_tree.predict(tree, x)\n\t\t\t\tpredictions[pred] = predictions.get(pred, 0) + 1 \n\t\t\t\t\n\t\t\t\tif i >= half_ensemble-1:\n\t\t\t\t\tprediction = max(predictions, key=predictions.get)\n\t\t\t\t\tvalue = predictions[prediction]\n\t\t\t\t\tif value == half_ensemble:\n\t\t\t\t\t\tbreak\n\t\t\tout.add(x_id, (prediction, i+1))\n\n\ndef fit(input, max_tree_nodes = 50, leaf_min_inst = 5, class_majority = 1, measure = \"info_gain\", split_fun = \"equal_freq\", split_intervals = 100, save_results = True, show = False):\n\t\n\tfrom disco.worker.pipeline.worker import Worker, Stage\n\tfrom disco.core import Job\n\timport discomll\n\tpath = \"/\".join(discomll.__file__.split(\"/\")[:-1] + [\"ensemble\", \"core\",\"\"])\n\n\ttry:\n\t\tmax_tree_nodes = int(max_tree_nodes)\n\t\tleaf_min_inst = int(leaf_min_inst)\n\t\tclass_majority = 
float(class_majority)\n\t\tsplit_intervals = int(split_intervals)\n\t\tif max_tree_nodes <= 0 or leaf_min_inst <= 0 or class_majority <= 0 or split_intervals <= 0:\n\t\t\traise Exception(\"Parameters should be greater than 0.\") \n\texcept ValueError:\n\t\traise Exception(\"Parameters should be numerical.\")\n\n\tif measure not in [\"info_gain\", \"mdl\"]:\n\t\traise Exception(\"measure should be set to info_gain or mdl.\")\n\tif split_fun not in [\"equal_freq\", \"random\"]:\n\t\traise Exception(\"split_fun should be set to equal_freq or random.\")\n\n\n\n\tjob = Job(worker = Worker(save_results = save_results))\n\tjob.pipeline = [\n\t(\"split\", Stage(\"map\",input_chain = input.params[\"input_chain\"], init = simple_init, process = map_fit)),\n\t('group_all', Stage(\"reduce\", init = simple_init, process = reduce_fit, combine = True))]\n\n\tjob.params = input.params\n\tjob.params[\"max_tree_nodes\"] = max_tree_nodes\n\tjob.params[\"leaf_min_inst\"] = leaf_min_inst\n\tjob.params[\"class_majority\"] = class_majority\n\tjob.params[\"measure\"] = measure\n\tjob.params[\"split_fun\"] = split_fun\n\tjob.params[\"intervals\"] = split_intervals\n\n\tjob.run(name = \"decision_trees_fit\", input = input.params[\"data_tag\"], required_files =[path+\"decision_tree.py\", path+\"measures.py\"])\n\t\n\tfitmodel_url = job.wait(show = show)\n\treturn {\"dt_fitmodel\": fitmodel_url} #return results url\n\ndef predict(input, fitmodel_url, save_results = True, show = False):\n\tfrom disco.worker.pipeline.worker import Worker, Stage\n\tfrom disco.core import Job, result_iterator\n\timport discomll\n\tpath = \"/\".join(discomll.__file__.split(\"/\")[:-1] + [\"ensemble\", \"core\",\"\"])\n\n\tif \"dt_fitmodel\" not in fitmodel_url:\n\t\traise Exception(\"Incorrect fit model.\")\n\n\tjob = Job(worker = Worker(save_results = save_results))\n\tjob.pipeline = [(\"split\", Stage(\"map\",input_chain = input.params[\"input_chain\"], init = simple_init, process = map_predict))]\n\n\tjob.params = input.params\n\tjob.params[\"forest\"] = [v for k, v in result_iterator(fitmodel_url[\"dt_fitmodel\"]) if k != \"X_names\"]\n\t\n\n\tjob.run(name = \"decision_trees_predict\", input = input.params[\"data_tag\"], required_files = [path+\"decision_tree.py\"])\n\t\n\treturn job.wait(show = show)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"discomll/ensemble/decision_trees.py","file_name":"decision_trees.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648788619","text":"#\n# @lc app=leetcode.cn id=337 lang=python\n#\n# [337] 打家劫舍 III\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def rob(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n def dfs(root):\n if not root: return 0, 0 # 偷和不偷的最大金额\n left = dfs(root.left)\n right = dfs(root.right)\n do = root.val + left[1] + right[1]\n undo = max(left) + max(right)\n return do, undo\n return max(dfs(root))\n# @lc code=end\n\n","sub_path":"Week_06/337.打家劫舍-iii.py","file_name":"337.打家劫舍-iii.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3283736","text":"from django.shortcuts import get_object_or_404\nfrom django.views.generic import list_detail\n\nimport plata\n\n\ndef product_list(request):\n shop 
= plata.shop_instance()\n\n return list_detail.object_list(request,\n queryset=shop.product_model.objects.active(),\n paginate_by=9,\n template_name='product/product_list.html',\n )\n\n\ndef product_detail(request, object_id):\n shop = plata.shop_instance()\n\n return shop.product_detail(request,\n get_object_or_404(shop.product_model.objects.active(), pk=object_id),\n )\n","sub_path":"example/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"579866758","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2015--, micronota development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom unittest import TestCase, main\nfrom skbio.util import get_data_path\nfrom skbio.metadata import IntervalMetadata, Feature\n\nfrom micronota.parsers.cmscan import (_cmscan_to_metadata,\n _cmscan_to_generator,\n _cmscan_sniffer,\n CmscanFormatError)\nfrom wheel.signatures import assertTrue\n\n\nclass CmscanIOTests(TestCase):\n def setUp(self):\n self.file_valid = get_data_path('valid.cmscan')\n self.file_invalidOrientation = get_data_path(\n 'invalidOrientation.cmscan')\n self.file_charInPos = get_data_path('charInPos.cmscan')\n self.file_startStopSwop = get_data_path('startStopSwop.cmscan')\n\n self.subsequences = [{\n 'MODEL_NAME': 'LSU_rRNA_bacteria',\n 'MODEL_ACCESSION': 'RF02541',\n 'SEQUENCE_NAME': 'gi|15829254|ref|NC_002695.1|',\n 'SEQUENCE_ACCESSION': '-',\n 'TYPE_OF_MODEL': 'cm',\n 'MODEL_START_POSITION': '1',\n 'MODEL_END_POSITION': '2925',\n 'STRAND': '+',\n 'TRUNCATED': 'no',\n 'PASS': '1',\n 'GC_CONTENT': '0.53',\n 'BIAS': '45.4',\n 'BITSCORE': '2889.8',\n 'EVALUE': '0',\n 'INC': '!',\n 'DESCRIPTION': '-'\n }, {\n 'MODEL_NAME': 'SSU_rRNA_microsporidia',\n 'MODEL_ACCESSION': 'RF02542',\n 'SEQUENCE_NAME': 'gi|15829254|ref|NC_002695.1|',\n 'SEQUENCE_ACCESSION': '-',\n 'TYPE_OF_MODEL': 'cm',\n 'MODEL_START_POSITION': '1',\n 'MODEL_END_POSITION': '1312',\n 'STRAND': '+',\n 'TRUNCATED': 'no',\n 'PASS': '1',\n 'GC_CONTENT': '0.55',\n 'BIAS': '14.5',\n 'BITSCORE': '733.3',\n 'EVALUE': '5.8e-220',\n 'INC': '!',\n 'DESCRIPTION': '-'\n }, {\n 'MODEL_NAME': 'IS061',\n 'MODEL_ACCESSION': 'RF00115',\n 'SEQUENCE_NAME': 'gi|secondSeq|',\n 'SEQUENCE_ACCESSION': '-',\n 'TYPE_OF_MODEL': 'cm',\n 'MODEL_START_POSITION': '1',\n 'MODEL_END_POSITION': '180',\n 'STRAND': '-',\n 'TRUNCATED': 'no',\n 'PASS': '1',\n 'GC_CONTENT': '0.44',\n 'BIAS': '0.0',\n 'BITSCORE': '232.1',\n 'EVALUE': '1.5e-42',\n 'INC': '!',\n 'DESCRIPTION': '-'\n }, {\n 'MODEL_NAME': 'SIB_RNA',\n 'MODEL_ACCESSION': 'RF00113',\n 'SEQUENCE_NAME': 'gi|secondSeq|',\n 'SEQUENCE_ACCESSION': '-',\n 'TYPE_OF_MODEL': 'cm',\n 'MODEL_START_POSITION': '1',\n 'MODEL_END_POSITION': '147',\n 'STRAND': '+',\n 'TRUNCATED': 'no',\n 'PASS': '1',\n 'GC_CONTENT': '0.40',\n 'BIAS': '0.0',\n 'BITSCORE': '136.7',\n 'EVALUE': '4e-27',\n 'INC': '!',\n 'DESCRIPTION': '-'\n }]\n self.intervals = [\n [(4977823, 4980727)],\n [(4831659, 4833190)],\n [(1915102, 1915281)],\n [(3794337, 3794487)]\n ]\n\n self.seq1 = IntervalMetadata(features={\n Feature(**self.subsequences[0]): self.intervals[0],\n Feature(**self.subsequences[1]): self.intervals[1]\n })\n self.seq2 = IntervalMetadata(features={\n 
Feature(**self.subsequences[2]): self.intervals[2],\n Feature(**self.subsequences[3]): self.intervals[3]\n })\n\n\nclass ReaderTests(CmscanIOTests):\n def test_cmscan_to_metadata(self):\n # positive controls: will the information from a file parsed to what\n # we expect\n assertTrue(self.seq1 == _cmscan_to_metadata(self.file_valid, rec_num=1))\n assertTrue(self.seq2 == _cmscan_to_metadata(self.file_valid, rec_num=2))\n\n # negative control: we are parsing the wrong information from the file,\n # check if it is really unequal\n assertTrue(not self.seq1 == _cmscan_to_metadata(self.file_valid,\n rec_num=2))\n\n # test if parser raises error about an unknown character as strand\n # identifier\n self.assertRaisesRegex(CmscanFormatError,\n \"Unknown strand character\",\n _cmscan_to_metadata,\n self.file_invalidOrientation,\n rec_num=1)\n\n # test if parser complains about non digit characters in positional\n # arguments\n self.assertRaisesRegex(CmscanFormatError,\n \"must be an integer value for the start position\"\n \" of the hit. Here, it is\",\n _cmscan_to_metadata,\n self.file_charInPos,\n rec_num=1)\n\n # test if parser checks for wrong start and stop positions of the hit\n # in the query sequence\n self.assertRaisesRegex(CmscanFormatError,\n \"It might be, that this hit is in fact on the \"\n \"reverse strand. Please check strand orientation\"\n \" and positions\",\n _cmscan_to_metadata,\n self.file_startStopSwop,\n rec_num=1)\n\n def test_cmscan_to_generator(self):\n assertTrue(list(_cmscan_to_generator(self.file_valid))[0] == self.seq1)\n assertTrue(list(_cmscan_to_generator(self.file_valid))[1] == self.seq2)\n assertTrue(not list(_cmscan_to_generator(self.file_valid))[0] == self.seq2)\n\n\nclass SnifferTests(TestCase):\n def setUp(self):\n self.positive_fps = list(map(get_data_path, [\n 'charInPos.cmscan',\n 'invalidOrientation.cmscan',\n 'startStopSwop.cmscan',\n 'valid.cmscan']))\n self.negative_fps = list(map(get_data_path, [\n 'blank.sam',\n 'uniprot_multi.embl']))\n\n def test_positive(self):\n for fp in self.positive_fps:\n self.assertEqual(_cmscan_sniffer(fp), (True, {}))\n for fp in self.negative_fps:\n self.assertEqual(_cmscan_sniffer(fp), (False, {}))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"micronota/parsers/tests/test_cmscan.py","file_name":"test_cmscan.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571561273","text":"import pygame\r\nimport sys\r\nimport random\r\nimport statistics\r\nimport time\r\n\r\n\r\nclass LineLeft():\r\n def __init__(self):\r\n self.width = 10\r\n self.length = 130\r\n self.pos_y = 230\r\n self.speed = 0\r\n\r\n def draw(self):\r\n line_rect = pygame.Rect(20, self.pos_y, self.width, self.length)\r\n pygame.draw.rect(screen, (255, 255, 255), line_rect)\r\n\r\n def move(self):\r\n if self.pos_y >= 450:\r\n self.pos_y = 450\r\n elif self.pos_y <= 20:\r\n self.pos_y = 20\r\n\r\n self.pos_y += self.speed\r\n\r\n return self.pos_y, self.pos_y + self.length\r\n\r\n\r\nclass LineRight():\r\n def __init__(self):\r\n self.width = 10\r\n self.length = 130\r\n self.pos_y = 230\r\n self.speed = 0\r\n\r\n def draw(self):\r\n line_rect = pygame.Rect(770, self.pos_y, self.width, self.length)\r\n pygame.draw.rect(screen, (255, 255, 255), line_rect)\r\n\r\n def move(self):\r\n if main.ai:\r\n self.pos_y = main.ball.y -20\r\n\r\n if self.pos_y >= 450:\r\n self.pos_y = 450\r\n elif self.pos_y <= 20:\r\n self.pos_y = 20\r\n\r\n self.pos_y += 
self.speed\r\n\r\n return self.pos_y, self.pos_y + self.length\r\n\r\n\r\nclass Ball():\r\n def __init__(self):\r\n self.x = 400\r\n self.y = 300\r\n self.speed_x = 10\r\n self.speed_y = 0\r\n self.direction = \"left\"\r\n self.line_left = LineLeft()\r\n self.line_right = LineRight()\r\n\r\n def draw(self):\r\n pygame.draw.circle(screen, (255, 255, 255), (self.x, self.y), 5)\r\n\r\n def move(self):\r\n if self.y >= 580:\r\n if main.hard_mode:\r\n self.speed_y = -2\r\n else:\r\n self.speed_y = -1\r\n elif self.y <= 15:\r\n if main.hard_mode:\r\n self.speed_y = 2\r\n else:\r\n self.speed_y = 1\r\n\r\n if self.x == 20:\r\n if main.collision_left():\r\n self.direction = \"right\"\r\n else:\r\n main.player_two_score += 1\r\n main.game_over = True\r\n\r\n pos_line_l = statistics.mean([main.line_left.move()[0], main.line_left.move()[1]]) + 5\r\n\r\n if pos_line_l > self.y:\r\n self.speed_y -= 1\r\n elif pos_line_l < self.y:\r\n self.speed_y += 1\r\n\r\n elif self.x in [770, 760]:\r\n if main.collision_right():\r\n self.direction = \"left\"\r\n else:\r\n if main.hard_mode:\r\n main.player_one_score += 1\r\n main.game_over = True\r\n if not main.hard_mode:\r\n print(\"1\")\r\n main.player_one_score += 0.5\r\n main.game_over = True\r\n\r\n pos_line_r = statistics.mean([main.line_right.move()[0], main.line_right.move()[1]]) + 5\r\n\r\n if pos_line_r > self.y:\r\n self.speed_y -= 1\r\n elif pos_line_r < self.y:\r\n self.speed_y += 1\r\n\r\n if main.hard_mode:\r\n if self.direction == \"left\":\r\n self.speed_x = -20\r\n elif self.direction == \"right\":\r\n self.speed_x = 20\r\n else:\r\n if self.direction == \"left\":\r\n self.speed_x = -10\r\n elif self.direction == \"right\":\r\n self.speed_x = 10\r\n\r\n self.x += self.speed_x\r\n self.y += self.speed_y\r\n\r\n\r\nclass Main():\r\n def __init__(self):\r\n self.line_left = LineLeft()\r\n self.line_right = LineRight()\r\n self.ball = Ball()\r\n self.game_over = False\r\n\r\n self.player_one_score = 0\r\n self.player_two_score = 0\r\n\r\n self.menu = False\r\n self.options = False\r\n\r\n self.color = (0, 0, 0)\r\n\r\n self.hard_mode = False\r\n self.ai = False\r\n\r\n def update(self):\r\n if self.menu:\r\n self.menu_func()\r\n elif self.options:\r\n self.options_func()\r\n\r\n if not self.menu and not self.options:\r\n self.draw_board()\r\n\r\n self.line_left.draw()\r\n self.line_left.move()\r\n\r\n self.line_right.draw()\r\n self.line_right.move()\r\n\r\n self.ball.draw()\r\n self.ball.move()\r\n\r\n self.ball.y += self.ball.speed_y\r\n\r\n self.reset = False\r\n\r\n if self.game_over:\r\n self.game_over_fun()\r\n if self.reset:\r\n self.reset_func()\r\n\r\n def draw_board(self):\r\n middle_line = pygame.Rect(398, 10, 2, 578)\r\n top_line = pygame.Rect(0, 10, 800, 2)\r\n bottom_line = pygame.Rect(0, 588, 800, 2)\r\n\r\n pygame.draw.rect(screen, (255, 255, 255), middle_line)\r\n pygame.draw.rect(screen, (255, 255, 255), top_line)\r\n pygame.draw.rect(screen, (255, 255, 255), bottom_line)\r\n\r\n def collision_left(self):\r\n return self.ball.y in range(self.line_left.move()[0], self.line_left.move()[1])\r\n\r\n def collision_right(self):\r\n return int(self.ball.y) in range(self.line_right.move()[0], self.line_right.move()[1])\r\n\r\n def game_over_fun(self):\r\n font = pygame.font.Font(\"Andromeda-Bold.otf\", 64)\r\n game_over_font = font.render(\"Game Over\", True, (255, 255, 255))\r\n screen.blit(game_over_font, (210, 20))\r\n\r\n font_small = pygame.font.Font(\"Andromeda-Bold.otf\", 28)\r\n\r\n options_font_p = font_small.render(\"To play again 
press P\", True, (255, 255, 255))\r\n screen.blit(options_font_p, (100, 110))\r\n\r\n options_font_e = font_small.render(\"To return to menu press M\", True, (255, 255, 255))\r\n screen.blit(options_font_e, (420, 110))\r\n\r\n font_medium = pygame.font.Font(\"Andromeda-Bold.otf\", 42)\r\n\r\n player_one = font_medium.render(str(int(self.player_one_score)), True, (255, 255, 255))\r\n screen.blit(player_one, (360, 500))\r\n\r\n player_two = font_medium.render(str(self.player_two_score), True, (255, 255, 255))\r\n screen.blit(player_two, (425, 500))\r\n\r\n def reset_func(self):\r\n self.ball.y = 300\r\n self.ball.x = 400\r\n self.ball.speed_x = 10\r\n self.ball.speed_y = 0\r\n self.ball.direction = \"left\"\r\n\r\n self.line_left.pos_y = 230\r\n self.line_right.pos_y = 230\r\n\r\n self.reset = False\r\n\r\n def menu_func(self):\r\n font_menu = pygame.font.Font(\"Andromeda-Bold.otf\", 100)\r\n f_menu = font_menu.render(\"PONG\", True, (255, 255, 255))\r\n screen.blit(f_menu, (270, 20))\r\n\r\n font_menu_small = pygame.font.Font(\"Andromeda-Bold.otf\", 24)\r\n font_menu_smaller = pygame.font.Font(\"Andromeda-Bold.otf\", 18)\r\n\r\n if round(time.time()) % 10 in [1, 3, 5, 7, 9]:\r\n color = (255, 255, 255)\r\n f_menu_small = font_menu_small.render(\"Press P to play\", True, color)\r\n else:\r\n color = self.color\r\n f_menu_small = font_menu_small.render(\"Press P to play\", True, color)\r\n\r\n f_options = font_menu_smaller.render(\"Press O for options\", True, (64, 64, 64))\r\n\r\n screen.blit(f_menu_small, (325, 500))\r\n screen.blit(f_options, (630, 560))\r\n\r\n self.player_one_score = 0\r\n self.player_two_score = 0\r\n\r\n def options_func(self):\r\n font_options_small = pygame.font.Font(\"Andromeda-Bold.otf\", 24)\r\n f_options_blue = font_options_small.render(\"Press B for blue background\", True, (0, 0, 128))\r\n f_options_green = font_options_small.render(\"Press G for green background\", True, (0, 128, 0))\r\n f_options_black = font_options_small.render(\"Press K for black background\", True, (128, 128, 128))\r\n\r\n hard = font_options_small.render(\"Difficulty: Hard\", True, (128, 0, 0))\r\n easy = font_options_small.render(\"Difficulty: Easy\", True, (100, 100, 192))\r\n f_options_hard = font_options_small.render(\"Press H for hard mode\", True, (128, 0, 0))\r\n f_options_easy = font_options_small.render(\"Press E for easy mode\", True, (100, 100, 192))\r\n\r\n single_player = font_options_small.render(\"Mode: Single player\", True, (128, 128, 0))\r\n two_players = font_options_small.render(\"Mode: Two players\", True, (0, 128, 128))\r\n f_options_one_p = font_options_small.render(\"Press 1 for Single player mode\", True, (128, 128, 0))\r\n f_options_two_p = font_options_small.render(\"Press 2 for two player mode\", True, (0, 128, 128))\r\n\r\n f_options_menu = font_options_small.render(\"Press M to go back to main menu\", True, (255, 255, 255))\r\n\r\n # Color\r\n screen.blit(f_options_blue, (10, 30))\r\n screen.blit(f_options_green, (10, 70))\r\n screen.blit(f_options_black, (10, 110))\r\n\r\n # Difficulty\r\n if self.hard_mode:\r\n screen.blit(hard, (10, 210))\r\n else:\r\n screen.blit(easy, (10, 210))\r\n\r\n screen.blit(f_options_hard, (10, 250))\r\n screen.blit(f_options_easy, (10, 280))\r\n\r\n # AI\r\n if self.ai:\r\n screen.blit(single_player, (10, 380))\r\n else:\r\n screen.blit(two_players, (10, 380))\r\n\r\n screen.blit(f_options_one_p, (10, 420))\r\n screen.blit(f_options_two_p, (10, 450))\r\n\r\n # Menu\r\n screen.blit(f_options_menu, (10, 
550))\r\n\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((800, 600))\r\nclock = pygame.time.Clock()\r\nmain = Main()\r\nmain.menu = True\r\nmain.menu_func()\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if not main.ai:\r\n if event.key == pygame.K_UP:\r\n main.line_right.speed = -8\r\n if event.key == pygame.K_DOWN:\r\n main.line_right.speed = 8\r\n if event.key == pygame.K_w:\r\n main.line_left.speed = -8\r\n if event.key == pygame.K_s:\r\n main.line_left.speed = 8\r\n if main.game_over == True:\r\n if event.key == pygame.K_p:\r\n main.reset = True\r\n main.reset_func()\r\n main.game_over = False\r\n if event.key == pygame.K_m:\r\n main.menu = True\r\n if main.menu:\r\n if event.key == pygame.K_p:\r\n main.menu = False\r\n if event.key == pygame.K_o:\r\n main.menu = False\r\n main.options = True\r\n if main.options:\r\n if event.key == pygame.K_b:\r\n main.color = (30, 50, 128)\r\n if event.key == pygame.K_g:\r\n main.color = (50, 192, 50)\r\n if event.key == pygame.K_k:\r\n main.color = (0, 0, 0)\r\n if event.key == pygame.K_h:\r\n main.hard_mode = True\r\n if event.key == pygame.K_e:\r\n main.hard_mode = False\r\n if event.key == pygame.K_1:\r\n main.ai = True\r\n if event.key == pygame.K_2:\r\n main.ai = False\r\n if event.key == pygame.K_m:\r\n main.options = False\r\n main.menu = True\r\n\r\n if event.type == pygame.KEYUP:\r\n if not main.ai:\r\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n main.line_right.speed = 0\r\n if event.key == pygame.K_w or event.key == pygame.K_s:\r\n main.line_left.speed = 0\r\n\r\n screen.fill(main.color)\r\n main.update()\r\n pygame.display.update()\r\n clock.tick(60)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"584702827","text":"import copy\n\nfrom astar.state import State\nfrom astar.tuples import tuple_sum\nfrom colorama import Fore\nfrom typing import Optional\n\n\nclass Node(object):\n\n\tdef __init__(self, position: tuple = (0, 0), walkable: bool = True):\n\t\tself.position = position\n\t\tself.walkable = walkable\n\n\tdef __str__(self):\n\t\treturn 'N{}{}'.format('!' 
if not self.walkable else '', self.position)\n\n\nclass Grid(object):\n\n\tdirections = [(1, 0), (0, -1), (-1, 0), (0, 1)]\n\n\tdef __init__(self, grid_size: tuple, nodes: [Node]):\n\t\tself.width = grid_size[0]\n\t\tself.height = grid_size[1]\n\t\tself.nodes = nodes\n\n\t\tself.cached_states = []\n\n\t\n\tdef expand_state(self, current_state: State) -> [State]:\n\t\tneighbour_states = []\n\n\t\tfor i, position in enumerate(current_state.positions):\n\t\t\tfor neighbour in self.get_neighbour_positions(position, current_state):\n\t\t\t\tpositions_copy = copy.deepcopy(current_state.positions)\n\t\t\t\tpositions_copy[i] = neighbour\n\n\t\t\t\tneighbour_state = State(positions_copy)\n\n\t\t\t\tif neighbour_state in self.cached_states:\n\t\t\t\t\tneighbour_state = self.cached_states[self.cached_states.index(neighbour_state)]\n\t\t\t\telse:\n\t\t\t\t\tself.cached_states.append(neighbour_state)\n\n\t\t\t\tneighbour_states.append(neighbour_state)\n\n\t\treturn neighbour_states\n\n\n\tdef get_neighbour_positions(self, position: tuple, current_state: State) -> [tuple]:\n\t\tneighbour_positions = []\n\n\t\tfor direction in self.directions:\n\t\t\tneighbour_node = self.get_neighbour_in_direction(position, direction, current_state)\n\n\t\t\tif neighbour_node is not None:\n\t\t\t\tneighbour_positions.append(neighbour_node.position)\n\n\t\treturn neighbour_positions\n\n\n\tdef get_neighbour_in_direction(self, position: tuple, direction: tuple, current_state: State) -> Optional[tuple]:\n\t\tnew_position = tuple_sum(position, direction)\n\n\t\tif not self.is_position_in_bounds(new_position):\n\t\t\treturn None\n\n\t\tnew_neighbour = None\n\t\tnext_neighbour = self.nodes[self.position_to_index(new_position)]\n\n\t\twhile self.is_position_in_bounds(new_position) and next_neighbour.walkable and next_neighbour.position not in current_state.positions:\n\t\t\tnew_neighbour = next_neighbour\n\t\t\tnext_neighbour = self.nodes[self.position_to_index(new_position)]\n\n\t\t\tnew_position = tuple_sum(new_position, direction)\n\n\t\treturn new_neighbour\n\n\n\tdef is_position_in_bounds(self, position: tuple) -> bool:\n\t\treturn position[0] > -1 and position[0] < self.width and position[1] > -1 and position[1] < self.height\n\n\n\tdef draw_grid(self, current_atoms: [tuple]):\n\t\tprint(' ', end='')\n\t\tfor i in range(self.width):\n\t\t\tprint('{}{}'.format(i, ' ' if len(str(i)) < 2 else ''), end='')\n\t\tprint()\n\n\t\tfor row in range(self.height):\n\t\t\tprint('{}{}'.format(row, ' ' if len(str(row)) < 2 else ''), end='')\n\n\t\t\tfor col in range(self.width):\n\t\t\t\tnode = self.nodes[self.position_to_index((col, row))]\n\n\t\t\t\tif not node.walkable:\n\t\t\t\t\tprint('# ', end='')\n\t\t\t\telif (col, row) in current_atoms:\n\t\t\t\t\tif (col, row) == current_atoms[0]:\n\t\t\t\t\t\tprint(Fore.CYAN + 'C ' + Fore.RESET, end='')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(Fore.YELLOW + 'H ' + Fore.RESET, end='')\n\t\t\t\telse:\n\t\t\t\t\tprint(' ', end='')\n\n\t\t\tprint()\n\n\t\tprint()\n\n\tdef position_to_index(self, position) -> int:\n\t\treturn self.width * position[1] + position[0]\n","sub_path":"astar/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631436836","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# loadImageChlng.py\n# \n# Copyright 2017 jradko \n# \n# This version of the program will load an image from last season, and show it\n# in color in a window.\n#\n# The first 
challenge here is to change the program to use a filename passed\n# in as an argument. You can find a basic sample in this directory, and a\n# more sophisticated sample in ..\\util\\shrinkImage.py (that one uses argparse, which\n# is pretty cool).\n#\n# I will add some comments in here to explain a few parts\n# \n# \n\nimport cv2\nimport numpy as np\nimport os # including this so I can check the image file exists\n\n\ndef load_image_file(image_file): # I used a function here to combine the test with the load\n if(os.path.isfile(image_file)): # os.path.isfile returns true if the file exists, why is this needed?\n\t img = cv2.imread(image_file)\n else:\n\t print(\"Invalid file, exiting....\")\n\t exit(1)\n return(img)\n\ndef main(args):\n print(\"File Name?\")\n file_name = input()\n image_filename = \"..\\..\\images\" + chr(92) + file_name # this is a hard-coded image file\n \n img = load_image_file(image_filename) \n cv2.imshow(\"Image\", img) # this function will open a window with the image\n cv2.waitKey(0) # this function will wait until a key is pressed\n \n cv2.destroyAllWindows()\n \n return 0\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"challenge/load_image/EthanM27_loadImageChlng.py","file_name":"EthanM27_loadImageChlng.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"37579906","text":"import os\nimport stat\nimport sys\nimport argparse\nimport ipdb\n\nargparser = argparse.ArgumentParser(description = 'create Makefile from rvtest')\n\nargparser.add_argument('--pheno-name', metavar = 'name(s)', \n dest = 'pheno_name', required = True, help = 'SLURM partition name.')\nargparser.add_argument('--directory', metavar = 'name(s)', \n dest = 'directory', required = True, help = 'rvtest output directory')\nargparser.add_argument('--ped', metavar = 'name(s)', \n dest = 'ped', required = True, help = 'ped file')\n\n\ndef create_command(genotype, ped, directory, pheno_name, chrom):\n cmd = \"\"\"/net/fantasia/home/jweinstk/downloads/rvtests/executable/rvtest --inVcf {genotype} --pheno {ped} --pheno-name {pheno_name} --out {directory}chr{chrom} --dosage DS --covar-name AGE,SEX,PC1,PC2,PC3,PC4 --single firth --freqLower 0.01\"\"\".format(genotype = genotype,\n ped = ped,\n pheno_name = pheno_name,\n directory = directory,\n chrom = chrom)\n return cmd\n\n\nclass rule:\n def __init__(self, directory, ped, pheno_name, chrom):\n self.directory = directory\n self.ped = ped\n self.chrom = chrom\n self.pheno_name = pheno_name\n self.target = os.path.join(self.directory, \"chr{}.SingleFirth.assoc\".format(self.chrom))\n self.genotype = \"/net/fantasia/home/schellen/PheWAS/genotypes/DataFreeze_201602/MGI_HRC_chr{}.dose.vcf.gz\".format(self.chrom)\n\n def create_rule(self):\n command = create_command(self.genotype, self.ped, self.directory, self.pheno_name, self.chrom)\n return \"\"\"\\n{target}: {genotype} {ped}\\n\\t{command}\\n\"\"\".format(target = self.target, \n genotype = self.genotype, \n ped = self.ped,\n command = command)\n\ndef makefile_start(args):\n # https://stackoverflow.com/questions/24641948/merging-csv-files-appending-instead-of-merging/24643455\n cmd = \"\"\"\\t/net/fantasia/home/jweinstk/rvtest_wrapper/cat_rvtest.sh {directory} {pheno}\\n\"\"\".format(directory = args.directory,\n pheno = args.pheno_name)\n return cmd\n\ndef create_makefile(args):\n\n makefile = \"\"\n makefile = \"all: {directory}rvtests.OK\\n\".format(directory = 
args.directory)\n # makefile = \".DELETE_ON_ERROR\\nall: {directory}rvtests.OK\\n\".format(directory = args.directory)\n autosomes = range(1, 23)\n global rule\n rules = [rule(args.directory, args.ped, args.pheno_name, chrom) for chrom in autosomes]\n makefile += \"\\n{directory}rvtests.OK: {targets}\\n\".format(directory = args.directory, \n targets = \" \".join([r.target for r in rules]))\n makefile += makefile_start(args)\n\n for r in rules:\n makefile += r.create_rule()\n\n return makefile\n\nclass test_args:\n pheno_name = \"250.2\"\n directory = \"/net/fantasia/home/jweinstk/mgi/mgiAnalysis/output/case_control/250.2/\"\n ped = \"/net/fantasia/home/jweinstk/julia-epacts/data/MGI.filtered.discrete.FINAL2.ped\"\n\n# test = test_args()\n\nif __name__ == \"__main__\":\n args = argparser.parse_args()\n script = create_makefile(args)\n output = \"{directory}{pheno}.Makefile\".format(directory = args.directory, pheno = args.pheno_name)\n with open(output, \"w\") as f:\n f.write(script)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357924361","text":"from copy import deepcopy\n\n\nclass Matrix:\n def __init__(self, values):\n self.values = values\n self.n_rows = len(values)\n self.n_cols = len(values[0])\n\n def __neg__(self):\n return Matrix([[-element for element in row] for row in self.values])\n\n def __add__(self, other):\n res = [[0 for _ in range(self.n_cols)] for __ in range(self.n_rows)]\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n res[i][j] = self.values[i][j] + other.values[i][j]\n return Matrix(res)\n\n def __mul__(self, other):\n assert self.n_cols == other.n_rows\n\n res = [[0 for _ in range(other.n_cols)] for __ in range(self.n_rows)]\n for i in range(self.n_rows):\n for j in range(other.n_cols):\n res[i][j] = sum([self.values[i][k] * other.values[k][j] for k in range(self.n_cols)])\n return Matrix(res)\n\n def __sub__(self, other):\n return self + (-other)\n\n def __len__(self):\n return len(self.values)\n\n def invert(self):\n M = deepcopy(self.values)\n\n M = [M[i] + i * [0] + [1] + (self.n_rows - i - 1) * [0] for i in range(self.n_rows)]\n\n for i in range(self.n_rows):\n j = i\n while j < self.n_rows - 1 and int(M[j][i]) == 0:\n j += 1\n if j > i:\n M[i], M[j] = M[j], M[i]\n pivot_inverse = M[i][i] ** (-1)\n M[i] = [pivot_inverse * M[i][ell] for ell in range(2 * self.n_rows)]\n for k in range(i + 1, self.n_rows):\n factor = -M[k][i]\n M[k] = [M[k][ell] + factor * M[i][ell] for ell in range(2 * self.n_rows)]\n\n for i in range(self.n_rows - 1, -1, -1):\n for k in range(i - 1, -1, -1):\n factor = -M[k][i]\n M[k] = [M[k][ell] + factor * M[i][ell] for ell in range(2 * self.n_rows)]\n\n for i in range(self.n_rows):\n if M[i][i] != 1:\n raise ZeroDivisionError('Matrix is not invertible.')\n return Matrix([row[self.n_rows:] for row in M])\n\n def l1_norm(self):\n count = 0\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n if self.values[i][j] != 0:\n count += 1\n return count\n\n def __repr__(self):\n return '\\n'.join([', '.join([str(element) for element in row]) for row in self.values])\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538656481","text":"# -*- coding:utf-8 -*-\nimport os\nimport random\nimport re\nfrom string import punctuation as 
en_punc\nfrom zhon.hanzi import punctuation as zh_punc\nfrom collections import Counter\n\n\nclass Metrics(object):\n \"\"\"用于评价模型,计算每个标签的精确率,召回率,F1分数\"\"\"\n\n def __init__(self, golden_tags, predict_tags,tag_map=None, remove_O=False):\n\n # [[t1, t2], [t3, t4]...] --> [t1, t2, t3, t4...]\n self.golden_tags = flatten_lists(golden_tags)\n self.predict_tags = flatten_lists(predict_tags)\n if tag_map is not None: self.tag_map=dict(zip(tag_map.values(),tag_map.keys()))\n else: self.tag_map=None\n\n if remove_O: # 将O标记移除,只关心实体标记\n self._remove_Otags()\n\n # 辅助计算的变量\n self.tagset = set(self.golden_tags)\n self.correct_tags_number = self.count_correct_tags()\n self.predict_tags_counter = Counter(self.predict_tags)\n self.golden_tags_counter = Counter(self.golden_tags)\n\n # 计算精确率\n self.precision_scores = self.cal_precision()\n\n # 计算召回率\n self.recall_scores = self.cal_recall()\n\n # 计算F1分数\n self.f1_scores = self.cal_f1()\n\n def cal_precision(self):\n\n precision_scores = {}\n for tag in self.tagset:\n precision_scores[tag] = self.correct_tags_number.get(tag, 0) / \\\n max(1e-10, self.predict_tags_counter[tag])\n\n return precision_scores\n\n def cal_recall(self):\n\n recall_scores = {}\n for tag in self.tagset:\n recall_scores[tag] = self.correct_tags_number.get(tag, 0) / \\\n max(1e-10, self.golden_tags_counter[tag])\n return recall_scores\n\n def cal_f1(self):\n f1_scores = {}\n for tag in self.tagset:\n p, r = self.precision_scores[tag], self.recall_scores[tag]\n f1_scores[tag] = 2 * p * r / (p + r + 1e-10) # 加上一个特别小的数,防止分母为0\n return f1_scores\n\n def report_scores(self):\n \"\"\"将结果用表格的形式打印出来,像这个样子:\n\n precision recall f1-score support\n B-LOC 0.775 0.757 0.766 1084\n I-LOC 0.601 0.631 0.616 325\n B-MISC 0.698 0.499 0.582 339\n I-MISC 0.644 0.567 0.603 557\n B-ORG 0.795 0.801 0.798 1400\n I-ORG 0.831 0.773 0.801 1104\n B-PER 0.812 0.876 0.843 735\n I-PER 0.873 0.931 0.901 634\n\n avg/total 0.779 0.764 0.770 6178\n \"\"\"\n # 打印表头\n header_format = '{:>9s} {:>9} {:>9} {:>9} {:>9}'\n header = ['precision', 'recall', 'f1-score', 'support']\n print(header_format.format('', *header))\n\n row_format = '{:>9s} {:>9.4f} {:>9.4f} {:>9.4f} {:>9}'\n # 打印每个标签的 精确率、召回率、f1分数\n for tag in self.tagset:\n print(row_format.format(\n self.tag_map[tag] if self.tag_map is not None else tag,\n self.precision_scores[tag],\n self.recall_scores[tag],\n self.f1_scores[tag],\n self.golden_tags_counter[tag]\n ))\n\n # 计算并打印平均值\n avg_metrics = self._cal_weighted_average()\n print(row_format.format(\n 'avg/total',\n avg_metrics['precision'],\n avg_metrics['recall'],\n avg_metrics['f1_score'],\n len(self.golden_tags)\n ))\n\n def count_correct_tags(self):\n \"\"\"计算每种标签预测正确的个数(对应精确率、召回率计算公式上的tp),用于后面精确率以及召回率的计算\"\"\"\n correct_dict = {}\n for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):\n if gold_tag == predict_tag:\n if gold_tag not in correct_dict:\n correct_dict[gold_tag] = 1\n else:\n correct_dict[gold_tag] += 1\n\n return correct_dict\n\n def _cal_weighted_average(self):\n\n weighted_average = {}\n total = len(self.golden_tags)\n\n # 计算weighted precisions:\n weighted_average['precision'] = 0.\n weighted_average['recall'] = 0.\n weighted_average['f1_score'] = 0.\n for tag in self.tagset:\n size = self.golden_tags_counter[tag]\n weighted_average['precision'] += self.precision_scores[tag] * size\n weighted_average['recall'] += self.recall_scores[tag] * size\n weighted_average['f1_score'] += self.f1_scores[tag] * size\n\n for metric in weighted_average.keys():\n weighted_average[metric] /= 
total\n\n return weighted_average\n\n def _remove_Otags(self):\n\n length = len(self.golden_tags)\n O_tag_indices = [i for i in range(length)\n if self.golden_tags[i] == 'O']\n\n self.golden_tags = [tag for i, tag in enumerate(self.golden_tags)\n if i not in O_tag_indices]\n\n self.predict_tags = [tag for i, tag in enumerate(self.predict_tags)\n if i not in O_tag_indices]\n print(\"原总标记数为{},移除了{}个O标记,占比{:.2f}%\".format(\n length,\n len(O_tag_indices),\n len(O_tag_indices) / length * 100\n ))\n\n def report_confusion_matrix(self):\n \"\"\"计算混淆矩阵\"\"\"\n\n print(\"\\nConfusion Matrix:\")\n tag_list = list(self.tagset)\n # 初始化混淆矩阵 matrix[i][j]表示第i个tag被模型预测成第j个tag的次数\n tags_size = len(tag_list)\n matrix = []\n for i in range(tags_size):\n matrix.append([0] * tags_size)\n\n # 遍历tags列表\n for golden_tag, predict_tag in zip(self.golden_tags, self.predict_tags):\n try:\n row = tag_list.index(golden_tag)\n col = tag_list.index(predict_tag)\n matrix[row][col] += 1\n except ValueError: # 有极少数标记没有出现在golden_tags,但出现在predict_tags,跳过这些标记\n continue\n\n # 输出矩阵\n row_format_ = '{:>7} ' * (tags_size + 1)\n print(row_format_.format(\"\", *tag_list))\n for i, row in enumerate(matrix):\n print(row_format_.format(tag_list[i], *row))\n\n\ndef flatten_lists(lists):\n flatten_list = []\n for l in lists:\n if type(l) == list:\n flatten_list += l\n else:\n flatten_list.append(l)\n return flatten_list\n\ndef format_result(result, text, tag): \n entities = [] \n for i in result: \n begin, end = i \n entities.append({ \n \"start\":begin, \n \"stop\":end + 1, \n \"word\":text[begin:end+1],\n \"type\":tag\n }) \n return entities\n\ndef get_tags(path, tag, tag_map=None):\n if tag_map is not None:\n begin_tag = tag_map.get(\"B-\" + tag)\n mid_tag = tag_map.get(\"I-\" + tag)\n end_tag = tag_map.get(\"E-\" + tag)\n single_tag = tag_map.get(\"S\")\n o_tag = tag_map.get(\"O\")\n else:\n begin_tag = \"B-\" + tag\n mid_tag = \"I-\" + tag\n end_tag = \"E-\" + tag\n single_tag = \"S\"\n o_tag = \"O\"\n begin = -1\n end = 0\n tags = []\n last_tag = 0\n for index, tag in enumerate(path):\n if tag == begin_tag and index == 0:\n begin = 0\n elif tag == begin_tag:\n begin = index\n elif tag == end_tag and last_tag in [mid_tag, begin_tag] and begin > -1:\n end = index\n tags.append([begin, end])\n elif tag == o_tag or tag == single_tag:\n begin = -1\n last_tag = tag\n return tags\n\ndef f1_score(tar_path, pre_path, tag, tag_map):\n origin = 0.\n found = 0.\n right = 0.\n for fetch in zip(tar_path, pre_path):\n tar, pre = fetch\n tar_tags = get_tags(tar, tag, tag_map)\n pre_tags = get_tags(pre, tag, tag_map)\n\n origin += len(tar_tags)\n found += len(pre_tags)\n\n for p_tag in pre_tags:\n if p_tag in tar_tags:\n right += 1\n\n recall = 0. if origin == 0 else (right / origin)\n precision = 0. if found == 0 else (right / found)\n f1 = 0. 
if recall+precision == 0 else (2*precision*recall)/(precision + recall)\n print(\"\\t{}\\trecall= {:.2f}\\tprecision= {:.2f}\\tf1= {:.2f}\".format(tag, recall, precision, f1))\n return recall, precision, f1\n\ndef getWordTagPairs(tagedSentence, seged=True, tagScheme=\"BMES\", onlyNP=False, entityRe=r'\\[\\@.*?\\#.*?\\*\\]'):\n newSent = tagedSentence.strip('\\n')#.decode('utf-8')\n filterList = re.findall(entityRe, newSent)\n newSentLength = len(newSent)\n chunk_list = []\n start_pos = 0\n end_pos = 0\n if len(filterList) == 0:\n singleChunkList = []\n singleChunkList.append(newSent)\n singleChunkList.append(0)\n singleChunkList.append(len(newSent))\n singleChunkList.append(False)\n chunk_list.append(singleChunkList)\n # print singleChunkList\n singleChunkList = []\n else:\n for pattern in filterList:\n # print pattern\n singleChunkList = []\n start_pos = end_pos + newSent[end_pos:].find(pattern)\n end_pos = start_pos + len(pattern)\n singleChunkList.append(pattern)\n singleChunkList.append(start_pos)\n singleChunkList.append(end_pos)\n singleChunkList.append(True)\n chunk_list.append(singleChunkList)\n singleChunkList = []\n ## chunk_list format:\n full_list = []\n for idx in range(0, len(chunk_list)):\n if idx == 0:\n if chunk_list[idx][1] > 0:\n full_list.append([newSent[0:chunk_list[idx][1]], 0, chunk_list[idx][1], False])\n full_list.append(chunk_list[idx])\n else:\n full_list.append(chunk_list[idx])\n else:\n if chunk_list[idx][1] == chunk_list[idx-1][2]:\n full_list.append(chunk_list[idx])\n elif chunk_list[idx][1] < chunk_list[idx-1][2]:\n print(\"ERROR: found pattern has overlap!\", chunk_list[idx][1], ' with ', chunk_list[idx-1][2])\n else:\n full_list.append([newSent[chunk_list[idx-1][2]:chunk_list[idx][1]], chunk_list[idx-1][2], chunk_list[idx][1], False])\n full_list.append(chunk_list[idx])\n\n if idx == len(chunk_list) - 1 :\n if chunk_list[idx][2] > newSentLength:\n print(\"ERROR: found pattern position larger than sentence length!\")\n elif chunk_list[idx][2] < newSentLength:\n full_list.append([newSent[chunk_list[idx][2]:newSentLength], chunk_list[idx][2], newSentLength, False])\n else:\n continue\n return turnFullListToOutputPair(full_list, seged, tagScheme, onlyNP)\n\n\ndef turnFullListToOutputPair(fullList, seged=True, tagScheme=\"BMES\", onlyNP=False):\n pairList = []\n #ch2en_dict=get_label_ch2en_dict()\n for eachList in fullList:\n if eachList[3]:\n contLabelList = eachList[0].strip('[@$]').rsplit('#', 1)\n if len(contLabelList) != 2:\n print(\"Error: sentence format error!\")\n label = contLabelList[1].strip('*')\n if seged:\n contLabelList[0] = contLabelList[0].split()\n if onlyNP:\n label = \"NP\"\n outList = outputWithTagScheme(contLabelList[0], label, tagScheme)\n #outList = outputWithTagScheme(contLabelList[0], ch2en_dict[label], tagScheme)\n for eachItem in outList:\n pairList.append(eachItem)\n else:\n if seged:\n eachList[0] = eachList[0].split()\n for idx in range(0, len(eachList[0])):\n basicContent = eachList[0][idx]\n if basicContent == ' ':\n continue\n pair = basicContent + ' ' + 'O\\n'\n pairList.append(pair)#.encode('utf-8'))\n return pairList\n\n\ndef outputWithTagScheme(input_list, label, tagScheme=\"BMES\"):\n output_list = []\n list_length = len(input_list)\n if tagScheme==\"BMES\":\n if list_length ==1:\n pair = input_list[0]+ ' ' + 'S-' + label + '\\n'\n output_list.append(pair)#.encode('utf-8'))\n else:\n for idx in range(list_length):\n if idx == 0:\n pair = input_list[idx]+ ' ' + 'B-' + label + '\\n'\n elif idx == list_length -1:\n pair = 
input_list[idx]+ ' ' + 'E-' + label + '\\n'\n else:\n pair = input_list[idx]+ ' ' + 'M-' + label + '\\n'\n output_list.append(pair)#.encode('utf-8'))\n elif tagScheme==\"BIOES\":\n if list_length ==1:\n pair = input_list[0]+ ' ' + 'S-' + label + '\\n'\n output_list.append(pair)#.encode('utf-8'))\n else:\n for idx in range(list_length):\n if idx == 0:\n pair = input_list[idx]+ ' ' + 'B-' + label + '\\n'\n elif idx == list_length -1:\n pair = input_list[idx]+ ' ' + 'E-' + label + '\\n'\n else:\n pair = input_list[idx]+ ' ' + 'I-' + label + '\\n'\n output_list.append(pair)#.encode('utf-8'))\n else:\n for idx in range(list_length):\n if idx == 0:\n pair = input_list[idx]+ ' ' + 'B-' + label + '\\n'\n else:\n pair = input_list[idx]+ ' ' + 'I-' + label + '\\n'\n output_list.append(pair)#.encode('utf-8'))\n return output_list\n\ndef gen_dataset():\n train=open(\"./data/train0.anns\",'a',encoding=\"utf-8\")\n dev=open(\"./data/dev0.anns\",'a',encoding=\"utf-8\")\n test=open(\"./data/test0.anns\",'a',encoding=\"utf-8\")\n instances=[]\n for root, dirs, files in os.walk(\"../BootstrappingRE/data/im_data/ori_data\"):\n for file in files:\n lines=open(os.path.join(root,file),\"r\",encoding=\"utf-8\").readlines()\n for line in lines:\n line=line.strip().split(\"\\t\\t\")\n if len(line[-1])==0:continue\n instances.append(line[-1])\n\n random.shuffle(instances)\n for instance in instances:\n wordTagPairs = getWordTagPairs(instance, False, \"BIOES\", False, r'\\[\\@.*?\\#.*?\\*\\](?!\\#)')\n prob=random.random()\n if prob<=0.7: seqFile=train\n elif prob<=0.9: seqFile=dev\n else: seqFile=test\n\n for wordTag in wordTagPairs:seqFile.write(wordTag)\n seqFile.write(\"end\\n\")\n #seqFile.write(\"\\n\")\n\ndef get_label_ch2en_dict():\n return {\"政策技术\":\"POL\",\"应用领域\":\"DOM\",\"事业单位\":\"ORG\",\"事件地点\":\"LOC\",\n \"事件时间\":\"TIME\",\"技术特点\":\"FEAT\",\"问题挑战\":\"CHAL\"}\n\ndef char_replace():\n train = open(\"./data/train0.anns\", 'r', encoding=\"utf-8\").readlines()\n dev = open(\"./data/dev0.anns\", 'r', encoding=\"utf-8\").readlines()\n test = open(\"./data/test0.anns\", 'r', encoding=\"utf-8\").readlines()\n wtrain = open(\"./data/train.anns\", 'a', encoding=\"utf-8\")\n wdev = open(\"./data/dev.anns\", 'a', encoding=\"utf-8\")\n wtest = open(\"./data/test.anns\", 'a', encoding=\"utf-8\")\n for line in train:\n if line[0] in en_punc or line[0] in zh_punc: continue\n wtrain.write(line)\n for line in dev:\n if line[0] in en_punc or line[0] in zh_punc: continue\n wdev.write(line)\n for line in test:\n if line[0] in en_punc or line[0] in zh_punc: continue\n wtest.write(line)\n\n\n\nif __name__==\"__main__\":\n gen_dataset()\n #char_replace()\n","sub_path":"NER/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341497255","text":"from shutil import copy\nfrom shutil import copytree\nfrom shutil import rmtree\nfrom pathlib import Path\nfrom os.path import expanduser\nimport os.path\n\nhome = expanduser(\"~\")\n\n# read filelist\nwith open('filelist.txt') as f:\n content = f.readlines()\ncontent = [x.strip() for x in content if x] \n\nprint('restoring a file or dir will create a backup of the original file appended with .save in your home directory')\n\nfor f in content:\n\tsaved = 'files/'+f\n\tpath = home+'/'+f\n\tinp = input(\"replace \"+path+\"? 
(Y/n)\")\n\tif inp == 'y' or inp == 'Y' or inp == '':\n\t\tif os.path.isfile(saved):\n\t\t\tif os.path.isfile(path):\n\t\t\t\tcopy(path, path+'.save')\n\t\t\tcopy(saved, path)\n\t\t\tprint('=> restoring file '+path)\n\t\telif os.path.isdir(saved):\n\t\t\tif os.path.isdir(path):\n\t\t\t\tcopytree(path,path+'.save')\n\t\t\t\trmtree(path)\n\t\t\tcopytree(saved, path)\n\t\t\tprint('=> restoring dir '+path)\n\telse:\n\t\tpass\nprint(\"dotfiles updated\")","sub_path":"restore.py","file_name":"restore.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"51751364","text":"import xlrd\nimport xlwt\nimport openpyxl\n\nglobal nrows\n\ndef set_style(name,height,bold=False):\n\n style = xlwt.XFStyle()\n\n font = xlwt.Font()\n font.name = name\n font.bold = bold\n font.color_index = 4\n font.height = height\n\n style.font = font\n\n return style\n\ndef read_excel(file_name):\n '''\n 读取表格file_datefrom_excel的数据(使用绝对路径),遍历“分类号”,\n 将相同的归为一行数据,整合不同内容的字段,如:“资产编号”使用下拉框进行数据存储,以及数据展示。\n '''\n global file_datefrom_excel\n data = xlrd.open_workbook(file_name)#文件名以及路径,如果路径或者文件名有中文给前面加一个r拜师原生字符.\n table = data.sheet_by_name(\"二教104\")\n \n colum0 = []\n date_moveline = 1\n common_count = 0\n row0 = table.row_values(0, start_colx=0, end_colx=None)#返回由该行中所有单元格的数据组成的列表\n print(\"row0 {}\".format(row0))\n\n ncols = table.ncols #获取列表的有效列数\n nrows = table.nrows #获取该sheet中的有效行数\n '''\n print(sheet1.cell(1,0).value)#获取表格里的内容,三种方式\n print(sheet1.cell_value(1,0))\n print(sheet1.row(1)[0].value)\n '''\n for i in range(2,nrows):\n print(i)\n before = str(table.cell(i-1,3))\n now = str(table.cell(i,3))\n #print(\"下一个记录 {}\".format(before))\n #print(\"当前记录 {}\".format(now))\n #print(type(before))\n if(i==nrows-1):\n #返回由该行中所有单元格的数据组成的列表\n colum0.append(table.row_values(i, start_colx=0, end_colx=None))\n print(common_count)\n write_excel(common_count+1,nrows,ncols,row0,colum0)\n \n if(before==now):\n continue\n else:\n #返回由该行中所有单元格的数据组成的列表\n colum0.append(table.row_values(i, start_colx=0, end_colx=None))\n #print(\"这项记录共有:{}条\".format(common_count))\n common_count += 1 \n \n #print(table.row(1)) #返回由该“行”中所有的单元格对象组成的列表\n #print(table.col_slice(1, start_rowx=0, end_rowx=None)) #返回由该列中所有的单元格对象组成的列表\n\ndef combox_excel():\n\n '''\n “资产编号”使用下拉框进行数据存储,进而用以数据展示。\n '''\n\n\ndef write_excel(line,nrows,ncols,row0,colum0):\n \n print(\"******写入列值 {}******\".format(line)) \n #创建一个Workbook 设置编码\n workbook = xlwt.Workbook()\n\n #创建一个Worksheet\n worksheet = workbook.add_sheet('My Worksheet',cell_overwrite_ok=True)\n\n #写第一行\n for i in range(0,len(row0)):\n worksheet.write(0,i,row0[i],set_style('Times New Roman',220,True))\n \n #写入列值\n for i in range(1,line):\n for j in range(0,ncols):\n worksheet.write(i,j,colum0[i][j],set_style('Times New Roman',220,False))\n '''\n write_merge(x, x + m, y, w + n, string, sytle)\n x表示行,y表示列,m表示跨行个数,n表示跨列个数,string表示要写入的单元格内容,\n style表示单元格样式。其中,x,y,w,h,都是以0开始计算的。\n 这个和xlrd中的读合并单元格的不太一样。\n\n row0 = [\"审核状态\",\"资产编号\",\"资产名称\",\"分类号\",\"分类名称\",\"项目号\",\"单价\",\"总造价\",\"套(件数)\",\"计量单位\",\n \"使(领)用人\",\"使用方向\",\"使用单位\",\"存放地点\",\"单位性质\",\"现状\",\"生产厂家\",\"出厂编号\",\"型号\",\"规格\",\"经费来源\",\n \"资产来源\",\"购置日期\",\"入帐日期\",\"调转入日期\",\"档案编号\",\"凭证号\",\"验收单号\",\"记帐类型\",\"学科\",\"学科类别\",\n \"归口审核人\",\"归口审核单位\",\"归口审核日期\",\"归口审核意见\",\"财务审核人\",\"财务审核日期\",\"财务审核意见\"]\n colum0 = [\"张三\",\"李四\",\"恋习Python\",\"小明\",\"小红\",\"无名\"]\n\n #参数对应:row,line,values 合并单元格函数\n worksheet.write(1,3,'2006/12/12')\n 
worksheet.write_merge(6,6,1,3,'行合并,未知')#合并“行”单元格\n worksheet.write_merge(1,2,3,3,'列合并,打游戏')#合并“列”单元格\n worksheet.write_merge(4,5,3,3,'打篮球')\n '''\n #保存文件\n workbook.save('Excel_create_test3.xls')\n print(\"@@@@@@@@@@@写入完成共_{}_记录@@@@@@@@@@\".format(line))\n \n\ndef read_with_dropdown(book_name, sheet_name):\n #读取excel.xlsx格式文件\n wb = openpyxl.load_workbook(book_name)\n #读取sheet表\n ws = wb[sheet_name]\n # 获取内容存在下拉选的框数据\n validations = ws.data_validations.dataValidation\n #遍历存在下拉选的单元格\n for validation in validations:\n cell = validation.sqref\n result = validation.formula1\n print(\"单元格位置:\"+str(cell)+\",下拉选内容:\"+result)\n\n\nif __name__ == '__main__':\n\n #read_with_dropdown()\n #\"D:\\WriteCode\\PyhtonCode\\基于xlrd的excel表格的数据分析\\测试表格Test1.xlsx\", \"test2\"\n file_datefrom_excel = \"D:\\WriteCode\\PyhtonCode\\基于xlrd的excel表格的数据分析\\二教104.xls\"\n read_excel(file_datefrom_excel)\n\n\n\n\n\n\n\n","sub_path":"基于xlrd的excel表格的数据分析/创建Excel文件并写入内容.py","file_name":"创建Excel文件并写入内容.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610015717","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Abdelhafidh Belalia (s77rt)\"\n__license__ = \"MIT\"\n__maintainer__ = \"Abdelhafidh Belalia (s77rt)\"\n__email__ = \"admin@abdelhafidh.com\"\n__version__ = \"0.1.1\"\n__github__ = \"https://github.com/s77rt/hccapxsplitter/\"\n\nimport os\nimport argparse\nimport errno\nimport re\nimport gzip\nfrom operator import itemgetter\nfrom itertools import groupby\n\n### Constants ###\nHCCAPX_SIGNATURE = b'HCPX'\nHCCAPX_SIZE = 393\n###\n\n### H-Functions ###\ndef get_valid_filename(s, r='_'):\n\ts = str(s).strip().replace(' ', '_')\n\treturn re.sub(r'(?u)[^-\\w.\\@]', r, s)\ndef xprint(text=\"\", end='\\n', flush=True):\n\tprint(text, end=end, flush=flush)\n###\n\n### Database-Like ###\nclass hccapxs(list):\n\tdef __init__(self):\n\t\tlist.__init__(self)\n\nclass Database(object):\n\tdef __init__(self):\n\t\tsuper(Database, self).__init__()\n\t\tself.hccapxs = hccapxs()\n\tdef hccapx_add(self, bssid, essid, raw_data):\n\t\tself.hccapxs.append({ \\\n\t\t\t'bssid': bssid, \\\n\t\t\t'essid': essid, \\\n\t\t\t'raw_data': raw_data \\\n\t\t})\n\tdef hccapx_groupby(self, group_by):\n\t\tif group_by == \"handshake\":\n\t\t\tself.hccapxs = [{'key': v['bssid']+\"_\"+str(k), 'raw_data': [v['raw_data']]} for k, v in enumerate(self.hccapxs)]\n\t\telse:\n\t\t\tself.hccapxs.sort(key=itemgetter(group_by))\n\t\t\tself.hccapxs = groupby(self.hccapxs, key=itemgetter(group_by))\n\t\t\tself.hccapxs = [{'key': k, 'raw_data': [x['raw_data'] for x in v]} for k, v in self.hccapxs]\nDB = Database()\n###\n\n######################### CORE #########################\nHS_TOTAL = 0\nHS_AUTH = 0\n\ndef read_file(file):\n\tif file.lower().endswith('.gz'):\n\t\treturn gzip.open(file, 'rb')\n\treturn open(file, 'rb')\n\ndef read_hccapx(hccapx_file, auth_only=True):\n\tglobal HS_TOTAL, HS_AUTH\n\tdef extract_bssid(raw_data):\n\t\tbssid = raw_data[59:65].hex()\n\t\tbssid = '-'.join(bssid[i:i+2] for i in range(0,12,2))\n\t\tbssid = bssid.upper()\n\t\treturn bssid\n\tdef extract_essid(raw_data):\n\t\tessid = raw_data[10:10+raw_data[9]]\n\t\tessid = str(essid.decode(encoding='utf-8', errors='ignore').rstrip('\\x00'))\n\t\treturn essid\n\tdef extract_message_pair(raw_data):\n\t\tmessage_pair = raw_data[8]\n\t\treturn message_pair\n\twhile True: \n\t\thccapx = hccapx_file.read(HCCAPX_SIZE) \n\t\tif hccapx and hccapx[0:4] == 
HCCAPX_SIGNATURE:\n\t\t\tHS_TOTAL += 1\n\t\t\tmessage_pair = extract_message_pair(hccapx)\n\t\t\tif (message_pair & 0b00000001) or (message_pair & 0b00000010) or (message_pair & 0b00000100):\n\t\t\t\tHS_AUTH += 1\n\t\t\telif auth_only:\n\t\t\t\tcontinue\n\t\t\tDB.hccapx_add(extract_bssid(hccapx), extract_essid(hccapx), hccapx)\n\t\telse:\n\t\t\tbreak\n\n######################### MAIN #########################\n\ndef main():\n\tif os.path.isfile(args.input):\n\t\thccapx_file = read_file(args.input)\n\t\tread_hccapx(hccapx_file, args.auth)\n\t\tDB.hccapx_groupby(args.group_by)\n\n\t\txprint(\"Handshakes: {} ({} authenticated)\".format(HS_TOTAL, HS_AUTH))\n\n\t\tif len(DB.hccapxs):\n\t\t\twritten = 0\n\t\t\txprint(\"\\nOutput hccapx files:\")\n\t\t\tfor hccapx in DB.hccapxs:\n\t\t\t\tif args.output:\n\t\t\t\t\thccapx_filename = (re.sub('\\\\.hccap(x?)$', '', args.output, flags=re.IGNORECASE)) + get_valid_filename(\"{}.hccapx\".format(\"_\"+str(hccapx['key']) if hccapx['key'] != \"none\" else ''))\n\t\t\t\telse:\n\t\t\t\t\thccapx_filename = get_valid_filename(\"{}.hccapx\".format(str(hccapx['key'])))\n\t\t\t\tprint(hccapx_filename)\n\t\t\t\thccapx_file = open(hccapx_filename, 'wb')\n\t\t\t\thccapx_file.write(b''.join(hccapx['raw_data']))\n\t\t\t\thccapx_file.close()\n\t\t\t\twritten += len(hccapx['raw_data'])\n\t\t\tif written:\n\t\t\t\txprint(\"\\nWritten {} WPA Handshakes to {} files\".format(written, len(DB.hccapxs)), end='')\n\t\txprint()\n\telse:\n\t\txprint(FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.input))\n\t\texit()\n\n#########################\n#########################\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Tool to split one big hccapx file to multiple hccapx files', add_help=False)\n\trequired = parser.add_argument_group('required arguments')\n\toptional = parser.add_argument_group('optional arguments')\n\trequired.add_argument(\"--input\", \"-i\", help=\"Input hccapx file\", metavar=\"capture.hccapx\", required=True)\n\toptional.add_argument(\"--output\", \"-o\", help=\"Output file\", metavar=\"capture.hccapx\")\n\toptional.add_argument(\"--auth\", help=\"Export only authenticated handshakes\", action=\"store_true\")\n\toptional.add_argument(\"--group-by\", \"-g\", choices=['bssid', 'essid', 'handshake'], default='bssid')\n\toptional.add_argument(\"--quiet\", \"-q\", help=\"Enable quiet mode (print only output files)\", action=\"store_true\")\n\toptional.add_argument(\"--version\", \"-v\", action='version', version=__version__)\n\toptional.add_argument(\"--help\", \"-h\", action='help', default=argparse.SUPPRESS,\thelp='show this help message and exit')\n\targs = parser.parse_args()\n\tif args.quiet:\n\t\tdef xprint(text=\"\", end='\\n', flush=True):\n\t\t\tpass\n\tmain()\n","sub_path":"hccapxsplitter.py","file_name":"hccapxsplitter.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"31030624","text":"import package_datasets as pcd\nimport package_algorithm as pca\nimport numpy as np\n\n# model = input(\"Enter model name : \")\n\ndf = pcd.load_data()\n\nX_train, X_test, Y_train, Y_test = pcd.learn_data(df, \"mean radius\", \"mean texture\", \"y\")\nX_train_scaled = pcd.data_scale(X_train)\nX_test_scaled = pcd.data_scale(X_test)\n\nY_test=np.reshape(Y_test,(-1,1))\nY_train=np.reshape(Y_train,(-1,1))\n\nprint(Y_test)\nprint(Y_train)\n\n# 
-----------------------------------------------------------------------------------------\n\nY_train_pred = pca.logistic_regression_model(X_train_scaled, Y_train, X_train_scaled, \"pred\")\nY_test_pred = pca.logistic_regression_model(X_train_scaled, Y_train, X_test_scaled, \"pred\")\nlog_reg = pca.logistic_regression_model(X_train_scaled, Y_train, X_test_scaled, \"model\")\n\n# shp=Y_train.shape\n# print(shp)\n\nprint(Y_train_pred[:5])\nprint(Y_test_pred[:5])\n\n\npcd.plot_decision_regions(np.array(X_train_scaled), np.array(Y_train), clf=log_reg)\npcd.plt.show()\n\n# pcd.plot_data(X_test[\"mean radius\"], X_test[\"mean texture\"], Y_test_pred, \"mean radius\", \"mean texture\", \"Pred Test\")\n# pcd.plot_data(X_train[\"mean radius\"], X_train[\"mean texture\"], Y_train_pred, \"mean radius\", \"mean texture\", \"Pred Train\")\n\n# print(X_test_scaled[:3])\n\n# print(len(X_train))\n# display(X_train.head())\n# print(len(X_test))\n# display(X_test.head())\n","sub_path":"Reg_an/Breast_cancer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81729147","text":"from util import Stack\n\n# write a function that takes a 2d binary array and returns the\n# number of 1 islands. An island consists of 1's that are connected\n# to the north, south, east, or west\n\n# for example:\n# undirected graph\n# unweighted\n#Cyclic (undirected)\n\n# Nodes are numbers, edges are connections between 1's\n\nislands = [\n    [0, 1, 0, 1, 0],\n    [1, 1, 0, 1, 1],\n    [0, 0, 1, 0, 0],\n    [1, 0, 1, 0, 0],\n    [1, 1, 0, 0, 0],\n]\n\n# * islands[0] = [0, 1, 0, 1, 0] -- first row\n# * islands[0][1] = 1 --- first row, second index\n\n# Visit each cell in the 2d array. When you come across a 1,\n# Traverse it and mark all connected nodes as visited, then\n# increment a counter\n\n\ndef get_islands_neighbors(x, y, matrix):\n    neighbors = []\n    # N y-1\n    # S y+1\n    # Check if a 1 to north\n    if y > 0 and matrix[y-1][x] == 1:\n        neighbors.append((x, y-1))\n    # check south\n    if y < len(matrix) - 1 and matrix[y+1][x] == 1:\n        neighbors.append((x, y+1))\n    # check east\n    if x < len(matrix[0]) - 1 and matrix[y][x+1] == 1:\n        neighbors.append((x + 1, y))\n    # check west\n    if x > 0 and matrix[y][x-1] == 1:\n        neighbors.append((x - 1, y))\n    return neighbors\n\n\ndef dft_islands(start_x, start_y, matrix, visited):\n    \"\"\"\n    returns an updated visited matrix after a dft of matrix\n    starting from x and y\n    \"\"\"\n    # create empty stack and push the starting vert ID\n    s = Stack()\n    s.push((start_x, start_y))\n    # while not empty\n    while s.size() > 0:\n        v = s.pop()\n        x = v[0]\n        y = v[1]\n        if not visited[y][x]:\n            visited[y][x] = True\n            for neighbor in get_islands_neighbors(x, y, matrix):\n                s.push(neighbor)\n    return visited\n\n\ndef island_counter(matrix):\n    # Create a visited matrix w/ the same dimensions as the islands\n    # matrix\n    visited = []\n    matrix_height = len(matrix)\n    matrix_width = len(matrix[0])\n    for i in range(len(matrix)):\n        visited.append([False] * matrix_width)\n    # create a counter, start at 0\n    counter = 0\n\n    # For each cell in the 2d array...\n    for x in range(matrix_width):\n        for y in range(matrix_height):\n            # When you come across a 1,\n            if not visited[y][x]:\n                if matrix[y][x] == 1:\n                    # DFT it and mark connected nodes as visited\n                    visited = dft_islands(x, y, matrix, visited)\n                    # Traverse it and mark all connected nodes as visited, then\n                    # increment a counter\n                    counter += 1\n    return counter\n\n\nisland_counter(islands) # returns 
4\n","sub_path":"projects/island/island.py","file_name":"island.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47613538","text":"# Set your secret key: remember to change this to your live secret key in production\n# See your keys here: https://dashboard.stripe.com/account/apikeys\nstripe.api_key = 'sk_test_q4fdg9hqru6omjpKGPieHxFz00UrmxTA5l'\n\nsession = stripe.checkout.Session.create(\n payment_method_types=['card'],\n subscription_data={\n 'items': [{\n 'plan': 'plan_123',\n }],\n },\n success_url='https://example.com/success?session_id={CHECKOUT_SESSION_ID}',\n cancel_url='https://example.com/cancel',\n)","sub_path":"python/tioa/teststipe.py","file_name":"teststipe.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145640438","text":"# -*- coding: utf-8 -*-\nimport os\nimport flask\nimport flask_login\nimport requests\nimport json\nimport datetime\nfrom flask_login.mixins import UserMixin\nfrom wtforms import Form, PasswordField, StringField, IntegerField\nfrom wtforms.validators import ValidationError\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import and_\nfrom urllib.parse import urlparse, urljoin\n\n# Flask Setup\nlogin_manager = flask_login.LoginManager()\napp = flask.Flask(__name__)\napp.config['DEBUG_CDN'] = os.getenv('DEBUG_CDN', False)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(\n os.getenv('DB_USER', 'demo'),\n os.getenv('DB_PASSWORD'),\n os.getenv('DB_HOST'),\n os.getenv('DB_PORT', '3306'),\n os.getenv('DB_NAME', 'demo'),\n)\n# API URL and OAUTH CLIENT URL are generally the same, API URL for Bluemoon API\napp.config['API_URL'] = os.getenv('API_URL')\napp.config['OAUTH_CLIENT_URL'] = os.getenv('OAUTH_CLIENT_URL')\n\n# static file host for Bluemoon API\napp.config['LEASE_EDITOR_CDN'] = os.getenv('LEASE_EDITOR_CDN')\n\napp.config['OAUTH_CLIENT_ID'] = os.getenv('OAUTH_CLIENT_ID')\napp.config['OAUTH_CLIENT_SECRET'] = os.getenv('OAUTH_CLIENT_SECRET')\n\napp.secret_key = 'o\\x91\\xc0\\xcehh\\xa5\\xbf!\\x8b\\xcak2\\xfe\\x81\\x89\\xb6Ch9\\x80\\xcb6\\xc7'\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\ndb = SQLAlchemy(app)\n\nBASE_HEADERS = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Provider': 'legacy'\n}\n\n\ndef is_safe_url(target):\n \"\"\"Just a safety check from flask snippets.\"\"\"\n ref_url = urlparse(flask.request.host_url)\n test_url = urlparse(urljoin(flask.request.host_url, target))\n return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc\n\n\ndef to_pretty_json(value):\n \"\"\"Pretty json for template.\"\"\"\n return json.dumps(\n value,\n sort_keys=True,\n indent=4,\n separators=(',', ': ')\n )\n\napp.jinja_env.filters['tojson_pretty'] = to_pretty_json\n\n# Forms\n\n\nclass LoginForm(Form):\n username = StringField('Username')\n license = StringField('License')\n password = PasswordField('Password')\n\n\ndef validate_lease_id(form, field):\n \"\"\"API query to fetch lease number.\"\"\"\n headers = BASE_HEADERS.copy()\n headers['Authorization'] = 'Bearer {}'.format(flask_login.current_user.token)\n url = '{}/api/lease/{}'.format(os.getenv('OAUTH_CLIENT_URL'), field.data)\n response = requests.get(url, headers=headers)\n if response.status_code == 404:\n raise ValidationError('Unable to find 
lease')\n    if response.status_code != 200:\n        raise ValidationError('Invalid lease id')\n\n\nclass SelectLeaseForm(Form):\n    lease_id = IntegerField('Lease ID', [validate_lease_id])\n\n# User utils and such\n\n\nclass User(db.Model, UserMixin):\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(80), nullable=False)\n    license = db.Column(db.String(40), nullable=False)\n    token = db.Column(db.Text, nullable=False)\n\n    def __repr__(self):\n        \"\"\"Representation.\"\"\"\n        return '<User %r>' % self.username\n\n    @property\n    def is_authenticated(self):\n        return self.id is not None\n\n    @property\n    def is_active(self):\n        return True\n\n    @property\n    def is_anonymous(self):\n        return not self.is_authenticated\n\n    def get_id(self):\n        return '{}'.format(self.id)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n    \"\"\"Find the user.\"\"\"\n    try:\n        results = User.query.get(int(user_id))\n    except (ValueError, TypeError):\n        results = None\n    return results\n\n\ndef login_user(username, password):\n    \"\"\"Take the login credentials and validate against oauth password grant server.\"\"\"\n    headers = BASE_HEADERS.copy()\n    url = '{}/oauth/token'.format(app.config['OAUTH_CLIENT_URL'])\n    payload = {\n        'username': username,\n        'password': password,\n        'grant_type': 'password',\n        'client_id': app.config['OAUTH_CLIENT_ID'],\n        'client_secret': app.config['OAUTH_CLIENT_SECRET'],\n    }\n    response = requests.post(url, headers=headers, json=payload)\n    data = response.json()\n    if response.status_code == 200:\n        user = User.query.filter(User.username == username).first()\n        if not user:\n            user = User(username=username, license=\"nada\", token=data['access_token'])\n            db.session.add(user)\n        else:\n            user.token = data['access_token']\n        db.session.commit()\n        flask_login.login_user(user)\n\n\n# Generic Utils\n\n\ndef get_property_number(token):\n    \"\"\"API query to fetch property number.\"\"\"\n    headers = BASE_HEADERS.copy()\n    headers['Authorization'] = 'Bearer {}'.format(token)\n    url = '{}/api/property'.format(app.config['OAUTH_CLIENT_URL'])\n    response = requests.get(url, headers=headers)\n    data = response.json()\n    if response.status_code == 200:\n        # Try to get the aptdb property number\n        for prop in data['data']:\n            if prop['unit_type'] == 'aptdb':\n                return prop['id']\n        # No apt db then just return the first one\n        return data['data'][0]['id']\n\n\ndef get_settings(configuration):\n    \"\"\"Pulling this out to reuse for multiple endpoints.\"\"\"\n    js_files = [\n        'inline.bundle.js',\n        'polyfills.bundle.js',\n        'main.bundle.js',\n    ]\n    css_files = [\n        'styles/styles.bundle.css',\n    ]\n\n    if app.config['DEBUG_CDN']:\n        js_files = [\n            'inline.bundle.js',\n            'polyfills.bundle.js',\n            'styles/styles.bundle.js',\n            'vendor.bundle.js',\n            'main.bundle.js',\n        ]\n        css_files = []\n\n    context = {\n        'static_url': app.config['LEASE_EDITOR_CDN'],\n        'configuration': configuration,\n        'js_files': js_files,\n        'css_files': css_files\n    }\n    return context\n\n# Views\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    \"\"\"The login page for the application.\"\"\"\n    form = LoginForm(flask.request.form)\n    if flask.request.method == 'POST' and form.validate():\n        login_user(\n            username='{}@{}'.format(form.username.data, form.license.data),\n            password=form.password.data\n        )\n        next = flask.request.args.get('next')\n\n        if not is_safe_url(next):\n            return flask.abort(400)\n\n        return flask.redirect(next or flask.url_for('index'))\n    return flask.render_template('login.html', form=form)\n\n\n@app.route('/', 
methods=['GET'])\n@flask_login.login_required\ndef index():\n \"\"\"The primary integration page for the application.\"\"\"\n configuration = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': flask_login.current_user.token,\n 'staff': flask_login.current_user.username == 'staff'\n }\n context = get_settings(configuration=configuration)\n context['refresh'] = datetime.datetime.now().strftime('%Y%m%d%H%M')\n return flask.render_template('integration.html', context=context)\n\n\n@app.route('/create', methods=['GET'])\n@flask_login.login_required\ndef create():\n \"\"\"An example of a lease create view only.\"\"\"\n configuration = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': flask_login.current_user.token,\n 'navigation': False,\n 'view': 'create',\n 'callBack': flask.url_for('callback', _external=True)\n }\n context = get_settings(configuration=configuration)\n context['refresh'] = datetime.datetime.now().strftime('%Y%m%d%H%M')\n return flask.render_template('integration.html', context=context)\n\n\n@app.route('/badcreate', methods=['GET'])\n@flask_login.login_required\ndef bad_create():\n \"\"\"An example of a lease create view only.\"\"\"\n configuration = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': flask_login.current_user.token,\n 'navigation': False,\n 'view': 'edit',\n 'leaseId': 0,\n 'callBack': flask.url_for('callback', _external=True)\n }\n context = get_settings(configuration=configuration)\n context['refresh'] = datetime.datetime.now().strftime('%Y%m%d%H%M')\n return flask.render_template('integration.html', context=context)\n\n\n@app.route('/select', methods=['GET', 'POST'])\n@flask_login.login_required\ndef select_lease():\n \"\"\"Input a lease id then if it exists redirect to integration.\"\"\"\n form = SelectLeaseForm(flask.request.form)\n message = None\n if flask.request.method == 'POST' and form.validate():\n return flask.redirect(flask.url_for('edit', lease_id=form.lease_id.data))\n return flask.render_template('select_lease.html', form=form, message=message)\n\n\n@app.route('/edit/', methods=['GET'])\n@flask_login.login_required\ndef edit(lease_id):\n \"\"\"An example of a lease edit view only.\"\"\"\n configuration = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': flask_login.current_user.token,\n 'navigation': False,\n 'view': 'edit',\n 'leaseId': lease_id,\n 'callBack': flask.url_for('callback', _external=True),\n 'origin': 'yardi',\n 'lockPopulatedFields': True,\n 'disableFieldToolTips': True,\n 'leaseData': {\n 'standard': {\n 'address': '123 Super Dr.'\n }\n }\n }\n context = get_settings(configuration=configuration)\n context['refresh'] = datetime.datetime.now().strftime('%Y%m%d%H%M')\n return flask.render_template('integration.html', context=context)\n\n\n@app.route('/callback', methods=['POST'])\n@flask_login.login_required\ndef callback():\n \"\"\"An example of a callback for lease submission.\"\"\"\n return flask.jsonify({'message': 'Success'})\n\n\n@app.route('/docs', methods=['GET'])\n@flask_login.login_required\ndef documentation():\n \"\"\"Documentation lease edit view only.\"\"\"\n configuration = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': 'TOKEN_GOES_HERE'\n 
}\n context = get_settings(configuration=configuration)\n context['create_view'] = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': 'TOKEN_GOES_HERE',\n 'navigation': False,\n 'view': 'create',\n 'callBack': flask.url_for('callback', _external=True),\n 'leaseData': None,\n }\n context['edit_view'] = {\n 'apiUrl': app.config['API_URL'],\n 'propertyNumber': get_property_number(flask_login.current_user.token),\n 'accessToken': 'TOKEN_GOES_HERE',\n 'navigation': False,\n 'view': 'edit',\n 'leaseId': 12345,\n 'callBack': flask.url_for('callback', _external=True),\n 'leaseData': None,\n }\n context['refresh'] = datetime.datetime.now().strftime('%Y%m%d%H%M')\n return flask.render_template('docs.html', context=context)\n\n\n@app.route('/logout', methods=['GET'])\n@flask_login.login_required\ndef logout():\n \"\"\"Log the user out.\"\"\"\n flask_login.logout_user()\n return flask.redirect(flask.url_for('login'))\n\n\n@login_manager.unauthorized_handler\ndef unauthorized_handler():\n \"\"\"Unauthorized handler.\"\"\"\n return flask.redirect(flask.url_for('login'))\n\nif __name__ == '__main__':\n # Only for debugging while developing\n app.jinja_env.auto_reload = True\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n db.create_all()\n app.run(host='0.0.0.0', debug=True, port=80)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531572653","text":"# -*- coding: utf-8 -*-\nimport urllib\nimport pandas as pd\nimport json\nimport tushare as ts\nimport matplotlib.pyplot as plt\ndef getMarket(stockcode):\n if stockcode.startswith('6'):\n return 1\n else:\n return 2\ndef getBSType(dtype):\n if(dtype>0) :\n return '买盘'\n elif(dtype<0):\n return '卖盘'\n else:\n return '中性'\ndef getDealVol(stockcode,vol=300):\n market=getMarket(stockcode)\n urlraw='http://hqdigi2.eastmoney.com/EM_Quote2010NumericApplication/CompatiblePage.aspx?Type=OB&stk={0}{1}&Reference=xml&limit={2}&page={3}'\n url=urlraw.format(stockcode,market,vol,1) #第一页\n print(url)\n content=urllib.request.urlopen(url).read().decode('utf-8').replace(\"var jsTimeSharingData=\",\"\").replace(\";\",\"\")\n content=content.replace('pages','\"pages\"').replace('data','\"data\"')\n jsondata= json.loads(content)\n newdata=[]\n for i in jsondata['data']:\n tp=i.split(',')\n newdata.append(tp)\n start=2\n while start<=jsondata['pages']:\n url=urlraw.format(stockcode,market,vol,start) #第i页\n #print(url)\n content=urllib.request.urlopen(url).read().decode('utf-8').replace(\"var jsTimeSharingData=\",\"\").replace(\";\",\"\")\n content=content.replace('pages','\"pages\"').replace('data','\"data\"')\n jsondata= json.loads(content)\n for i in jsondata['data']:\n tp=i.split(',')\n newdata.append(tp)\n start=start+1\n newdata=pd.DataFrame(newdata)\n newdata.columns=['times','price','vol','dtype']\n newdata['vol']=newdata['vol'].astype(int)*100\n newdata['price']=pd.to_numeric(newdata['price'])#.astype('float64')\n newdata['dtype']=newdata['dtype'].astype('int')\n newdata['type']=newdata['dtype'].apply(lambda x:getBSType(x))\n newdata['amount']=newdata['vol']*newdata['price']\n return newdata\ndef getBigVol(df):\n dtype=[u'中性',u'买盘',u'卖盘']\n color=['b','r','g']\n df=df.sort_values(by='times')\n df.index=range(df.shape[0])\n gp= df.groupby(['type'])\n sumt=gp.sum()\n sumt['avgPrice']=sumt.amount/sumt.vol #成交均价\n print(sumt)\n 
plt.figure(figsize=(15,4));\n    for i in range(3): \n        #print(dtype[i])print(color[i])\n        plt.bar(df[df['type']==dtype[i]].index, df[df['type']==dtype[i]].vol,alpha=0.7,color=color[i])\n    plt.grid(True)\n    plt.title(\"大单统计\")\n    #plt.xticks(range(df.shape[0]),range(df.shape[0]))\n    plt.margins(0)\n    plt.show()\n    #return sumt # 601005 \nstockcode='601005'\nvol=700\nop=getDealVol(stockcode,vol)\ngetBigVol(op)\n#today=ts.get_today_ticks(stockcode)\n#gp= today.groupby(['type'])\n#sumt=gp.sum()\n#sumt['avgPrice']=sumt.amount/sumt.volume # average trade price\n#print(sumt)","sub_path":"stock/eastmoney/个股当天各笔成交统计.py","file_name":"个股当天各笔成交统计.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"215061184","text":"#chapter 4 exercise\n#create fibonacci sequence and stop at the first number greater than 100\n#Fn = Fn-2+Fn-1, initial values F0=0 and F1=1\n\nFn = 0\nF0 = 0\nF1 = 1\ncheck = False\n\nwhile True:\n\tFn = F0 + F1\n\tF1 = F0\n\tF0 = Fn\n\tif(Fn > 100):\n\t\tcheck = True\n\t\t#print('check true')\n\telse:\n\t\tcheck = False\n\tprint(Fn)\n\tif(check == True):\n\t\t#print('break')\n\t\tbreak\n\t\n\n\t\n","sub_path":"chapter4Ex.py","file_name":"chapter4Ex.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447744695","text":"diccionario = {\r\n\t\"clave\":\"valor\",\r\n\t\"clave_2\":\"valor_2\",\r\n\t\"clave_3\":\"valor_3\"\r\n}\t# A key name is assigned to a value so it can be referenced later\r\nprint(type(diccionario))\r\n\r\nprint(diccionario[\"clave_2\"])\t# This is how to access a value inside\r\n# a dictionary\r\n\r\ndiccionario[\"clave_4\"]=\"valor_4\" # Adding a new key:value pair\r\n\r\nprint(diccionario)\r\n\r\n# Two keys cannot be the same; instead python overwrites\r\n\r\ndiccionario[\"clave_4\"]=\"nuevo_valor_4\"\t# Reassigning values\r\nprint(diccionario[\"clave_4\"])\r\n\r\ndel diccionario[\"clave_4\"]\t# Removing an element from the dictionary\r\n# reserved word \"del\"\r\nprint(diccionario)\r\n\r\ntupla = (1,2,3,4)\t# You can use both lists and tuples to assign\r\n# keys to the values inside the dictionary\r\ndiccionario = {\r\n\ttupla[0]: \"Uno\",\r\n\ttupla[1]: \"Dos\",\r\n\ttupla[2]: \"Tres\",\r\n\ttupla[3]: \"Cuatro\"\r\n}\r\nprint(diccionario[1])\r\n# or\r\nprint(diccionario[tupla[0]])\r\n\r\ndiccionarioPrincipal = {\r\n\t\"diccionario\":{\r\n\t\t1\t:\t[1,2,3,4,5]\r\n\t}\r\n}\t# Storing a dictionary inside another dictionary\r\nprint(diccionarioPrincipal[\"diccionario\"])\r\n\r\nprint(diccionario.keys())\t# Printing keys\r\nprint(diccionario.values())\t# Printing values\r\nprint(len(diccionario))\t# Number of pairs in the dictionary","sub_path":"3. 
Síntaxis básica/diccionarios.py","file_name":"diccionarios.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"626938348","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/philewels/GitHub/MultiQC/multiqc/modules/deeptools/plotProfile.py\n# Compiled at: 2019-11-15 10:00:11\n# Size of source mod 2**32: 4897 bytes\n\"\"\" MultiQC submodule to parse output from deepTools plotProfile \"\"\"\nimport logging\nfrom multiqc.plots import linegraph\nlog = logging.getLogger(__name__)\n\nclass plotProfileMixin:\n\n def parse_plotProfile(self):\n \"\"\"Find plotProfile output\"\"\"\n self.deeptools_plotProfile = dict()\n for f in self.find_log_files('deeptools/plotProfile', filehandles=False):\n parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f)\n for k, v in parsed_data.items():\n if k in self.deeptools_plotProfile:\n log.warning('Replacing duplicate sample {}.'.format(k))\n self.deeptools_plotProfile[k] = v\n\n if len(parsed_data) > 0:\n self.add_data_source(f, section='plotProfile')\n\n if len(self.deeptools_plotProfile) > 0:\n xPlotBands = []\n xPlotLines = []\n plotBandHelp = ''\n try:\n xPlotBands.append({'from':converted_bin_labels[bin_labels.index('TES')], 'to':converted_bin_labels[-1], 'color':'#f7cfcf'})\n xPlotBands.append({'from':converted_bin_labels[bin_labels.index('TSS')], 'to':converted_bin_labels[bin_labels.index('TES')], 'color':'#ffffe2'})\n xPlotBands.append({'from':converted_bin_labels[0], 'to':converted_bin_labels[bin_labels.index('TSS')], 'color':'#e5fce0'})\n xPlotLines.append({'width':1, 'value':converted_bin_labels[bin_labels.index('TES')], 'dashStyle':'Dash', 'color':'#000000'})\n xPlotLines.append({'width':1, 'value':converted_bin_labels[bin_labels.index('TSS')], 'dashStyle':'Dash', 'color':'#000000'})\n plotBandHelp = '\\n * Green: {} upstream of gene to {}\\n * Yellow: {} to {}\\n * Pink: {} to {} downstream of gene\\n '.format(list(filter(None, bin_labels))[0], list(filter(None, bin_labels))[1], list(filter(None, bin_labels))[1], list(filter(None, bin_labels))[2], list(filter(None, bin_labels))[2], list(filter(None, bin_labels))[3])\n except ValueError:\n pass\n\n config = {'id':'read_distribution_profile', \n 'title':'deeptools: Read Distribution Profile after Annotation', \n 'ylab':'Occurrence', \n 'xlab':None, \n 'smooth_points':100, \n 'xPlotBands':xPlotBands, \n 'xPlotLines':xPlotLines}\n self.add_section(name='Read Distribution Profile after Annotation',\n anchor='read_distribution_profile_plot',\n description=('\\n Accumulated view of the distribution of sequence reads related to the closest annotated gene.\\n All annotated genes have been normalized to the same size.\\n\\n {}'.format(plotBandHelp)),\n plot=(linegraph.plot(self.deeptools_plotProfile, config)))\n return len(self.deeptools_plotProfile)\n\n def parsePlotProfileData(self, f):\n d = dict()\n bin_labels = []\n bins = []\n for line in f['f'].splitlines():\n cols = line.rstrip().split('\\t')\n if cols[0] == 'bin labels':\n for col in cols[2:len(cols)]:\n if col not in list(filter(None, bin_labels)):\n bin_labels.append(col)\n else:\n break\n\n elif cols[0] == 'bins':\n for col in cols[2:len(cols)]:\n if len(bins) != len(bin_labels):\n bins.append(self._int(col))\n else:\n break\n\n else:\n s_name = self.clean_s_name(cols[0], f['root'])\n d[s_name] = dict()\n 
factors = {'Kb':1000.0, \n 'Mb':1000000.0, 'Gb':1000000000.0}\n convert_factor = 1\n for k, v in factors.items():\n if k in bin_labels[0]:\n convert_factor *= v\n start = float(bin_labels[0].strip(k)) * convert_factor\n\n step = self._int(abs(start / bin_labels.index('TSS')))\n end = step * (len(bin_labels) - bin_labels.index('TSS') - 1)\n converted_bin_labels = range(self._int(start) + step, self._int(end) + step, step)\n for i in bins:\n d[s_name].update({converted_bin_labels[(i - 1)]: float(cols[(i + 1)])})\n\n return (\n d, bin_labels, converted_bin_labels)","sub_path":"pycfiles/multiqc-1.8.tar/plotProfile.cpython-37.py","file_name":"plotProfile.cpython-37.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"213611245","text":"from flask_restful import Resource, reqparse\nfrom flask import abort, request\nfrom werkzeug.datastructures import FileStorage\n\nfrom cloudinary.uploader import upload\nfrom cloudinary.utils import cloudinary_url\n\nfrom .models import Post\nfrom .app import db, app\nfrom datetime import timedelta, datetime\nfrom uuid import uuid4\nfrom .models import Anonymous, Token, User\nimport jwt\n\nfrom werkzeug.security import check_password_hash\nfrom functools import wraps\nimport os\nimport base64\nimport cryptography\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\nfrom cryptography.hazmat.backends import default_backend\nfrom dotenv import load_dotenv\nload_dotenv()\n\n# TODO: delete view, update view\n\nkdf = PBKDF2HMAC(\n hashes.SHA512(),\n 32,\n os.environ.get('SALT').encode(),\n 500000,\n default_backend()\n)\npassword = os.environ.get('ENCRYPT_KEY')\nKEY = base64.urlsafe_b64encode(kdf.derive(password.encode()))\n\nFR = Fernet(KEY)\n# print(os.environ.get('ENCRYPT_KEY'))\n\n\ndef admin_only(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return abort(401, message=\"Token is missing\")\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n # know that the current logged in user is using a token that is in the token table.\n user_obj = User.query.filter_by(public_id=data['public_id']).first()\n if user_obj.token is None and not user_obj.is_admin:\n return abort(401, message=\"You should login as admin\")\n except Exception as e:\n return abort(401, message=\"Token is invalid, {}\".format(e))\n return f(*args, **kwargs)\n\n return decorated\n\n\nclass PostList(Resource):\n def __init__(self):\n self.parse = reqparse.RequestParser()\n self.token = None\n\n def detail(self, qs):\n object = {\n 'title': qs.title,\n 'id': qs.public_id,\n 'subtitle': qs.subtitle,\n 'update': str(qs.updated),\n 'content': qs.content,\n 'archive': qs.archive,\n 'draft': qs.draft,\n 'image_url': qs.image_url\n }\n return {\"article\": [object]}\n\n def list(self, qs, articles):\n for i in qs:\n articles.append({\n 'title': i.title,\n 'id': i.public_id,\n 'subtitle': i.subtitle,\n 'update': str(i.updated),\n 'draft': i.draft,\n 'archive': i.archive,\n 'content': i.content,\n 'image_url': i.image_url\n })\n return {'list': articles[:8]}\n\n def get(self, path=None):\n if 'x-access-token' in request.headers:\n self.token = request.headers['x-access-token']\n if self.token is None:\n # Detail View\n if path is not None:\n qs = 
Post.query.filter_by(public_id=str(path)).first()\n return self.detail(qs)\n # ListView\n qs = Post.query.order_by(Post.updated.desc()).filter_by(draft=False).filter_by(archive=False).all()\n articles = []\n return self.list(qs, articles)\n else:\n # Detail View\n if path is not None:\n qs = Post.query.filter_by(public_id=str(path)).first()\n return self.detail(qs)\n # ListView\n qs = Post.query.order_by(Post.updated.desc()).all()\n articles = []\n return self.list(qs, articles)\n\n def post(self, path=None):\n if 'x-access-token' in request.headers:\n self.token = request.headers['x-access-token']\n if self.token is None: return {\"error\": \"login as admin\"}, 401\n\n if path is not None:\n qs = Post.query.filter_by(public_id=str(path)).first()\n if qs is not None:\n qs.draft = False\n qs.publish = datetime.utcnow()\n db.session.commit()\n else:\n return {\"error\": \"article doesnt exist\"}, 400\n return {\"message\": \"successfully published\"}\n self.parse.add_argument('file', type=FileStorage, location='files')\n self.parse.add_argument('title')\n self.parse.add_argument('subtitle')\n self.parse.add_argument('content')\n args = self.parse.parse_args()\n\n # Image\n image_file = args['file']\n\n # print(image_file)\n upload_result = upload(image_file, folder='Blog')\n # print('This is upload result ' + str(upload_result))\n\n (image_url, options) = cloudinary_url(upload_result['public_id'])\n # print('Image url = ' + str(image_url))\n\n # Store this info in the database (self, title, subtitle, content, image_url):\n article = Post(args['title'], args['subtitle'], args['content'], str(image_url), upload_result['public_id'])\n db.session.add(article)\n db.session.commit()\n return {'message': 'Article was successfully posted'}\n\n def put(self, path=None):\n if path is None: return abort(400, error=\"no response from the server\")\n if 'x-access-token' in request.headers:\n self.token = request.headers['x-access-token']\n if self.token is None: return {\"error\": \"login as admin\"}, 401\n\n self.parse.add_argument('title')\n self.parse.add_argument('subtitle')\n self.parse.add_argument('content')\n self.parse.add_argument('file', type=FileStorage, location='files')\n\n args = self.parse.parse_args()\n\n article = Post.query.filter_by(public_id=str(path)).first()\n\n # Image\n image_file = args['file']\n if image_file is not None:\n upload_result = upload(image_file, folder='Blog')\n (image_url, options) = cloudinary_url(upload_result['public_id'])\n\n article.image_url = image_url\n article.image_id = upload_result['public_id']\n\n article.title = args['title']\n article.subtitle = args['subtitle']\n article.content = args['content']\n\n db.session.commit()\n return {\"message\": \"Update was successful\"}\n\n def delete(self, path=None):\n if path is None: return abort(400, error=\"no response from the server\")\n if 'x-access-token' in request.headers:\n self.token = request.headers['x-access-token']\n if self.token is None: return {\"error\": \"login as admin\"}, 401\n qs = Post.query.filter_by(public_id=str(path)).first()\n if qs is not None:\n qs.archive = True\n db.session.commit()\n else:\n return {\"error\": \"article doesnt exist\"}, 400\n return {\"message\": \"Archived Successfully\"}\n\n\n\nclass AnonymousView(Resource):\n\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.time_to_exp = timedelta(hours=24)\n\n def post(self):\n self.parser.add_argument('session', type=dict, help=\"session validation\")\n args = self.parser.parse_args()\n session = 
args['session']\n if session is not None:\n session_id, session_token = session.get(\"session_id\", None), session.get(\"session_token\", None)\n if session_id is not None or session_token is not None:\n user_session_id = Anonymous.query.filter_by(session_id=session_id).first()\n user_session_token = Anonymous.query.filter_by(session_token=session_token).first()\n if user_session_token is not None:\n session_id = user_session_token.session_id\n session_token = user_session_token.session_token\n user_session_token.last_login = datetime.utcnow()\n db.session.add(user_session_token)\n db.session.commit()\n return {\"session\": {\"session_id\": session_id, \"session_token\": session_token}}\n elif user_session_id is not None:\n session_id = user_session_id.session_id\n session_token = user_session_id.session_token\n user_session_id.last_login = datetime.utcnow()\n db.session.add(user_session_id)\n db.session.commit()\n return {\"session\": {\"session_id\": session_id, \"session_token\": session_token}}\n time_exp = datetime.utcnow() + self.time_to_exp\n session_id = str(uuid4())\n token = jwt.encode({'session_id': session_id, 'exp': time_exp}, app.config['SECRET_KEY'], 'HS512')\n db.session.add(Anonymous(token.decode('UTF-8'), session_id))\n db.session.commit()\n return {\"session\": {\"session_id\": session_id, \"session_token\": token.decode('UTF-8')}}\n\n\n# class PublishArticle(Resource):\n# pass\n\n\nclass Login(Resource):\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.time_to_exp = timedelta(hours=24)\n\n def post(self):\n self.parser.add_argument('login', type=dict, help=\"login credentials are needed\")\n args = self.parser.parse_args()\n login = args['login']\n if login is not None:\n # required fields is email and password\n email = login.get(\"email\", None)\n if email is None or len(email) <= 7:\n return abort(400, error=\"email is invalid or empty\")\n if \"@\" not in email or \".\" not in email:\n return {\"error\": \"enter a valid email\"}, 400\n password = login.get(\"password\", None)\n if password is None or len(password) <= 7:\n return {\"error\": \"Invalid login credentials\"}, 401\n qs = User.query.filter_by(email=email).first()\n if qs is None:\n return {'error': \"Invalid login credentials\"}, 401\n elif qs is not None:\n if check_password_hash(qs.password, password):\n time_exp = datetime.utcnow() + self.time_to_exp\n token = jwt.encode({'public_id': qs.public_id, 'exp': time_exp}, app.config['SECRET_KEY'], 'HS512')\n # check whether the current user is in the token table\n qs.last_login = datetime.utcnow()\n db.session.add(qs)\n user = qs.token\n if user is None:\n # Add the token to the token table\n db.session.add(Token(token.decode('UTF-8'), qs.id))\n db.session.commit()\n else:\n user.token = token.decode('UTF-8')\n user.expiration = time_exp\n db.session.commit()\n encrypted_token = FR.encrypt(token)\n return {'Token': encrypted_token.decode(\"UTF-8\")}\n else:\n return {\"error\": \"Invalid login credentials\"}, 401\n else:\n return abort(401, error=\"Invalid login credentials\")\n\n\nclass VerifyToken(Resource):\n def __init__(self):\n self.token = None\n self.encrypted_token = None\n\n def post(self):\n if 'x-access-token' in request.headers:\n self.encrypted_token = request.headers['x-access-token']\n if self.encrypted_token is not None:\n try:\n self.token = FR.decrypt(self.encrypted_token.encode())\n except cryptography.fernet.InvalidToken:\n return {\"error\": \"Invalid Token\"}, 401\n try:\n data = 
jwt.decode(self.token.decode(\"UTF-8\"), app.config['SECRET_KEY'], algorithms=['HS512', 'PS512'])\n # know that the current logged in user is using a token that is in the token table.\n user_obj = User.query.filter_by(public_id=data['public_id']).first()\n\n if user_obj.token is None and not user_obj.admin:\n raise Exception(\"You should login as admin\")\n if user_obj.token.token != self.token.decode(\"UTF-8\"):\n raise Exception(\"login again\")\n except jwt.exceptions.DecodeError:\n return {\"error\": \"Invalid Token\"}, 401\n except jwt.exceptions.ExpiredSignatureError:\n return {\"error\": \"Invalid Token\"}, 401\n except Exception as e:\n return {\"error\": \"Token is invalid, {}\".format(e)}, 401\n encrypted_token = FR.encrypt(self.token)\n return {'Token': encrypted_token.decode(\"UTF-8\")}\n \n\nclass Logout(Resource):\n def post(self):\n token = None\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return {\"error\": \"Only Logged in Users can logout\"}, 400\n\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS512', 'PS512'])\n # know that the current logged in user is using a token that is in the token table.\n user_obj = User.query.filter_by(public_id=data['public_id']).first()\n if user_obj.token is None:\n return {\"error\": \"Only Logged in Users can logout\"}, 401\n db.session.delete(user_obj.token)\n db.session.commit()\n except Exception as e:\n return {\"error\": \"Something is wrong, {}\".format(e)}, 400\n return {'message': \"logged out successfully\"}\n","sub_path":"Flask/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"421628825","text":"# https://oj.leetcode.com/problems/intersection-of-two-linked-lists/\n\n# Write a program to find the node at which the intersection of two singly linked lists begins.\n\n# For example, the following two linked lists:\n\n# A: a1 → a2\n# ↘\n# c1 → c2 → c3\n# ↗ \n# B: b1 → b2 → b3\n\n# begin to intersect at node c1.\n\n# Notes:\n\n# If the two linked lists have no intersection at all, return null.\n# The linked lists must retain their original structure after the function returns.\n# You may assume there are no cycles anywhere in the entire linked structure.\n# Your code should preferably run in O(n) time and use only O(1) memory.\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nclass Solution:\n # @param two ListNodes\n # @return the intersected ListNode\n def getIntersectionNode(self, headA, headB):\n la = Solution.getLength(headA)\n lb = Solution.getLength(headB)\n if la > lb:\n headA = Solution.skipNodes(headA, la-lb)\n else:\n headB = Solution.skipNodes(headB, lb-la)\n\n while (headA != None and headB != None):\n if headA == headB:\n return headA\n headA = headA.next\n headB = headB.next\n return None\n\n @staticmethod\n def getLength(head):\n count = 0\n while head != None:\n count += 1\n head = head.next\n return count\n\n @staticmethod\n def skipNodes(head, steps):\n while steps > 0:\n head = head.next\n steps -= 1\n return head\n","sub_path":"160_IntersectionOfTwoLinkedList/IntersectionLiknedList.py","file_name":"IntersectionLiknedList.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"451596060","text":"string = input()\nnum = 
int(input())\n\ndef TextFormatPrint(string, num):\n    words = string.split() #split the string into words\n    line = \"\"\n    for word in words:\n        if len(word) >= num: #exception for long words\n            if line != \"\":\n                print(line)\n                line = \"\"\n            print(word)\n        elif len(word) + len(line) > num - 1:\n            print(line)\n            line = word\n        else:\n            if line == \"\":\n                line = word \n            else:\n                line += \" \" + word\n    if line != \"\":\n        print(line)\n\nTextFormatPrint(string, num)\n\n","sub_path":"text_formatting.py","file_name":"text_formatting.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"291908370","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom .forms import SignUpForm, UserUpdateForm, ProfileUpdateForm, NewHoodForm, EditHoodForm, NewBizForm, NewPostForm\nfrom .models import Profile, Neighbourhood, Business, Post\n\n\n# Create your views here.\ndef home(request):\n    return render(request, 'home.html')\n\n\ndef register(request):\n    if request.method == 'POST':\n        form = SignUpForm(request.POST)\n        if form.is_valid():\n            form.save()\n            username = form.cleaned_data.get('username')\n            password = form.cleaned_data.get('password1')\n            user = authenticate(username=username, password=password)\n            login(request, user)\n            return redirect('home')\n    else:\n        form = SignUpForm()\n    return render(request, 'registration/registration_form.html', {'form': form})\n\n\n@login_required(login_url='/accounts/login/')\ndef profile(request):\n    if request.method == 'POST':\n        user_form = UserUpdateForm(request.POST, instance=request.user)\n        profile_form = ProfileUpdateForm(\n            request.POST, request.FILES, instance=request.user.profile)\n\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n            messages.success(\n                request, f'Your account has been updated successfully!')\n            return redirect('profile')\n    else:\n        user_form = UserUpdateForm(instance=request.user)\n        profile_form = ProfileUpdateForm(instance=request.user.profile)\n    context = {\n        'user_form': user_form,\n        'profile_form': profile_form\n    }\n    return render(request, 'profile.html', context)\n\n\n@login_required(login_url='/accounts/login/')\ndef update_profile(request):\n    current_user = request.user\n    if request.method == 'POST':\n\n        user_form = UserUpdateForm(request.POST, instance=request.user)\n        profile_form = ProfileUpdateForm(\n            request.POST, request.FILES, instance=request.user)\n\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n\n            return redirect('profile')\n\n    else:\n        user_form = UserUpdateForm(instance=request.user)\n        profile_form = ProfileUpdateForm(instance=request.user)\n\n    context = {\n        'user_form': user_form,\n        'profile_form': profile_form\n\n    }\n\n    return render(request, 'update_profile.html', context)\n\n\n@login_required(login_url='/accounts/login/')\ndef hood(request):\n    hoods = Neighbourhood.objects.all()\n    return render(request, 'neighbourhoods.html', {\"hoods\": hoods})\n\n\n@login_required(login_url='/accounts/login/')\ndef new_hood(request):\n    current_user = request.user\n    if request.method == 'POST':\n        form = NewHoodForm(request.POST, request.FILES)\n        if form.is_valid():\n            image = form.save(commit=False)\n            image.admin = current_user.profile\n\n            image.save()\n\n            return 
redirect('hood')\n\n else:\n form = NewHoodForm()\n return render(request, 'new_hood.html', {\"form\": form})\n\n\ndef edit_hood(request):\n current_user = request.user\n if request.method == 'POST':\n form = EditHoodForm(request.POST, request.FILES, instance=request.user)\n if form.is_valid():\n image = form.save(commit=False)\n image.admin = current_user.profile\n\n image.save()\n return redirect('hood')\n\n else:\n form = EditHoodForm()\n return render(request, 'edit_hood.html', {'form': form})\n\n\ndef joinhood(request, id):\n hood = get_object_or_404(Neighbourhood, id=id)\n request.user.profile.neighbourhood = hood\n request.user.profile.save()\n return redirect('hood')\n\n\ndef leavehood(request, id):\n hood = get_object_or_404(Neighbourhood, id=id)\n request.user.profile.neighbourhood = None\n request.user.profile.save()\n return redirect('hood')\n\n\n@login_required(login_url='/accounts/login/')\ndef singlehood(request, id):\n hood = Neighbourhood.objects.get(id=id)\n return render(request, 'singlehood.html', {'hood':hood})\n\n\n@login_required(login_url='/accounts/login/')\ndef businesses(request, id):\n business = Business. hood_biz(id=id)\n return render(request, 'business.html', {'business': business})\n\n\n@login_required(login_url='/accounts/login/')\ndef newbiz(request):\n current_user = request.user\n if request.method == 'POST':\n form = NewBizForm(request.POST, request.FILES)\n if form.is_valid():\n business = form.save(commit=False)\n business.user = current_user\n\n business.save()\n\n return redirect('hood')\n\n else:\n form = NewBizForm()\n return render(request, 'newbiz.html', {\"form\": form})\n\n\n@login_required(login_url='/accounts/login/')\ndef posthood(request, id):\n post = Post.hood_post(id=id)\n return render(request, 'hoodpost.html', {'post': post})\n\n\n@login_required(login_url='/accounts/login/')\ndef post(request):\n current_user = request.user\n if request.method == 'POST':\n form = NewPostForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = current_user\n\n post.save()\n\n return redirect('hood')\n\n else:\n form = NewPostForm()\n return render(request, 'post.html', {\"form\": form})\n","sub_path":"neibour/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621813897","text":"from flaskgolf.models import User, Teams, RouteTracking\nfrom flask import render_template, url_for, flash, redirect, request\nfrom flaskgolf import application, db, bcrypt, current_tourney_id\nfrom flaskgolf.forms import RegistrationForm, LoginForm\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom flaskgolf.data_loads import load_leaderboard, load_round_results, load_scoreboard\nfrom flaskgolf.data_pulls import (users_team, scoreboard_pull,leaderboard_pull,\n pull_rds_data, pull_tourney_info, tiger_results,\n players_picked, dict_players_picked_by_team,\n pull_available_players, pull_seconds_since_refresh,\n pull_db_last_refresh, all_users_teams,\n pull_teams_in_current, cut_players_score)\n\n\n@application.route(\"/\")\n@application.route(\"/home\")\ndef home():\n '''\n if current_user.is_authenticated:\n df_team_results = users_team(current_user.id)\n print(df_team_results)\n else:\n df_team_results = pd.DataFrame()\n '''\n\n try:\n tracker = RouteTracking(user_id=current_user.id, route='home')\n db.session.add(tracker)\n db.session.commit()\n except:\n print(\"No 
User\")\n return render_template('home.html', title='Home')\n\n\n@application.errorhandler(404)\ndef error_404(e):\n # note that we set the 404 status explicitly\n return render_template('404.html'), 404\n\n\n@application.errorhandler(403)\ndef error_403(e):\n return render_template('403.html'), 403\n\n\n@application.errorhandler(500)\ndef error_500(e):\n return render_template('500.html'), 500\n\n\n@application.route(\"/scoreboard\")\n@login_required\ndef scoreboard():\n\n try:\n tracker = RouteTracking(user_id=current_user.id, route='scoreboard')\n db.session.add(tracker)\n db.session.commit()\n except:\n print(\"No User\")\n\n df_sb = scoreboard_pull()\n df_players = players_picked(current_user.id)\n team_dict = dict_players_picked_by_team()\n df_sb_teams = pull_rds_data(f'''SELECT a.*, b.username, b.game_score, b.rank, b.new_rank\n FROM teams a LEFT JOIN scoreboard b on a.id=b.id\n WHERE tourney_id = {current_tourney_id}\n ''')\n\n df_all_users = all_users_teams()\n cut_score = cut_players_score()\n db_seconds = pull_seconds_since_refresh()\n db_seconds = db_seconds - 240\n\n # Hopefully don't need to load anymore on the route\n if db_seconds > 300:\n load_round_results()\n load_leaderboard()\n load_scoreboard()\n db_seconds = pull_seconds_since_refresh()\n db_seconds = db_seconds - 240\n\n db_minutes = int(db_seconds / 60.0)\n\n if db_minutes < 1:\n data_as_of = \"Updated <1 minute ago\"\n else:\n data_as_of = f\"Updated {db_minutes} minute(s) ago\"\n\n current_tourney_info = pull_tourney_info()\n tourney_status = current_tourney_info['tourney_status'][0]\n if tourney_status == 'Scheduled':\n return render_template('scoreboard_waiting.html',\n title='Scoreboard',\n scoreboard=df_sb,\n df_players=df_players)\n\n else:\n return render_template('scoreboard.html',\n title='Scoreboard',\n team_dict=team_dict,\n scoreboard=df_sb,\n df_players=df_players,\n df_sb_teams=df_sb_teams,\n df_all_users=df_all_users,\n data_as_of=data_as_of,\n cut_score=cut_score)\n\n\n@application.route(\"/how_to_play\")\ndef how_to_play():\n try:\n tracker = RouteTracking(user_id=current_user.id, route='how_to_play')\n db.session.add(tracker)\n db.session.commit()\n except:\n print(\"No User\")\n return render_template('how_to_play.html', title='How To Play')\n\n\n@login_required\n@application.route(\"/kingofdonks\")\ndef king_of_donks():\n\n df_tracking = pull_rds_data('''SELECT user_id,route, count(*) as views FROM route_tracking\n GROUP BY 1,2\n ORDER BY 3 DESC''')\n\n df_users = pull_rds_data('''SELECT b.username, count(*) as views FROM route_tracking a\n LEFT JOIN user b ON a.user_id=b.id\n GROUP BY 1\n ORDER BY 2 DESC''')\n\n if current_user.id == 28:\n teams = pull_teams_in_current()\n return render_template('kingofdonks.html', title=\"King O' Donks\",\n teams=teams, df_tracking=df_tracking,\n df_users=df_users)\n\n\n@application.route('/teams/')\ndef teams(user_id):\n df_team_results = users_team(user_id)\n return render_template('user_team.html', title='Donkey Team',\n user_id=user_id, team=df_team_results)\n\n\n@application.route('/player/')\ndef player_info(player_id):\n\n player_df = pull_rds_data(f'SELECT * FROM golferinfo WHERE player_id = {player_id}')\n return render_template('player_profile.html', title='Look at this donk',\n player_id=player_id, player_df=player_df)\n\n\n@application.route(\"/my_team\", methods=['GET', 'POST'])\n@login_required\ndef my_team():\n try:\n tracker = RouteTracking(user_id=current_user.id, route='my_team')\n db.session.add(tracker)\n db.session.commit()\n except:\n 
print(\"No user\")\n\n # Pull users team\n df_team_results = users_team(current_user.id)\n\n # Pull in current tourney info\n current_tourney_info = pull_tourney_info()\n tourney_status = current_tourney_info['tourney_status'][0]\n\n lb_df = pull_available_players()\n\n # If they have a team, take them to their team\n if not df_team_results.empty:\n return render_template('user_team.html', title='My Team',\n user_id=current_user.id, team=df_team_results)\n\n # If the tourney is anything other than Scheduled - don't let them draft\n elif tourney_status != 'Scheduled':\n flash('Sorry - tourney has already started!', 'danger')\n return render_template('home.html', title='Home')\n\n # Else - let them draft!\n else:\n if request.method == 'POST':\n team_list = request.form.getlist('team_list')\n print(f'TEAM LIST: {team_list}')\n grouper = lb_df.loc[lb_df['player_id'].isin(team_list)].groupby(['tier'])['player_name'].count().reset_index()\n tier_dict = dict(zip(grouper.tier, grouper.player_name))\n tier_1 = tier_dict.get('Tier 1', 0)\n tier_2 = tier_dict.get('Tier 2', 0)\n\n # Make sure they pick 3 people from each tier\n if tier_1 == 3 and tier_2 == 3:\n print('Clutch')\n print(team_list)\n try:\n for golfer in team_list:\n entry = Teams(id=current_user.id,\n tourney_id=current_tourney_id,\n golfer=golfer)\n db.session.add(entry)\n db.session.commit()\n flash('Congrats - you have selected a team!', 'success')\n except Exception as e:\n flash(\"Uh Oh - Weird Error\", 'danger')\n flash(f'{e}', 'info')\n else:\n flash('Pick exactly 3 from each tier, DUMMY!', 'danger')\n return render_template('select_team_v2.html',\n title='My Team',\n leaderboard=lb_df)\n\n return redirect(url_for('scoreboard'))\n else:\n print(request.method)\n\n return render_template('select_team_v2.html',\n title='My Team',\n leaderboard=lb_df)\n\n\n@application.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n lower_email = form.email.data.lower()\n user = User(username=form.username.data, email=lower_email,\n password=hashed_password)\n db.session.add(user)\n db.session.commit()\n\n flash(f'Your account has been created. Booooom!', 'success')\n login_user(user)\n return redirect(url_for('home'))\n return render_template('register.html', title='Register', form=form)\n\n\n@application.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data.lower()).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n\n # If user tries to access page and gets redirected,\n # we want to re-direct them back to where they wanted to go.\n next_page = request.args.get('next')\n flash(f'WADDUP, {user.username}', 'success')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Login Unsuccessful. 
Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@application.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@application.route(\"/account\")\n@login_required\ndef account():\n try:\n tracker = RouteTracking(user_id=current_user.id, route='account')\n db.session.add(tracker)\n db.session.commit()\n except:\n print(\"No user\")\n return render_template('account.html', title='Account')\n\n\n@application.route(\"/tourney_leaderboard\")\n@login_required\ndef tourney_leaderboard():\n try:\n tracker = RouteTracking(user_id=current_user.id, route='tourney_leaderboard')\n db.session.add(tracker)\n db.session.commit()\n except:\n print(\"No user\")\n # If data hasn't been loaded in 5 minutes, then pull refresh\n db_seconds = pull_seconds_since_refresh()\n if db_seconds > 300:\n load_round_results()\n load_leaderboard()\n\n db_minutes = int(db_seconds / 60.0)\n\n if db_minutes < 1:\n data_as_of = \"Updated <1 minute ago\"\n else:\n data_as_of = f\"Updated {db_minutes} minute(s) ago\"\n\n df_tourney = leaderboard_pull(user_id=current_user.id)\n df_info = pull_tourney_info()\n df_tiger = tiger_results(user_id=current_user.id)\n\n return render_template('tourney_leaderboard.html',\n title='Tourney Leaderboard',\n leaderboard=df_tourney,\n df_info=df_info,\n df_tiger=df_tiger,\n data_as_of=data_as_of)\n","sub_path":"flaskgolf/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367322584","text":"#!/usr/bin/env python\n\nimport codecs\nimport feedparser\nimport json\nimport requests\nimport sys\nimport time\n\n\nBASE_URL = \"http://collections.ushmm.org/search/\" + \\\n \"?f%5Bfnd_type%5D%5B%5D=oh_transcript&\" + \\\n \"f%5Blanguage_facet%5D%5B%5D=English&format=atom\"\nMAX_REQUESTS = 1000\nSLEEP_SECONDS = 0.1\n\n\nif __name__ == '__main__':\n page_counter = 0\n item_counter = 0\n url = BASE_URL\n with codecs.open('transcripts.json', 'w', 'utf-8') as fp:\n while url:\n f = feedparser.parse(url)\n for e in f.entries:\n print(e.id, e.title)\n r = requests.get(e.id + '.json')\n j = r.json()\n fp.write(json.dumps(j['response']['document']) + '\\n')\n item_counter += 1\n time.sleep(SLEEP_SECONDS)\n next_links = [x['href'] for x in f.feed.links\n if x['rel'] == 'next']\n if next_links:\n url = next_links[0]\n else:\n url = ''\n page_counter += 1\n if page_counter >= MAX_REQUESTS:\n sys.exit(0)\n print('Fetched %s page(s), %s item(s)' % (page_counter,\n item_counter))\n","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"622774328","text":"# ::File processing\nmyfile = open(\"files/fruits.txt\")\ncontent = myfile.read()\nmyfile.close()\nprint(content)\nprint(content)\n\nbear_file = open('files/bear.txt')\nbear_content = bear_file.read()\nprint(bear_content[:90])\n\n\n# Function that finds the number of characters in a file\ndef char_filepath(character, filepath):\n f = open(filepath)\n fc = f.read()\n return fc.count(character)\n\n\nprint(char_filepath('T', \"files/bear.txt\"))\n\n# `with` context manager\nwith open('files/bear.txt') as file2:\n file2_content = file2.read()\n\nprint(file2_content.count('bear'))\n\n# Writing text to a file\nwith open('files/veggies.txt', 'w') as veggies_file:\n 
veggies_file.write('Peas\\nOnion\\nCarrot\\nLeaks')\n\n# Append to a file\nwith open('files/append.txt', 'a') as append_file:\n append_file.write('One\\nTwo\\nThree\\n')\n\nwith open('files/append.txt', 'a+') as append_file:\n append_file.write('Four\\nFive\\n') # Cursor will rest at the end of the file\n append_file.seek(0) # Move cursor in file to beginning of file\n content = append_file.read()\n\nprint(content)\n","sub_path":"file_processing.py","file_name":"file_processing.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430389997","text":"import discord\nimport aiohttp\nimport logging\nimport re\n\nfrom redbot.core import commands, checks, Config\n\nAPI_URL = \"https://www.cleverbot.com/getreply\"\nIO_API_URL = \"https://cleverbot.io/1.0\"\n\nlog = logging.getLogger(\"red.Cleverbot\")\n\n\nclass CleverbotError(Exception):\n pass\n\n\nclass NoCredentials(CleverbotError):\n pass\n\n\nclass InvalidCredentials(CleverbotError):\n pass\n\n\nclass APIError(CleverbotError):\n pass\n\n\nclass OutOfRequests(CleverbotError):\n pass\n\n\nclass OutdatedCredentials(CleverbotError):\n pass\n\n\nclass Cleverbot(commands.Cog):\n \"\"\"Cleverbot rewritten for V3 from https://github.com/Twentysix26/26-Cogs/tree/master/cleverbot\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.config = Config.get_conf(self, 127486454786)\n default_global = {\"api\": None, \"io_user\": None, \"io_key\": None, \"allow_dm\":False}\n default_guild = {\"channel\": None, \"toggle\": False}\n self.config.register_global(**default_global)\n self.config.register_guild(**default_guild)\n self.session = aiohttp.ClientSession(loop=self.bot.loop)\n self.instances = {}\n\n @commands.command()\n async def cleverbot(self, ctx, *, message):\n \"\"\"Talk with cleverbot\"\"\"\n author = ctx.message.author\n channel = ctx.message.channel\n async with channel.typing():\n try:\n result = await self.get_response(author, message)\n except NoCredentials:\n await ctx.send(\n \"The owner needs to set the credentials first.\\n\"\n \"See: `[p]cleverbotset` or `[p]cleverbotset`\"\n )\n except APIError as e:\n await ctx.send(\"Error contacting the API. Error code: {}\".format(e))\n except InvalidCredentials:\n await ctx.send(\n \"The token that has been set is not valid.\\n\" \"See: `[p]cleverbotset`\"\n )\n except OutOfRequests:\n await ctx.send(\n \"You have ran out of requests for this month. \"\n \"The free tier has a 5000 requests a month limit.\"\n )\n except OutdatedCredentials:\n await ctx.send(\n \"You need a valid cleverbot.com api key for this to \"\n \"work. The old cleverbot.io service will soon be no \"\n \"longer active. 
See `[p]help cleverbotset`\"\n )\n else:\n await ctx.send(result)\n\n @commands.group()\n async def cleverbotset(self, ctx):\n \"\"\"\n Settings for cleverbot\n \"\"\"\n pass\n\n @cleverbotset.command()\n @commands.guild_only()\n @checks.mod_or_permissions(manage_channels=True)\n async def toggle(self, ctx):\n \"\"\"Toggles reply on mention\"\"\"\n guild = ctx.message.guild\n if not await self.config.guild(guild).toggle():\n await self.config.guild(guild).toggle.set(True)\n await ctx.send(\"I will reply on mention.\")\n else:\n await self.config.guild(guild).toggle.set(False)\n await ctx.send(\"I won't reply on mention anymore.\")\n\n @cleverbotset.command()\n @checks.is_owner()\n async def dm(self, ctx):\n \"\"\"Toggles reply in DM\"\"\"\n guild = ctx.message.guild\n if not await self.config.allow_dm():\n await self.config.allow_dm.set(True)\n await ctx.send(\"I will reply directly to DM's.\")\n else:\n await self.config.allow_dm.set(False)\n await ctx.send(\"I won't reply directly to DM's.\")\n\n @cleverbotset.command()\n @checks.mod_or_permissions(manage_channels=True)\n @commands.guild_only()\n async def channel(self, ctx, channel: discord.TextChannel = None):\n \"\"\"\n Toggles channel for automatic replies\n\n do `[p]cleverbot channel` after a channel is set to disable.\n \"\"\"\n guild = ctx.message.guild\n cur_auto_channel = await self.config.guild(guild).channel()\n if not cur_auto_channel:\n if channel is None:\n channel = ctx.message.channel\n await self.config.guild(guild).channel.set(channel.id)\n await ctx.send(\"I will reply in {}\".format(channel.mention))\n else:\n await self.config.guild(guild).channel.set(None)\n await ctx.send(\"Automatic replies turned off.\")\n\n @cleverbotset.command()\n @checks.is_owner()\n async def apikey(self, ctx, key: str = None):\n \"\"\"Sets token to be used with cleverbot.com\n You can get it from https://www.cleverbot.com/api/\n Use this command in direct message to keep your\n token secret\"\"\"\n await self.config.api.set(key)\n await ctx.send(\"Credentials set.\")\n\n @cleverbotset.command()\n @checks.is_owner()\n async def ioapikey(self, ctx, io_user: str = None, io_key: str = None):\n \"\"\"Sets token to be used with cleverbot.io\n You can get it from https://www.cleverbot.io/\n Use this command in direct message to keep your\n token secret\"\"\"\n await self.config.io_user.set(io_user)\n await self.config.io_key.set(io_key)\n await ctx.send(\"Credentials set.\")\n\n async def get_response(self, author, text):\n payload = {}\n try:\n payload[\"key\"] = await self.get_credentials()\n payload[\"cs\"] = self.instances.get(str(author.id), \"\")\n payload[\"input\"] = text\n return await self.get_cleverbotcom_response(payload, author)\n except NoCredentials:\n payload[\"user\"], payload[\"key\"] = await self.get_io_credentials()\n payload[\"nick\"] = str(\"{}\".format(self.bot.user))\n return await self.get_cleverbotio_response(payload, text)\n\n async def make_cleverbotio_instance(self, payload):\n \"\"\"Makes the cleverbot.io instance if one isn't created for the user\"\"\"\n del payload[\"text\"]\n async with self.session.post(IO_API_URL + \"/create\", json=payload) as r:\n if r.status == 200:\n return\n elif r.status == 400:\n try:\n error_msg = await r.json()\n except:\n error_msg = \"Error status 400, credentials seem to be invalid\"\n pass\n log.error(error_msg)\n raise InvalidCredentials()\n else:\n error_msg = \"Error making instance: \" + str(r.status)\n log.error(error_msg)\n raise APIError(error_msg)\n\n async def 
get_cleverbotio_response(self, payload, text):\n payload[\"text\"] = text\n async with self.session.post(IO_API_URL + \"/ask/\", json=payload) as r:\n if r.status == 200:\n data = await r.json()\n elif r.status == 400:\n # Try to make the instance for the user first before raising the error\n await self.make_cleverbotio_instance(payload)\n return await self.get_cleverbotio_response(payload, text)\n else:\n error_msg = \"Error getting response: \" + str(r.status)\n log.error(error_msg)\n raise APIError(error_msg)\n return data[\"response\"]\n\n async def get_cleverbotcom_response(self, payload, author):\n async with self.session.get(API_URL, params=payload) as r:\n # print(r.status)\n if r.status == 200:\n data = await r.json()\n self.instances[str(author.id)] = data[\"cs\"] # Preserves conversation status\n elif r.status == 401:\n log.error(\"Cleverbot.com Invalid Credentials\")\n raise InvalidCredentials()\n elif r.status == 503:\n log.error(\"Cleverbot.com Out of Requests\")\n raise OutOfRequests()\n else:\n error_msg = \"Cleverbot.com API Error \" + str(r.status)\n log.error(error_msg)\n raise APIError(error_msg)\n return data[\"output\"]\n\n async def get_credentials(self):\n key = await self.config.api()\n if key is None:\n raise NoCredentials()\n else:\n return key\n\n async def get_io_credentials(self):\n io_key = await self.config.io_key()\n io_user = await self.config.io_user()\n if io_key is None:\n raise NoCredentials()\n else:\n return io_user, io_key\n\n async def on_message(self, message):\n guild = message.guild\n if guild is None:\n if await self.config.allow_dm() and message.author.id != self.bot.user.id:\n ctx = await self.bot.get_context(message)\n if ctx.prefix:\n return\n async with message.channel.typing():\n try:\n response = await self.get_response(\n message.author, message.clean_content\n )\n except NoCredentials:\n await ctx.send(\n \"The owner needs to set the credentials first.\\n\"\n \"See: [p]cleverbot apikey\"\n )\n except APIError as e:\n await ctx.send(\n \"Error contacting the API. Error code: {}\".format(e)\n )\n except InvalidCredentials:\n await ctx.send(\n \"The token that has been set is not valid.\\n\"\n \"See: [p]cleverbotset\"\n )\n except OutOfRequests:\n await ctx.send(\n \"You have ran out of requests for this month. \"\n \"The free tier has a 5000 requests a month limit.\"\n )\n except OutdatedCredentials:\n await ctx.send(\n \"You need a valid cleverbot.com api key for this to \"\n \"work. The old cleverbot.io service will soon be no \"\n \"longer active. See [p]help cleverbotset\"\n )\n else:\n await ctx.send(response)\n return\n\n author = message.author\n channel = message.channel\n msg = message.content\n to_strip = f\"(?m)^(<@!?{guild.me.id}>)\"\n is_mention = re.findall(to_strip, msg)\n if message.author.id != self.bot.user.id:\n text = message.clean_content\n if not is_mention and message.channel.id != await self.config.guild(guild).channel():\n return\n if not await self.config.guild(guild).toggle():\n return\n async with channel.typing():\n try:\n response = await self.get_response(author, text)\n except NoCredentials:\n await channel.send(\n \"The owner needs to set the credentials first.\\n\"\n \"See: `[p]cleverbot apikey`\"\n )\n except APIError as e:\n await channel.send(\"Error contacting the API. 
Error code: {}\".format(e))\n except InvalidCredentials:\n await channel.send(\n \"The token that has been set is not valid.\\n\" \"See: `[p]cleverbotset`\"\n )\n except OutOfRequests:\n await channel.send(\n \"You have ran out of requests for this month. \"\n \"The free tier has a 5000 requests a month limit.\"\n )\n except OutdatedCredentials:\n await channel.send(\n \"You need a valid cleverbot.com api key for this to \"\n \"work. The old cleverbot.io service will soon be no \"\n \"longer active. See `[p]help cleverbotset`\"\n )\n else:\n await channel.send(response)\n\n def __unload(self):\n self.bot.loop.create_task(self.session.close())\n","sub_path":"cleverbot/cleverbot.py","file_name":"cleverbot.py","file_ext":"py","file_size_in_byte":12303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409936797","text":"\"\"\"\nCMIT-135-40A, Week 3: Assignment: Conditional Logic Programs, Program #3\nby Jennifer C. Roy 01/31/2021\n\nWrite a program that asks for two numbers.\nIf the sum of the numbers is greater than 100, print \"They add up to a big number\" if it\nis less than/equal to 100 than print \"They add up to ____\".\n\"\"\"\nnum1 = input('Enter first number:')\nnum2 = input('Enter second number:')\n\n# Declaring the value of two numbers\nsum = float(num1) + float(num2)\n\n# Displaying the sum output per Mr. Bostock's instructions\nif sum > 100:\n print(\"They add up to a big number\")\nelse:\n print(f'They add up to {sum}')\n","sub_path":"Week_3/ps3_2num.py","file_name":"ps3_2num.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114732453","text":"# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. 
See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom storyboard.common.working_dir import get_plugin_directory\n\n\nCONF = cfg.CONF\nLOG = log.getLogger(__name__)\n\nPLUGIN_OPTS = [\n cfg.BoolOpt(\"enable\",\n default=False,\n help=\"Enable, or disable, the notification email plugin.\"),\n cfg.StrOpt(\"sender\",\n default='StoryBoard (Do Not Reply)'\n '',\n help=\"The email address from which storyboard will send its \"\n \"messages.\"),\n cfg.StrOpt(\"reply_to\",\n default=None,\n help=\"The email address of the Reply-To header (optional).\"),\n cfg.StrOpt(\"default_url\",\n default=None,\n help=\"The default/fallback url base to use in emails.\"),\n cfg.StrOpt(\"smtp_host\",\n default='localhost',\n help=\"The SMTP server to use.\"),\n cfg.IntOpt(\"smtp_port\",\n default=25,\n help=\"The SMTP Server Port to connect to (default 25).\"),\n cfg.IntOpt(\"smtp_timeout\",\n default=10,\n help=\"Timeout, in seconds, to wait for the SMTP connection to \"\n \"fail\"),\n cfg.StrOpt(\"smtp_local_hostname\",\n default=None,\n help=\"The FQDN of the sending host when identifying itself \"\n \"to the SMTP server (optional).\"),\n cfg.StrOpt(\"smtp_ssl_keyfile\",\n default=None,\n help=\"Path to the SSL Keyfile, when using ESMTP. Please make \"\n \"sure the storyboard client can read this file.\"),\n cfg.StrOpt(\"smtp_ssl_certfile\",\n default=None,\n help=\"Path to the SSL Certificate, when using ESMTP \"\n \"(optional). Please make sure the storyboard client can \"\n \"read this file.\"),\n cfg.StrOpt(\"smtp_user\",\n default=None,\n help=\"Username/login for the SMTP server.\"),\n cfg.StrOpt(\"smtp_password\",\n default=None,\n help=\"Password for the SMTP server.\")\n]\n\nCONF.register_opts(PLUGIN_OPTS, \"plugin_email\")\n\n\ndef get_email_directory():\n \"\"\"A shared utility method that always provides the same working\n directory. 
Error handling is explicitly not provided, as the methods used\n 'should' be consistent about the errors they themselves raise.\n \"\"\"\n return get_plugin_directory(\"email\")\n","sub_path":"storyboard/plugin/email/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190633450","text":"from django.core.management.base import BaseCommand, CommandError\nfrom contenidos.modulos.contactos.models import Newsletter\n\nNewsletter.objects.all()\n\nclass Command(BaseCommand):\n args = ''\n help = 'upload contacts on csv file'\n\n def handle(self, *args, **options):\n for newsletter in args:\n print(newsletter)\n try:\n n = Newsletter.objects.get(pk=int(newsletter))\n n.send_newsletter()\n except Newsletter.DoesNotExist:\n print('no existe el newsletter')\n except Exception as e:\n print(str(e))\n","sub_path":"contenidos/modulos/contactos/management/commands/send_newsletter.py","file_name":"send_newsletter.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"247590604","text":"import pygame\n\npygame.init()\n\nclass button:\n\tdef __init__ (self, size, text, color, colorS, pos):\n\t\tself.size = size\n\t\tself.text = text\n\t\tself.color = color\n\t\tself.colorS = colorS\n\t\tself.pos = pos\n\t\tself.button = pygame.font.Font(None, self.size).render(self.text, 1, self.color)\n\t\tself.bsize = self.button.get_size()\n\tdef draw(self, surf):\n\t\tif self.isSelected():\n\t\t\tself.button = pygame.font.Font(None, self.size).render(self.text, 1, self.colorS)\n\t\telse:\n\t\t\tself.button = pygame.font.Font(None, self.size).render(self.text, 1, self.color)\n\t\tsurf.blit(self.button, self.pos)\n\tdef isSelected(self):\n\t\tcoord = (self.pos[0]+self.button.get_size()[0], self.pos[1]+self.button.get_size()[1])\n\t\tcursor = pygame.mouse.get_pos()\n\t\tif cursor[0]>=self.pos[0] and cursor[0]<=coord[0] and cursor[1]>=self.pos[1] and cursor[1]<=coord[1]:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\nmenu = pygame.display.set_mode((0, 0))\non = True\nsize = 100\nstate = 0\n\nheight = menu.get_size()[1]\nbutton1 = button(size, \"New Game\", (255, 255, 255), (250,180,2), (200, (height-3*size)/4))\nbutton2 = button(size, \"Continue\", (255, 255, 255), (250,180,2), (200, (height-2*size)/2))\nbutton3 = button(size, \"Exit\", (255, 255, 255), (250,180,2), (200, 3*(height-size)/4))\nbutton4 = button(size, \"Return\", (255, 255, 255), (250,180,2), (200, 3*(height-size)/4))\nbgColor = (0, 120, 120)\n\nwhile on:\n\tif state == 0:\n\t\tpygame.draw.rect(menu, bgColor, (0, 0, menu.get_size()[0], menu.get_size()[0]))\n\t\tbutton1.draw(menu)\n\t\tbutton2.draw(menu)\n\t\tbutton3.draw(menu)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\ton = False\n\t\t\tif button1.isSelected() and event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tstate = 1\n\t\t\tif button2.isSelected() and event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tstate = 2\n\t\t\tif button3.isSelected() and event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\ton = False\n\tif state == 1:\n\t\tpygame.draw.rect(menu, bgColor, (0, 0, menu.get_size()[0], menu.get_size()[0]))\n\t\tmenu.blit(pygame.font.Font(None, 50).render(\"New Game\", 1, (255, 255, 255)), (200, 300))\n\t\tbutton4.draw(menu)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\ton = False\n\t\t\tif 
button4.isSelected() and event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tstate = 0\n\tif state == 2:\n\t\tpygame.draw.rect(menu, bgColor, (0, 0, menu.get_size()[0], menu.get_size()[0]))\n\t\tmenu.blit(pygame.font.Font(None, 50).render(\"Continue\", 1, (255, 255, 255)), (200, 300))\n\t\tbutton4.draw(menu)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\ton = False\n\t\t\tif button4.isSelected() and event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tstate = 0\n\tpygame.display.update()\n\npygame.quit()\n","sub_path":"menu/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"492159047","text":"\"\"\"\r\nAgentpy Model Module\r\nContent: Main class for agent-based models\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\nfrom .output import DataDict\r\nfrom .objects import ApEnv, Agent, Environment\r\nfrom .network import Network\r\nfrom .grid import Grid\r\nfrom .tools import AttrDict, AgentpyError, make_list\r\nfrom .lists import ObjList\r\n\r\n\r\nclass Model(ApEnv):\r\n \"\"\"\r\n An agent-based model that can hold environments and agents.\r\n\r\n This class can be used as a parent class for custom models.\r\n Class attributes can be accessed like dictionary items.\r\n To define the procedures of a simulation, override the methods\r\n :func:`Model.setup`, :func:`Model.step`,\r\n :func:`Model.update`, and :func:`Model.end`.\r\n See :func:`Model.run` for more information on the simulation procedure.\r\n\r\n Attributes:\r\n name (str): The models' name.\r\n envs (EnvList): The models' environments.\r\n agents (AgentList): The models' agents.\r\n p (AttrDict): The models' parameters.\r\n t (int): Current time-step of the model.\r\n log (dict): The models' recorded variables.\r\n output (DataDict): Output data after simulation.\r\n\r\n Arguments:\r\n parameters (dict, optional): Dictionary of model parameters.\r\n Recommended types for parameters are int, float, str, list,\r\n numpy.integer, numpy.floating, and numpy.ndarray.\r\n Other types might cause errors.\r\n run_id (int, optional): Number of current run (default None).\r\n scenario (str, optional): Current scenario (default None).\r\n **kwargs: Will be forwarded to :func:`Model.setup`\r\n \"\"\"\r\n\r\n def __init__(self, parameters=None, run_id=None, scenario=None, **kwargs):\r\n\r\n self._id_counter = -1\r\n self._obj_dict = {} # Objects mapped by their id\r\n super().__init__(self) # Model will be have id 0\r\n\r\n self.t = 0\r\n self.run_id = run_id\r\n self.scenario = scenario\r\n\r\n # Recording\r\n self._measure_log = {}\r\n self.output = DataDict()\r\n self.output.log = {'model_type': self.type,\r\n 'time_stamp': str(datetime.now())}\r\n\r\n # Private variables\r\n self._steps = None\r\n self._parameters = AttrDict(parameters)\r\n self._stop = False\r\n self._set_var_ignore()\r\n self._setup_kwargs = kwargs\r\n\r\n def __repr__(self):\r\n rep = f\"Agent-based model {{\"\r\n keys = ['type', 'agents', 'envs', 'p']\r\n items = [(k, self[k]) for k in keys]\r\n items += list(self.__dict__.items())\r\n for k, v in items:\r\n if k[0] != '_':\r\n v = v._short_repr() if '_short_repr' in dir(v) else v\r\n rep += f\"\\n'{k}': {v}\"\r\n return rep + '\\n}'\r\n\r\n @property\r\n def objects(self):\r\n \"\"\"The models agents and environments (list of objects).\"\"\"\r\n return ObjList(self.agents + self.envs)\r\n\r\n def get_obj(self, obj_id):\r\n \"\"\" Return model 
object with obj_id (int). \"\"\"\r\n try:\r\n return self._obj_dict[obj_id]\r\n except KeyError:\r\n raise ValueError(f\"Model has no object with obj_id '{obj_id}'.\")\r\n\r\n def _new_id(self):\r\n # Generate new object id\r\n self._id_counter += 1\r\n return self._id_counter\r\n\r\n def add_env(self, env_class=Environment, **kwargs):\r\n \"\"\" Creates a new environment. \"\"\"\r\n new_env = env_class(self.model, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n def add_network(self, graph=None, agents=None, **kwargs):\r\n \"\"\" Creates a new environment with a network.\r\n Arguments are forwarded to :class:`Network`. \"\"\"\r\n new_env = Network(self.model, graph=graph, agents=agents, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n def add_grid(self, shape, **kwargs):\r\n \"\"\" Creates a new environment with a spatial grid.\r\n Arguments are forwarded to :class:`Grid`. \"\"\"\r\n new_env = Grid(self.model, shape=shape, **kwargs)\r\n self.envs.append(new_env)\r\n return new_env\r\n\r\n def measure(self, measure, value):\r\n \"\"\" Records an evaluation measure. \"\"\"\r\n self._measure_log[measure] = [value]\r\n\r\n # Main simulation functions\r\n\r\n def setup(self, **kwargs):\r\n \"\"\" Defines the model's actions before the first simulation step.\r\n Can be overwritten and used to initiate agents and environments.\"\"\"\r\n pass\r\n\r\n def step(self):\r\n \"\"\" Defines the model's actions during each simulation step.\r\n Can be overwritten and used to set the models' main dynamics.\"\"\"\r\n pass\r\n\r\n def update(self):\r\n \"\"\" Defines the model's actions after setup and each simulation step.\r\n Can be overwritten and used for the recording of dynamic variables. \"\"\"\r\n pass\r\n\r\n def end(self):\r\n \"\"\" Defines the model's actions after the last simulation step.\r\n Can be overwritten and used for final calculations and measures.\"\"\"\r\n pass\r\n\r\n def stop(self):\r\n \"\"\" Stops :meth:`Model.run` during an active simulation. \"\"\"\r\n self._stop = True\r\n\r\n def _setup_run(self, steps=None):\r\n \"\"\" Prepare round 0 of a simulation. \"\"\"\r\n\r\n if steps is None:\r\n self._steps = self.p['steps'] if 'steps' in self.p else 1000\r\n else:\r\n self._steps = steps\r\n self._stop = False\r\n self.setup(**self._setup_kwargs)\r\n self.update()\r\n if self.t >= self._steps:\r\n self._stop = True\r\n\r\n def _make_step(self):\r\n \"\"\" Proceed simulation by one step. \"\"\"\r\n self.t += 1\r\n self.step()\r\n self.update()\r\n if self.t >= self._steps:\r\n self._stop = True\r\n\r\n def run(self, steps=None, display=True):\r\n \"\"\" Executes the simulation of the model.\r\n\r\n The simulation proceeds as follows.\r\n It starts by calling :func:`Model.setup` and :func:`Model.update`.\r\n After that, ``Model.t`` is increased by 1 and\r\n :func:`Model.step` and :func:`Model.update` are called.\r\n This step is repeated until the method :func:`Model.stop` is called\r\n or steps is reached. 
After the last step, :func:`Model.end` is called.\r\n\r\n Arguments:\r\n display(bool, optional):\r\n Whether to display simulation progress (default True).\r\n steps(int, optional):\r\n Maximum number of steps for the simulation to run.\r\n If none is given, the parameter 'Model.p.steps' will be used.\r\n If there is no such parameter, 'steps' will be set to 1000.\r\n\r\n Returns:\r\n DataDict: Recorded model data,\r\n which can also be found in :attr:`Model.output`.\r\n \"\"\"\r\n\r\n dt0 = datetime.now() # Time-Stamp\r\n self._setup_run(steps)\r\n\r\n while not self._stop:\r\n self._make_step()\r\n if display:\r\n print(f\"\\rCompleted: {self.t} steps\", end='')\r\n\r\n self.end()\r\n self._create_output()\r\n self.output.log['run_time'] = ct = str(datetime.now() - dt0)\r\n self.output.log['steps'] = self.t\r\n\r\n if display:\r\n print(f\"\\nRun time: {ct}\\nSimulation finished\")\r\n\r\n return self.output\r\n\r\n def _create_output(self):\r\n \"\"\" Generates an 'output' dictionary out of object logs. \"\"\"\r\n\r\n def output_from_obj_list(self, obj_list, columns):\r\n # Aggregate logs per object type\r\n obj_types = {}\r\n for obj in obj_list:\r\n\r\n if obj.log: # Check for variables\r\n\r\n # Add object id/key to object log\r\n obj.log['obj_id'] = [obj.id] * len(obj.log['t'])\r\n\r\n # Initiate object type if new\r\n obj_type = type(obj).__name__\r\n\r\n if obj_type not in obj_types.keys():\r\n obj_types[obj_type] = {}\r\n\r\n # Add object log to aggr. log\r\n for k, v in obj.log.items():\r\n if k not in obj_types[obj_type]:\r\n obj_types[obj.type][k] = []\r\n obj_types[obj_type][k].extend(v)\r\n\r\n # Transform logs into dataframes\r\n for obj_type, log in obj_types.items():\r\n df = pd.DataFrame(log)\r\n for k, v in columns.items():\r\n df[k] = v # Set additional index columns\r\n df = df.set_index(list(columns.keys()) + ['obj_id', 't'])\r\n self.output['variables'][obj_type] = df\r\n\r\n # 0 - Document parameters\r\n if self.p:\r\n self.output['parameters'] = self.p\r\n\r\n # 1 - Define additional index columns\r\n columns = {}\r\n if self.run_id is not None:\r\n columns['run_id'] = self.run_id\r\n if self.scenario is not None:\r\n columns['scenario'] = self.scenario\r\n\r\n # 2 - Create measure output\r\n if self._measure_log:\r\n d = self._measure_log\r\n for key, value in columns.items():\r\n d[key] = value\r\n df = pd.DataFrame(d)\r\n if columns:\r\n df = df.set_index(list(columns.keys()))\r\n self.output['measures'] = df\r\n\r\n # 3 - Create variable output\r\n self.output['variables'] = DataDict()\r\n\r\n # 3.1 - Create variable output for objects\r\n output_from_obj_list(self, self.agents, columns)\r\n output_from_obj_list(self, self.envs, columns)\r\n\r\n # 3.2 - Create variable output for model\r\n if self.log:\r\n df = pd.DataFrame(self.log)\r\n # df['obj_id'] = 'model'\r\n for k, v in columns.items():\r\n df[k] = v\r\n df = df.set_index(list(columns.keys()) + ['t']) # 'obj_id',\r\n\r\n if self.output['variables']:\r\n self.output['variables'][self.type] = df\r\n else:\r\n self.output['variables'] = df # No subdict if only model vars\r\n\r\n # 3.3 - Remove variable dict if empty (i.e. 
nothing has been added)\r\n elif not self.output['variables']:\r\n del self.output['variables']\r\n","sub_path":"agentpy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"10925903","text":"\"\"\"\nDefinition of urls for TheEntertainer.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\n\nimport app.forms\nimport app.views\n\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = (\n # Examples:\n url(r'^$', app.views.home, name='home'),\n url(r'^contact$', app.views.contact, name='contact'),\n url(r'^about$', app.views.about, name='about'),\n url(r'^login/$',\n django.contrib.auth.views.login,\n {\n 'template_name': 'app/login.html',\n 'authentication_form': app.forms.BootstrapAuthenticationForm,\n 'extra_context':\n {\n 'title': 'Log in',\n 'year': datetime.now().year,\n }\n },\n name='login'),\n url(r'^logout$',\n django.contrib.auth.views.logout,\n {\n 'next_page': '/',\n },\n name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n)\n\nurlpatterns += (\n url(r'^episodes/season/(?P\\d+)$', app.views.episodes, name='episodes'),\n url(r'^episode_details/season/(?P\\d+)/episode/(?P\\d+)$', app.views.episode_details, name='episode_details'),\n)\n\n# Cast patterns\nurlpatterns += (\n url(r'^cast_details/(?P\\d+)$', app.views.cast_details, name='cast_details'),\n)\n\n# Account patterns\nurlpatterns += (\n url(r'^login$', app.views.login, name='login'),\n url(r'^logout$', app.views.logout, name='logout'),\n url(r'^signup$', app.views.signup, name='signup'),\n)\n\n# Comment/Reply patterns\nurlpatterns += (\n url(r'^add_comment/(?P\\d+)/(?P\\d+)$', app.views.add_comment, name='add_comment'),\n url(r'^add_reply/(?P\\d+)/(?P\\d+)$', app.views.add_reply, name='add_reply'),\n)\n\n# Vote patterns\nurlpatterns += (\n url(r'^vote_up/(?P\\d+)/(?P\\d+)/(?P\\d+)$', app.views.vote_up, name='vote_up'),\n url(r'^vote_down/(?P\\d+)/(?P\\d+)/(?P\\d+)$', app.views.vote_down, name='vote_down'),\n)\n\n# Rating patterns\nurlpatterns += (\n url(r'^add_rating/(?P\\d+)/(?P\\d+)$', app.views.add_rating, name='add_rating'),\n)\n\n","sub_path":"TheEntertainer/TheEntertainer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651840419","text":"#!/usr/bin/env python3\r\n\r\n#from __future__ import print_function\r\nimport sys\r\nimport re\r\nimport gzip\r\n\r\n# run program as:\r\n# find-denovo.py \r\n# (type \"mom\",\"dad\", \"proband\", or \"sibling\")\r\n# type them in the order of their respective VCF columns. 
If sibling comes first in the VCF, type \"dad mom sibling proband\"\r\n# This script will look for instances in which the proband's genotype is heterozygous, and the other 3 family members' genotypes are homozygous reference.\r\n\r\n\r\ndef main():\r\n inFileName = sys.argv[1]\r\n momIdx = sys.argv.index(\"mom\") #user inputs the order of the family member columns\r\n dadIdx = sys.argv.index(\"dad\")\r\n childIdx = sys.argv.index(\"proband\")\r\n siblingIdx = sys.argv.index(\"sibling\")\r\n\r\n with open (inFileName, 'r') as infile: # gzip.open (inFileName, 'r') as infile: #when you use \"with open\" you don't have to close the file later\r\n with open (inFileName + \".homRefSib\", \"w\") as variantFile:\r\n for line in infile:\r\n if line.startswith(\"#\"): # header and info lines start with \"#\"\r\n variantFile.write(line)\r\n else:\r\n is_denovo_variant = process_line(line, momIdx-2, dadIdx-2, childIdx-2, siblingIdx-2)\r\n if is_denovo_variant == True:\r\n variantFile.write(line)\r\n\r\ndef process_line(line, momIdx, dadIdx, childIdx, siblingIdx):\r\n is_denovo_variant = False\r\n\r\n (chrom, pos, ID, ref, alt, qual, Filter, info, format, samples) = line.strip(\"\\n\").split(\"\\t\", 9)\r\n samples = samples.split(\"\\t\")\r\n\r\n dadgeno = samples[dadIdx]\r\n momgeno = samples[momIdx]\r\n childgeno = samples[childIdx]\r\n siblinggeno = samples[siblingIdx]\r\n \r\n dadAlleles = extract_genes(dadgeno)\r\n momAlleles = extract_genes(momgeno)\r\n childAlleles = extract_genes(childgeno)\r\n siblingAlleles = extract_genes(siblinggeno)\r\n\r\n if dadAlleles == \"0/0\" and momAlleles == \"0/0\" and siblingAlleles == \"0/0\" and (childAlleles == \"1/0\" or childAlleles == \"0/1\"): #COME BACK AND ACCOUNT FOR POSSIBLE PHASED GENOTYPE (WITH \"|\")\r\n is_denovo_variant = True\r\n return(is_denovo_variant)\r\n\r\ndef extract_genes(unparsed_geno):\r\n # split the data by \":\", to access only the genotype\r\n # first element of the list is the genotype when format is Genotype:Quality:ReadDepth:etc.\r\n geno = unparsed_geno.split(\":\")[0]\r\n\r\n # split the genotypes into individual alleles - split on \"/\" or \"|\"\r\n #alleles = re.split(r\"/|\\|\", geno)\r\n\r\n return geno #alleles\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"find-denovo.py","file_name":"find-denovo.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"603670852","text":"\"\"\"\nTkinter resources: \nhttp://zetcode.com/gui/tkinter/introduction/\nhttps://github.com/siddharthasahu/P2P-chat-application\nhttps://docs.python.org/2/library/tkinter.html\nhttp://www.tkdocs.com/tutorial/grid.html\n\"\"\"\nfrom network import Handler, poll\nimport random\nimport string\n\nimport Tkinter as tk\n\n\nmyname = ''.join([random.choice(string.ascii_lowercase + string.digits) \n for _ in range(4)])\n\nclass MyGUI():\n \n def __init__(self, manager):\n self.manager = manager\n root = tk.Tk() # main window\n root.protocol('WM_DELETE_WINDOW', self.manager.stop) # cross was clicked\n root.title('Chat client')\n root.resizable(width=False, height=False)\n self.root = root\n self._build_gui()\n \n def _build_gui(self):\n root = self.root\n root.grid()\n \n self.chat_screen = tk.Text(root, bg=\"white\", width=60, height=20,\n state=tk.DISABLED, wrap=tk.WORD)\n self.chat_screen.grid(column=0, row=0, sticky=tk.EW)\n \n scrollbar = tk.Scrollbar(command=self.chat_screen.yview,\n orient=tk.VERTICAL)\n scrollbar.grid(row=0, column=1, 
sticky=tk.NS)\n self.chat_screen.config(yscrollcommand=scrollbar.set)\n \n frame = tk.Frame(root)\n frame.grid(column=0, row=1, sticky=tk.EW)\n \n self.entry = tk.Entry(frame, width=60)\n self.entry.pack(side=tk.LEFT, padx=20)\n # ways to capture key press: http://stackoverflow.com/a/19148324/856897\n self.entry.bind('<Key>', lambda k: self._on_keypress(k))\n self.entry.focus_set()\n \n self.listbox = tk.Listbox(root, width=20)\n self.listbox.grid(column=2, row=0, sticky=tk.NSEW)\n \n \n def _on_keypress(self, key):\n if key.char in ('\\n', '\\r'):\n txt = self.entry.get()\n self.entry.delete(0, tk.END)\n self.manager.network.send_msg(txt)\n self.show_msg(txt, myname)\n \n def show_msg(self, txt, author=None):\n txtbox = self.chat_screen\n txtbox.config(state=tk.NORMAL)\n if author:\n txtbox.insert(tk.END, author + ': ')\n txtbox.insert(tk.END, txt + '\\n')\n txtbox.see(tk.END)\n txtbox.config(state=tk.DISABLED)\n \n def update_userlist(self, names):\n box = self.listbox\n box.delete(0, tk.END)\n [box.insert(tk.END, name) for name in names]\n \n def update(self):\n self.root.update()\n \n def kill(self):\n self.root.destroy()\n\nclass MyHandler(Handler):\n \n def __init__(self, manager):\n host, port = 'localhost', 8888\n Handler.__init__(self, host, port)\n self.manager = manager\n self.do_send({'join': myname})\n \n def on_close(self):\n self.manager.gui.show_msg('Server is offline.')\n self.manager.gui.show_msg('Close and re-open the window to restart.')\n \n def on_msg(self, msg):\n if 'join' in msg:\n self.manager.gui.update_userlist(msg['users'])\n name = msg['join']\n if name == myname:\n self.manager.gui.show_msg('welcome, ' + name)\n else:\n self.manager.gui.show_msg(name + ' joined')\n elif 'leave' in msg:\n self.manager.gui.update_userlist(msg['users'])\n self.manager.gui.show_msg(msg['leave'] + ' left')\n elif 'speak' in msg and msg['speak'] != myname:\n self.manager.gui.show_msg(msg['txt'], msg['speak'])\n \n def send_msg(self, txt):\n self.do_send({'speak': myname, 'txt': txt})\n \n def update(self):\n poll(0.01)\n \n def kill(self):\n self.close() # will call on_close\n\nclass Manager():\n \n def __init__(self):\n self.gui = MyGUI(self)\n self.network = MyHandler(self)\n self.run()\n \n def run(self):\n self.keep_going = True\n while self.keep_going:\n self.network.update()\n self.gui.update()\n self.gui.kill()\n self.network.kill()\n \n def stop(self):\n self.keep_going = False\n\nif __name__ == '__main__':\n Manager()\n \n","sub_path":"assignment7/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"439469618","text":"\"\"\"\nDESCRIPTION:\n This python file will be used as the server which will be connected with client.js using SocketIO.\n The connection is Peer to Peer. Thus at any point, only two users can chat or video call. 
\n\"\"\"\n\n# importing all the libraries\nfrom flask import Flask, render_template, request, session, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask_socketio import SocketIO\nimport pandas as pd\nimport numpy as np\nimport json\nimport datastoreUtil as util\n\napp = Flask(__name__)\nbootstrap=Bootstrap(app)\nsocketio=SocketIO(app)\n\napp.config['SECRET_KEY']=\"MY_KEY\"\nusers={}\n\n\n\"\"\"\n*******************************************************************\n ALL ROUTE FUNCTIONS \n*******************************************************************\n\"\"\"\n\n# registration for new users, using user_name and user_email\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n if request.method == \"POST\":\n user_name = request.form.get(\"user_name\")\n user_email = request.form.get(\"user_email\")\n session['user_name'] = user_name\n session['user_email'] = user_email\n if util.uniqueDetail(user_name, user_email):\n return redirect(url_for('message'))\n elif util.registerUser(user_name, user_email):\n return redirect(url_for('message'))\n return render_template('register.html')\n\n\n# loading HTML page for chatting with users \n@app.route('/message')\ndef message():\n try:\n return render_template('message.html', user_name = session['user_name'], user_email = session['user_email'])\n except:\n return redirect(url_for('login'))\n\n# loading HTML page for video calling with users \n@app.route('/videoCall')\ndef videoCall():\n try:\n return render_template('videoCall.html', user_name = session['user_name'], user_email = session['user_email'])\n except:\n return redirect(url_for('login'))\n\n# loading HTML page for video calling with users \n@app.route('/logout')\ndef logout():\n try:\n del users[session['user_name']]\n session.pop('user_name')\n session.pop('user_email')\n return redirect(url_for('login'))\n except:\n return redirect(url_for('login'))\n\n\n\"\"\"\n*******************************************************************\n SOCKETIO FUNCTIONS\n*******************************************************************\n\"\"\"\n\n# to check for connection establishment of client and server\n@socketio.on('connected')\ndef onConnection(message): \n print(\"connected : \",message)\n data = json.loads(message)\n user_name = data['user_name']\n user_sid = request.sid\n if user_sid != None:\n util.updateUserSid(user_name, user_sid)\n\n\n\"\"\" \nTo handle messages emitted by client.js for identifying and establishing connection between two peers.\nThis includes offers, answers and identifying candidates.\n\"\"\"\n@socketio.on('message')\ndef onMessage(msg):\n data = json.loads(msg)\n print(\"[onMessage] All data : \", data)\n\n # if a new peer is conneted to the sever\n if(data['type']=='register'): \n user_name = data['user_name']\n users[user_name] = util.getUserSid(user_name)\n print(\"[onMessage] emit displayAvailableUsers\", users)\n socketio.emit('displayAvailableUsers', json.dumps(users))\n\n # elif data['purpose'] == 'videoCall':\n # print(\"[onMessage] emit displayUsersForCall\", users)\n # socketio.emit('displayUsersForCall', json.dumps(users))\n\n # handling offer andwers and candiates\n elif(data['type'] == 'offer'):\n print(\"sending offer\")\n socketio.emit('offerReceived', json.dumps({'type': \"offer\", 'offer': data['offer']\n ,'receiver': data['receiver'], 'sender':data['sender'], 'senderid': util.getUserSid(data['sender'])}), room = util.getUserSid(data['receiver']))\n\n elif(data['type'] == 'answer'):\n socketio.emit('answerReceived', json.dumps({'type': 
\"answer\", 'answer': data['answer'], 'sender': data['sender']}), room = util.getUserSid(data['receiver']))\n\n elif(data['type'] == 'candidate'):\n socketio.emit('candidateReceived', json.dumps({'type': \"candidate\", 'candidate': data['candidate']}), room = data['user'])\n\n\nif(__name__=='__main__'):\n\tsocketio.run(app, debug=True)\n # host = your ipv4\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"122189882","text":"from session import catch_session\nfrom menus import login_menu\n\ndef login_required(f):\n\tdef wrapper(*args, **kwargs):\n\t\tsession = catch_session()\n\t\tif len(session) > 0:\n\t\t\tf()\n\t\telif login_menu():\n\t\t\tf()\n\t\telse:\n\t\t\treturn wrapper()\n\treturn wrapper()\n\n\n\t\t\t\n\t\n\t\n","sub_path":"crud-deco/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"375860340","text":"import os, os.path\nimport json\nfrom dateutil.relativedelta import *\nfrom datetime import datetime ,timedelta, date\nimport pandas as pd\nimport subprocess\n\n\n\n\n\ndef getLength(input_video):\n\tresult = subprocess.check_output(['ffprobe', '-i', input_video, '-show_entries', 'format=duration', '-v', 'quiet', '-of', 'csv=%s' % (\"p=0\")])\n\treturn float(result)\n\n\n\ndef Daily(path_data, date):\n\t\n\tnumber_image = 0\n\tfile_image = path_data + '/' + date + '/image_url_' + date + '.json'\n\t# ------------------- Number image -----------------------\n\tif os.path.exists(file_image):\n\n\t\twith open (file_image,'r') as f:\n\t\t\timage = json.load(f)\n\t\tnumber_image = len(image['my_json'])\n\n\tnumber_video = 0\n\tpath_video_folder = path_data + '/' + date + '/videos'\n\t# ------------------ Time video ----------------------\n\tif os.path.exists(path_video_folder):\n\t\tdir_ = next(os.walk(path_video_folder))[0]\n\t\tlist_file = next(os.walk(path_video_folder))[2]\n\t\t# for file in list_file:\n\t\t# \t# file = dir_ + '/' + file\n\t\t# \t# print (file)\n\t\t# \t# if os.path.exists(file):\n\t\t# \t\tnumber_video += getLength(file)\n\t\tnumber_video += len (list_file)\n\treturn (number_image, number_video)\n\n\ndef count_image(path_data, date_, to_date_):\n\n\tdate_ = datetime.strptime(date_, '%Y-%m-%d').date()\n\tto_date_ = datetime.strptime(to_date_, '%Y-%m-%d').date()\n\tn = int((to_date_ - date_).days)\n\twith open('statictis.txt', 'a') as f:\n\t\tfor i in range(n + 1):\n\t\t\tsingle_date = date_ + timedelta(i)\n\n\t\t\td = single_date.strftime('%Y-%m-%d')\n\t\t\tnumber_image, number_video = Daily(path_data, str(d))\n\t\t\tprint (d , number_image, number_video)\n\n\n\t\n\nmonth = '2016-10-01'\nto_month = '2017-06-29'\npath_audit_content = '/u01/oracle/oradata/APEX/MARKETING_TOOL_02_JSON'\n\ncount_image(path_audit_content, month, to_month)","sub_path":"label_visualize/audio/statistic.py","file_name":"statistic.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"395221192","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom mainapp.views import ProfileView, \\\n PostViewSet, LikeViewSet, PostNewView, CommentViewSet\n\nurlpatterns = [\n url(r'post/(?P[0-9]+)/$', PostViewSet.as_view(), name='post-detail'),\n url(r'post/(?P[0-9]+)/like/$', LikeViewSet.as_view(), name='like-detail'),\n 
url(r'post/(?P[0-9]+)/comment/$', CommentViewSet.as_view(), name='comment'),\n url(r'^profile', ProfileView.as_view(), name='profile'),\n url(r'^post_new', PostNewView.as_view(), name='post_new'),\n\n]\n","sub_path":"blog/mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"535036432","text":"class koma:\n def __init__(self,name,rename,firstTurn,movability):\n self.name = name\n self.reverseName = rename\n self.xposition = -1\n self.yposition = -1\n self.firstTurn = firstTurn\n self.komaChange = False\n self.movablity = movability\n\n def check(self,movablity:list):\n for a in self.movablity:\n if self.firstTurn:\n if a[0] == -movablity[0] and a[1] == -movablity[1]:\n return True\n else:\n if a[0] == movablity[0] and a[1] == movablity[1]:\n return True\n return False","sub_path":"koma.py","file_name":"koma.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"245030513","text":"#BugCollector\r\n#6/15/17\r\n#CTI-110 M4T1 - BugCollector\r\n#MarquisMarshall\r\n#\r\n\r\ntotal = 0\r\n\r\nfor day in range(1, 6):\r\n print(\"Enter the bugs collected on day:\",day)\r\n bugs = int(input())\r\n total += bugs\r\n\r\nprint(\"You collected a total of\", total, \"bugs this week.\")\r\n \r\n","sub_path":"M4T1_BugCollector_MarquisMarsahall.py","file_name":"M4T1_BugCollector_MarquisMarsahall.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"181793496","text":"#!/usr/bin/env python\n\n\"\"\"Main.\"\"\"\n\nimport sys\nimport os\nfrom cpu import *\n\ncpu = CPU()\n\n# Make sure second argument is passed\nif len(sys.argv) <= 1:\n raise Exception('Program is expected as a second argument, None provided.')\nelse:\n # Validate that the argument given is a file that exists\n file = sys.argv[1]\n\n if os.path.isfile(file):\n # Load filename into CPU\n cpu.load(file)\n cpu.run()\n else:\n raise FileNotFoundError('Could not find the specified file.')\n ","sub_path":"ls8/ls8.py","file_name":"ls8.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"248623517","text":"# Construct a binary tree from its preorder and inorder traversals.\n#\n# Note:\n# You may assume that there are no duplicate elements in the tree.\n#\n# For example, given\n#\n# preorder traversal: preorder = [3,9,20,15,7]\n# inorder traversal: inorder = [9,3,15,20,7]\n#\n# return the following binary tree:\n#\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# Related Topics Tree Depth-First-Search Array\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n # Preorder traversal: root, left, right\n # Inorder traversal: left, root, right\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n if not preorder: return None\n\n root_val = preorder[0] # the first element of the preorder traversal is the root of the current (sub)tree\n root = TreeNode(root_val)\n\n mid = inorder.index(root_val) # find the index of the root node in the inorder traversal\n\n # build the left subtree\n root.left = self.buildTree(preorder[1:mid + 1], inorder[:mid])\n # build the right subtree\n root.right = self.buildTree(preorder[mid + 1:], inorder[mid + 1:])\n\n # return the root node\n return root\n\n\n# leetcode submit region end(Prohibit modification and deletion)\nres = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 
7])\nprint(res)\n","sub_path":"Week_03/[105]从前序与中序遍历序列构造二叉树.py","file_name":"[105]从前序与中序遍历序列构造二叉树.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"79492575","text":"import xarray as xr\nimport glob\nimport os\nimport itertools\nfrom JASMIN import constants as cnst, MetUM_variables as mv\nimport numpy as np\nimport pdb\n\ndef run(orig_names=False):\n\n fpath = '/home/users/cornkle/runscript/in'\n outpath = '/home/users/cornkle/runscript/out'\n\n local_box = [-18+360,14+360, 3.5, 14]\n temp_box = [-18+360,35+360, 3.5, 30]\n\n months = [3,11] # March-May\n\n dic = {\n\n 't2' : ([temp_box], ['keep'], [], [12,3,6]),\n 'u10': ([temp_box], ['keep'], [], [12,3,6]),\n 'v10': ([temp_box], ['keep'], [], [12,3,6]),\n 'lw_out_PBLtop' : ([temp_box], ['keep'], [], []),\n 'v_pl' : ([temp_box], ['keep', 'keep'], [650,925], [12,3]),\n 'u_pl' : ([temp_box], ['keep', 'keep'], [650, 850,925], [12,3]),\n 't_pl' : ([temp_box], ['keep'], [925], [12,3]),\n 'omega_pl' : ([temp_box], ['keep'], [650,300], []),\n 'lsRain' : ([temp_box], ['keep'], [], []),\n 'q_pl' : ([temp_box], ['keep', 'keep'], [650, 925], []),\n }\n keys = dic.keys()\n\n for k in keys:\n\n info = cnst.VARDIC[k]\n dinfo = dic[k]\n var = mv.create_CP4_filename(k)\n\n if not orig_names:\n pathvar = k\n else:\n pathvar = var\n\n infolder = fpath+os.sep + pathvar\n outfolder = outpath +os.sep + k\n files = glob.glob(infolder + os.sep + var+'*.nc' )\n for f in files:\n\n fname = os.path.basename(f)\n outname = fname.replace(var, k)\n outfile = outfolder + os.sep + outname\n if os.path.isfile(outfile):\n print('File already exists, continue.')\n continue\n ds = xr.open_dataset(f)\n\n if (ds['time.month'][0]months[1]):\n continue\n\n if dinfo[3] != []:\n ds = ds.isel(time=(([np.in1d(ds['time.hour'].values, dinfo[3])][0]) & (ds['time.minute']==0))) \n box = dinfo[0]\n\n for id, b in enumerate(box):\n\n agg = dinfo[1][id]\n pres = dinfo[2]\n cut = ds.sel(longitude=slice(b[0], b[1]), latitude=slice(b[2], b[3]))\n try:\n da = cut[var]\n except KeyError:\n try:\n da = cut['c03238'] # stupid t2 problem\n except KeyError:\n try:\n da = cut['a04203'] # stupid lsRain_hFreq proble\n except KeyError:\n print('KEY ERROR, name missing')\n pdb.set_trace()\n\n if pres != []:\n da = da.sel(pressure=pres)\n\n if agg != 'keep':\n da = da.resample('24H', base=16, dim='time', skipna=True, how='mean')\n\n\n comp = dict(zlib=True, complevel=5)\n\n da.name = k\n da.longitude.values = da.longitude.values-360\n #encoding = {var: comp for var in da.data_vars}\n encoding = {k: {'complevel': 5, 'zlib': True}}\n if not os.path.exists(outfolder):\n os.makedirs(outfolder)\n\n da.to_netcdf(outfolder + os.sep + outname , format='NETCDF4', encoding=encoding)\n da.close()\n\n print('Wrote '+ outfolder + os.sep + outname)\n\n\n\n","sub_path":"JASMIN/JASMIN_extract_script_CLOVER.py","file_name":"JASMIN_extract_script_CLOVER.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320977015","text":"import socket\n\n\ndef extract_uuid_description(rule_objects):\n filtered_rules_list = []\n for obj in rule_objects:\n new_obj = dict(uuid=obj['uuid'], description=obj['description'])\n filtered_rules_list.append(new_obj)\n return filtered_rules_list\n\n\ndef check_port_connection(host, port):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(0.05)\n s.connect((host, int(port)))\n 
s.shutdown(socket.SHUT_RDWR)\n return True\n\n except:\n return False\n\n\ndef change_format_float_list(old_list):\n new_list = list()\n for flt in old_list:\n new_list.append(str(flt).replace('.', ','))\n\n return new_list\n","sub_path":"opnsense_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"395654340","text":"# -*-coding:utf-8-*- \nimport os\n\nSAVELIST = ['train_retrieval.py', 'clean.py', '__init__.py']\nversion = '7'\n\n\ndef deleteNouese(path): # 遍历指定文件夹中所有文件,检查图像大小,长高小于300的删除,不是图像的文件也删除\n for root, dirs, files in os.walk(path):\n for file in files:\n if file in SAVELIST:\n continue\n name = file.rsplit('.', 1)[0]\n postfix = file.rsplit('.', 1)[-1]\n # print(file.rsplit('.',1))\n if postfix == 'py':\n pycPath = os.path.join(root, '__pycache__', name + '.cpython-3{}.pyc'.format(version))\n flag = os.path.exists(pycPath)\n # print(name+'.cpython-38.pyc')\n if flag:\n print('save ', os.path.join(root, '__pycache__', name + '.cpython-3{}.pyc').format(version))\n pass\n else:\n pypath = os.path.join(root, file)\n print('delete {}'.format(pypath))\n # print('delete {}'.format(pycPath))\n\n # try:\n os.remove(pypath)\n # os.remove(pycPath)\n # except:\n # pass\n\n\ndef deletepyc(path):\n for root, dirs, files in os.walk(path):\n for file in files:\n aa1 = os.path.join(root, file)\n # print(aa1)\n if aa1.split('.')[-1] == 'pyc':\n print(aa1)\n os.remove(aa1) # 删除文件\n\n\ndef main():\n path = \"./\"\n # deleteNouese(path)\n # deletepyc(path)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"425840475","text":"\"\"\"\nA module that contains some future features of the math stdlib for earlier Python versions.\n\"\"\"\nimport math\nimport sys\n\nimport numpy as np\n\nfrom .overrides import set_module\n\n__all__ = [\"pow\", \"isqrt\", \"iroot\", \"ilog\", \"lcm\", \"prod\"]\n\n\n@set_module(\"galois\")\ndef pow(base, exp, mod): # pylint: disable=redefined-builtin\n \"\"\"\n Efficiently exponentiates an integer :math:`a^k (\\\\textrm{mod}\\\\ m)`.\n\n The algorithm is more efficient than exponentiating first and then reducing modulo :math:`m`. This\n is the integer equivalent of :func:`galois.poly_pow`.\n\n Note\n ----\n This function is an alias of :func:`pow` in the standard library.\n\n Parameters\n ----------\n base : int\n The integer base :math:`a`.\n exp : int\n The integer exponent :math:`k`.\n mod : int\n The integer modulus :math:`m`.\n\n Returns\n -------\n int\n The modular exponentiation :math:`a^k (\\\\textrm{mod}\\\\ m)`.\n\n Examples\n --------\n .. ipython:: python\n\n galois.pow(3, 5, 7)\n (3**5) % 7\n \"\"\"\n import builtins # pylint: disable=import-outside-toplevel\n return builtins.pow(base, exp, mod)\n\n\n@set_module(\"galois\")\ndef isqrt(n):\n \"\"\"\n Computes the integer square root of :math:`n` such that :math:`\\\\textrm{isqrt}(n)^2 \\\\le n`.\n\n Note\n ----\n This function is included for Python versions before 3.8. For Python 3.8 and later, this function\n calls :func:`math.isqrt` from the standard library.\n\n Parameters\n ----------\n n : int\n A non-negative integer.\n\n Returns\n -------\n int\n The integer square root of :math:`n` such that :math:`\\\\textrm{isqrt}(n)^2 \\\\le n`.\n\n Examples\n --------\n .. 
ipython:: python\n\n galois.isqrt(27**2 - 1)\n galois.isqrt(27**2)\n galois.isqrt(27**2 + 1)\n \"\"\"\n if sys.version_info.major == 3 and sys.version_info.minor >= 8:\n return math.isqrt(n) # pylint: disable=no-member\n else:\n if not isinstance(n, (int, np.integer)):\n raise TypeError(f\"Argument `n` must be an integer, not {type(n)}.\")\n if not n >= 0:\n raise ValueError(f\"Argument `n` must be non-negative, not {n}.\")\n\n n = int(n)\n if n < 2:\n return n\n\n small_candidate = isqrt(n >> 2) << 1\n large_candidate = small_candidate + 1\n if large_candidate * large_candidate > n:\n return small_candidate\n else:\n return large_candidate\n\n\n@set_module(\"galois\")\ndef iroot(n, k):\n \"\"\"\n Finds the integer :math:`k`-th root :math:`x` of :math:`n`, such that :math:`x^k \\\\le n`.\n\n Parameters\n ----------\n n : int\n A positive integer.\n k : int\n The root :math:`k`, must be at least 2.\n\n Returns\n -------\n int\n The integer :math:`k`-th root :math:`x` of :math:`n`, such that :math:`x^k \\\\le n`\n\n Examples\n --------\n .. ipython :: python\n\n galois.iroot(27**5 - 1, 5)\n galois.iroot(27**5, 5)\n galois.iroot(27**5 + 1, 5)\n \"\"\"\n if not isinstance(n, (int, np.integer)):\n raise TypeError(f\"Argument `n` must be an integer, not {type(n)}.\")\n if not isinstance(k, (int, np.integer)):\n raise TypeError(f\"Argument `k` must be an integer, not {type(k)}.\")\n if not n > 0:\n raise ValueError(f\"Argument `n` must be positive, not {n}.\")\n if not k >= 2:\n raise ValueError(f\"Argument `k` must be at least 2, not {k}.\")\n\n # https://stackoverflow.com/a/39191163/11694321\n u = n\n x = n + 1\n k1 = k - 1\n\n while u < x:\n x = u\n u = (k1*u + n // u**k1) // k\n\n return x\n\n\n@set_module(\"galois\")\ndef ilog(n, b):\n \"\"\"\n Finds the integer :math:`\\\\textrm{log}_b(n) = k`, such that :math:`b^k \\\\le n`.\n\n Parameters\n ----------\n n : int\n A positive integer.\n b : int\n The logarithm base :math:`b`.\n\n Returns\n -------\n int\n The integer :math:`\\\\textrm{log}_b(n) = k`, such that :math:`b^k \\\\le n`.\n\n Examples\n --------\n .. ipython :: python\n\n galois.ilog(27**5 - 1, 27)\n galois.ilog(27**5, 27)\n galois.ilog(27**5 + 1, 27)\n \"\"\"\n if not isinstance(n, (int, np.integer)):\n raise TypeError(f\"Argument `n` must be an integer, not {type(n)}.\")\n if not isinstance(b, (int, np.integer)):\n raise TypeError(f\"Argument `b` must be an integer, not {type(b)}.\")\n if not n > 0:\n raise ValueError(f\"Argument `n` must be positive, not {n}.\")\n if not b >= 2:\n raise ValueError(f\"Argument `b` must be at least 2, not {b}.\")\n\n # https://stackoverflow.com/a/39191163/11694321\n low, b_low, high, b_high = 0, 1, 1, b\n\n while b_high < n:\n low, b_low, high, b_high = high, b_high, high*2, b_high**2\n\n while high - low > 1:\n mid = (low + high) // 2\n b_mid = b_low * b**(mid - low)\n if n < b_mid:\n high, b_high = mid, b_mid\n elif b_mid < n:\n low, b_low = mid, b_mid\n else:\n return mid\n\n if b_high == n:\n return high\n\n return low\n\n\n@set_module(\"galois\")\ndef lcm(*integers):\n \"\"\"\n Computes the least common multiple of the integer arguments.\n\n Note\n ----\n This function is included for Python versions before 3.9. For Python 3.9 and later, this function\n calls :func:`math.lcm` from the standard library.\n\n Returns\n -------\n int\n The least common multiple of the integer arguments. If any argument is 0, the LCM is 0. If no\n arguments are provided, 1 is returned.\n\n Examples\n --------\n .. 
ipython:: python\n\n        galois.lcm()\n        galois.lcm(2, 4, 14)\n        galois.lcm(3, 0, 9)\n\n    This function also works on arbitrarily-large integers.\n\n    .. ipython:: python\n\n        prime1, prime2 = galois.mersenne_primes(100)[-2:]\n        prime1, prime2\n        lcm = galois.lcm(prime1, prime2); lcm\n        lcm == prime1 * prime2\n    \"\"\"\n    if sys.version_info.major == 3 and sys.version_info.minor >= 9:\n        return math.lcm(*integers) # pylint: disable=no-member\n    else:\n        _lcm = 1\n        for integer in integers:\n            _lcm = _lcm * integer // math.gcd(_lcm, integer)\n        return _lcm\n\n\n@set_module(\"galois\")\ndef prod(iterable, start=1):\n    \"\"\"\n    Computes the product of the integer arguments.\n\n    Note\n    ----\n    This function is included for Python versions before 3.8. For Python 3.8 and later, this function\n    calls :func:`math.prod` from the standard library.\n\n    Returns\n    -------\n    int\n        The product of the integer arguments.\n\n    Examples\n    --------\n    .. ipython:: python\n\n        galois.prod([2, 4, 14])\n        galois.prod([2, 4, 14], start=2)\n    \"\"\"\n    if sys.version_info.major == 3 and sys.version_info.minor >= 8:\n        return math.prod(iterable, start=start) # pylint: disable=no-member\n    else:\n        result = start\n        for integer in iterable:\n            result *= integer\n        return result\n","sub_path":"galois/math_.py","file_name":"math_.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"596872841","text":"\"\"\"\nFile: midpoint_vec.py\nCopyright (c) 2016 Andrew Malfavon\nExercise 5.22\nLicense: MIT\nCompares the runtimes of python sum, python for loop summation\nand numpy vectorized sum on the midpoint integral formula.\n\"\"\"\n\nimport numpy as np\nimport timeit\n\n#midpoint rule for approximating an integral using a for loop\n#the i-th midpoint of [a, b] with n subintervals is a + h/2 + i*h\ndef midpointint(f, a, b, n):\n    h = (b - a) / float(n)\n    sum_function = 0\n    for i in range(n):\n        sum_function += h * f(a + (h / 2) + (i * h))\n    return sum_function\n\n\n#compute the sum with the built in sum function\ndef midpoint_sum_func(f, a, b, n):\n    h = (b - a) / float(n)\n    arr = []\n    for i in range(n):\n        arr.append(h * f(a + (h / 2) + (i * h)))\n    return sum(arr)\n\n\n#compute the sum by the sum function in the numpy package\ndef midpoint_npsum(f, a, b, n):\n    h = (b - a) / float(n)\n    arr = []\n    for i in range(n):\n        arr.append(h * f(a + (h / 2) + (i * h)))\n    return np.sum(arr)\n\n#function for testing\ndef func(x):\n    return x\n\n#test that each approximates the integral correctly\ndef test_func():\n    assert round(midpointint(func, 0 , 10, 1000)) == 50.0\n    assert round(midpoint_sum_func(func, 0, 10, 1000)) == 50.0\n    assert round(midpoint_npsum(func, 0, 10, 1000)) == 50.0\n\n#additional test\ndef test_func2():\n    assert round(midpointint(np.sin, 0 , np.pi, 1000)) == 2.0\n    assert round(midpoint_sum_func(np.sin, 0, np.pi, 1000)) == 2.0\n    assert round(midpoint_npsum(np.sin, 0, np.pi, 1000)) == 2.0","sub_path":"midpoint_vec.py","file_name":"midpoint_vec.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"87602871","text":"#module for handling operations with matrices\n\"\"\"module for matrix operations\"\"\"\n\nfrom __future__ import division\n\n__version__ = '1.0'\n__author__ = 'Juraj Ondrej Dubrava'\n\nimport sys\nimport os\nimport numpy as np\nfrom scipy import stats\n\n\nclass BlosumMatrix:\n\t\"\"\"class for working with BLOSUM matrix\"\"\"\n\tdef __init__(self,matrix):\n\t\tself.load_matrix(matrix)\n\n\tdef 
load_matrix(self,matrix):\n\t\t\"\"\"load BLOSUM matrix from file and create matrix\"\"\"\n\t\twith open(matrix,'r') as blosum_file:\n\t\t\tblosum_matrix = blosum_file.read()\n\n\t\tlines = blosum_matrix.strip().split('\\n')\n\t\theader = lines.pop(0)\n\t\tcolumns = header.split()\n\t\tblosum_matrix = {}\n\n\t\tfor row in lines:\n\t\t\tentries = row.split()\n\t\t\trow_name = entries.pop(0)\n\t\t\tblosum_matrix[row_name] = {}\n\n\t\t\tfor column_name in columns:\n\t\t\t\tblosum_matrix[row_name][column_name] = entries.pop(0)\n\n\t\tblosum_file.close()\n\t\tself._blosum_matrix = blosum_matrix\n\n\tdef find_score(self,acid1,acid2):\n\t\t\"\"\"find score of 2 amino acids according to BLOSUM matrix\"\"\"\n\t\tacid1 = acid1.upper()\n\t\tacid2 = acid2.upper()\n\n\t\tif acid1 not in self._blosum_matrix or acid2 not in self._blosum_matrix[acid1]:\n\t\t\tprint(acid1)\n\t\t\tprint(acid2)\n\t\t\tsys.stderr.write('Error')\n\t\t\tsys.exit(1)\n\t\treturn self._blosum_matrix[acid1][acid2]\n\n\nclass ProbabilityMatrix:\n\t\"\"\"creates probability matrix from multiple alignements\"\"\"\n\tdef __init__(self,filename,matrix):\n\t\talignements = self.store_alignement(filename)\n\t\tself.create_probability_matrix(matrix,alignements)\n\n\tdef store_alignement(self,filename):\n\t\t\"\"\"#stores alignements from file\"\"\"\n\t\talignements = list()\n\t\twith open(filename,'r') as t:\n\t\t\tfor line in t.readlines():\n\t\t\t\tif not line.startswith('>'):\n\t\t\t\t\tseq1 = line.strip('\\n')\n\t\t\t\t\talignements.append(seq1)\n\t\treturn alignements\n\n\tdef create_probability_matrix(self,matrix,alignements):\n\t\t\"\"\"creates probability matrix as a dictionary, key value is alignement itself\"\"\"\n\t\tscore = 0\n\t\tmax_score = 0\n\t\tprobability_matrix = {}\n\t\tprobabilities = []\n\n\t\tfor i in alignements:\n\t\t\tfor acid,acid1 in zip(i,i):\n\t\t\t\tmax_score += int(matrix.find_score(acid,acid1))\n\t\t\tprobability_matrix[i] = {}\n\n\t\t\tfor j in alignements:\n\t\t\t\tfor acid,acid1 in zip(i,j):\n\t\t\t\t\tscore += int(matrix.find_score(acid,acid1))\n\t\t\t\t\tprobability = score / max_score\n\t\t\t\t\tprobabilities.append(probability)\n\t\t\t\t\tprobability_matrix[i][j] = probability\n\n\t\t\t\tscore = 0\n\t\t\tmax_score = 0\n\n\t\tself._probability_matrix = probability_matrix\n\n\tdef find_pair(self,seq1,seq2):\n\t\t\"\"\"find probability for entered sequnces\"\"\"\n\t\treturn self._probability_matrix[seq1][seq2]\n","sub_path":"BP/prediktor/final_code/blosum.py","file_name":"blosum.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4135447","text":"import logging\nimport os\nimport re\nfrom dataclasses import dataclass, asdict\nfrom typing import Union, Dict\n\nfrom sfaira.consts.utils import clean_doi, clean_id_str\nfrom sfaira.commands.questionary import sfaira_questionary\nfrom rich import print\nfrom cookiecutter.main import cookiecutter\n\nlog = logging.getLogger(__name__)\n\n\n@dataclass\nclass TemplateAttributes:\n dataloader_type: str = '' # One of single_dataset, multiple_datasets_single_file, multiple_datasets_streamlined, multiple_datasets_not_streamlined\n id: str = '' # unique identifier of data set (Organism_Organ_Year_Protocol_NumberOfDataset_FirstAuthorLastname_doi).\n id_without_doi: str = '' # complete id without the doi -> usually used to name the python scripts\n create_extra_description: str = '' # Whether to create an optional extra description file or not\n\n author: Union[str, list] = 
'' # author (list) who sampled / created the data set\n doi: str = '' # doi of data set accompanying manuscript\n doi_sfaira_repr: str = '' # internal representation with any special characters replaced with underscores\n\n sample_fns: Union[str, Dict[str, list]] = '' # file name of the first *.h5ad file\n download_url_data: str = '' # download website(s) of data files\n download_url_meta: str = '' # download website(s) of meta data files\n organ: str = '' # (*) organ (anatomical structure)\n organism: str = '' # (*) species / organism\n assay_sc: str = '' # (*, optional) protocol used to sample data (e.g. smart-seq2)\n normalization: str = '' # raw or the used normalization technique\n default_embedding: str = '' # Default embedding of the data\n primary_data: str = '' # Is this a primary dataset?\n disease: str = '' # name of the disease of the condition\n ethnicity: str = '' # ethnicity of the sample\n sample_source: str = '' # source of the sample\n state_exact: str = '' # state of the sample\n year: str = 2021 # year in which sample was acquired\n number_of_datasets: str = 1 # Required to determine the file names\n\n cell_types_original_obs_key: str = '' # Original cell type key in obs\n\n\nclass DataloaderCreator:\n\n def __init__(self, path_loader, doi):\n self.WD = os.path.dirname(__file__)\n self.TEMPLATES_PATH = f'{self.WD}/templates'\n self.template_attributes = TemplateAttributes()\n self.out_path = path_loader\n self.doi = doi\n\n def create_dataloader(self):\n \"\"\"\n Prompts and guides the user through a number of possible dataloader choices.\n Prompts the user for required attributes which must be present in the dataloader.\n Finally creates the specific cookiecutter dataloader template.\n \"\"\"\n self._prompt_dataloader_template()\n self._prompt_dataloader_configuration()\n self._create_dataloader_template()\n\n def _prompt_dataloader_template(self) -> None:\n \"\"\"\n Guides the user to select the appropriate dataloader template for his dataset.\n Sets the dataloader_type\n \"\"\"\n number_datasets = sfaira_questionary(function='select',\n question='How many datasets does your project have?',\n choices=['One', 'More than one'])\n # One dataset\n if number_datasets == 'One':\n self.template_attributes.dataloader_type = 'single_dataset'\n else:\n self.template_attributes.dataloader_type = 'multiple_datasets'\n\n def _prompt_dataloader_configuration(self):\n \"\"\"\n Prompts the user for all required attributes for a dataloader such as DOI, author, etc.\n \"\"\"\n author = sfaira_questionary(function='text',\n question='Author(s):',\n default='Einstein, Albert; Hawking, Stephen')\n self.template_attributes.author = author.split(';') if ';' in author else author\n if self.doi:\n doi = self.doi\n else:\n doi = sfaira_questionary(function='text',\n question='DOI:',\n default='10.1000/j.journal.2021.01.001')\n while not re.match(r'\\b10\\.\\d+/[\\w.]+\\b', doi):\n print('[bold red]The entered DOI is malformed!')\n doi = sfaira_questionary(function='text',\n question='DOI:',\n default='10.1000/j.journal.2021.01.001')\n self.template_attributes.doi = doi\n self.template_attributes.doi_sfaira_repr = clean_doi(doi)\n\n self.template_attributes.number_of_datasets = sfaira_questionary(function='text',\n question='Number of datasets:',\n default='1')\n\n # Differentiate between a single dataset or multiple datasets to get sample file names\n if self.template_attributes.dataloader_type == 'multiple_datasets':\n self.template_attributes.sample_fns = {'fns': []}\n for ds in 
range(int(self.template_attributes.number_of_datasets)):\n fn = sfaira_questionary(function='text',\n question='Sample file name:',\n default=f'data_{ds}.h5ad')\n self.template_attributes.sample_fns['fns'].append(fn)\n else:\n self.template_attributes.sample_fns = sfaira_questionary(function='text',\n question='Sample file name of the first dataset:',\n default='data.h5ad')\n\n self.template_attributes.primary_data = str(sfaira_questionary(function='confirm',\n question='Primary data:',\n default='Yes'))\n self.template_attributes.default_embedding = sfaira_questionary(function='text',\n question='Default embedding:',\n default='NA')\n self.template_attributes.organism = sfaira_questionary(function='text',\n question='Organism:',\n default='NA')\n self.template_attributes.organ = sfaira_questionary(function='text',\n question='Organ:',\n default='NA')\n self.template_attributes.assay_sc = sfaira_questionary(function='text',\n question='Assay:',\n default='NA')\n self.template_attributes.normalization = sfaira_questionary(function='text',\n question='Normalization:',\n default='raw')\n self.template_attributes.disease = sfaira_questionary(function='text',\n question='Disease:',\n default='healthy')\n self.template_attributes.state_exact = sfaira_questionary(function='text',\n question='Sample state:',\n default='healthy')\n self.template_attributes.sample_source = sfaira_questionary(function='text',\n question='Sample source:',\n default='NA')\n is_cell_type_annotation = sfaira_questionary(function='confirm',\n question='Does your dataset have a cell type annotation?',\n default='No')\n if is_cell_type_annotation:\n self.template_attributes.cell_types_original_obs_key = sfaira_questionary(function='text',\n question='Cell type annotation obs key:',\n default='')\n self.template_attributes.year = sfaira_questionary(function='text',\n question='Year:',\n default='2021')\n first_author = author[0] if isinstance(author, list) else author\n try:\n first_author_lastname = first_author.split(',')[0]\n except KeyError:\n print('[bold yellow] First author was not in the expected format. 
Using full first author for the id.')\n first_author_lastname = first_author\n self.template_attributes.id_without_doi = f'{clean_id_str(self.template_attributes.organism)}_' \\\n f'{clean_id_str(self.template_attributes.organ)}_' \\\n f'{clean_id_str(self.template_attributes.year)}_' \\\n f'{clean_id_str(self.template_attributes.assay_sc)}_' \\\n f'{clean_id_str(first_author_lastname)}_001'\n self.template_attributes.id = f'{self.template_attributes.id_without_doi}_' \\\n f'{self.template_attributes.doi_sfaira_repr}'\n if self.template_attributes.dataloader_type == 'single_dataset':\n self.template_attributes.download_url_data = sfaira_questionary(function='text',\n question='URL to download the data',\n default='https://ftp.ncbi.nlm.nih.gov/geo/')\n self.template_attributes.download_url_meta = sfaira_questionary(function='text',\n question='URL to download the meta data',\n default='https://ftp.ncbi.nlm.nih.gov/geo/')\n self.template_attributes.create_extra_description = sfaira_questionary(function='confirm',\n question='Do you want to add additional custom metadata?',\n default='Yes')\n if is_cell_type_annotation:\n print('[bold blue]You will have to run \\'sfaira annotate-dataloader\\' after the template has been created and filled.')\n else:\n print('[bold blue]You can skip \\'sfaira annotate-dataloader\\'.')\n\n def _template_attributes_to_dict(self) -> dict:\n \"\"\"\n Create a dict from the our Template Structure dataclass\n :return: The dict containing all key-value pairs with non empty values\n \"\"\"\n return {key: val for key, val in asdict(self.template_attributes).items() if val != ''}\n\n def _create_dataloader_template(self):\n template_path = f'{self.TEMPLATES_PATH}/{self.template_attributes.dataloader_type}'\n cookiecutter(f'{template_path}',\n output_dir=self.out_path,\n no_input=True,\n overwrite_if_exists=True,\n extra_context=self._template_attributes_to_dict())\n\n def create_datadir(self, path_data):\n os.makedirs(os.path.join(path_data, self.template_attributes.doi_sfaira_repr))\n","sub_path":"sfaira/commands/create_dataloader.py","file_name":"create_dataloader.py","file_ext":"py","file_size_in_byte":12122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154460982","text":"__author__ = 'andy'\nfrom ParentFuzz import ParentFuzz\nimport requests\nfrom Generator import *\nfrom Mutator import mutate_list\n\n\nclass ClientFuzz(ParentFuzz):\n\n def encode_correctly(self, form_values):\n \"\"\" Encodes the convenient array into the x-www-form-urlencoded form \"\"\"\n ret = \"\"\n for i in range(0, len(form_values)):\n try:\n ret += str(form_values[i]).encode('utf-8', 'replace')\n except (UnicodeEncodeError, UnicodeDecodeError):\n ret += generate_safe_text() # give it something safe(ish) (ascii/utf-8)\n if i % 2 == 0:\n ret += \"=\"\n else:\n ret += \"&\"\n return ret[:-1] # don't return the extraneous &\n\n def generate(self):\n # For code depth, I'm only going to fuzz the values, not the names here\n value_list = self.payload_template[1::2] # Grabs all odd indices in list\n value_list = cf_list(value_list)\n\n # This statement recombines the names to the generated values\n pl = [item for name_val_pair in zip(self.payload_template[::2], value_list) for item in name_val_pair]\n\n req = requests.post(self.url_template,\n data=self.encode_correctly(pl),\n headers=self.header_template)\n self.packets_sent += 1\n\n def targeted_mutate(self):\n # Mutate both the names and the values for the mutation\n full_payload = 
self.payload_template[:] # Copies over the list so we aren't mutating the template\n full_payload = mutate_list(full_payload)\n\n req = requests.post(self.url_template,\n data=self.encode_correctly(full_payload),\n headers=self.header_template)\n self.packets_sent += 1\n\n def fuzz(self, pcap=\"\", ip=\"\", port=\"\"):\n if pcap == \"\" or ip == \"\" or port == \"\":\n self.url_template = \"http://\" + self.client_ip + \":\" + self.client_port + \"/test\"\n super(ClientFuzz, self).fuzz(self.client_pcap, self.client_ip, self.client_port)\n else:\n self.url_template = \"http://\" + ip + \":\" + str(port) + \"/test\"\n super(ClientFuzz, self).fuzz(pcap, ip, str(port))\n\n def __init__(self, configs, fuzz_type, num_packets):\n super(ClientFuzz, self).__init__(configs, fuzz_type, num_packets)\n # Templates of what a \"normal\" packet will contain\n \"\"\"\n Payload form params passed:\n oauthKey = cool_app_id\n oauthSecret = secret\n accessTokenEndPoint = http%3A%2F%2Flocalhost%3A8080%2Foauth2%2Ftoken\n authorizationURL = http%3A%2F%2Flocalhost%3A8080%2Foauth2%2Fauthorize&\n authorizationURLComplete = http%3A%2F%2Flocalhost%3A80\n \"\"\"\n self.payload_template = [\"oauthKey\", \"cool_app_id\",\n \"oauthSecret\", \"secret\",\n \"accessTokenEndPoint\", \"http://\" + self.auth_ip + \":\" + self.auth_port + \"/oauth2/token\",\n \"authorizationURL\", \"http://\" + self.auth_ip + \":\" + self.auth_port + \"/oauth2/authorize\",\n \"authorizationURLComplete\", \"http://\" + self.auth_ip + \":\" + self.auth_port]\n\n self.header_template = {\"Origin\": \"http://\" + self.client_ip + \":\" + self.client_port,\n \"Host\": self.client_ip + \":\" + self.client_port,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Referrer\": \"http://\" + self.client_ip + \":\" + self.client_port}\n\n","sub_path":"ClientFuzz.py","file_name":"ClientFuzz.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"527642375","text":"from lettuce import step, world\nfrom django.core.urlresolvers import reverse\nfrom lettuce.django import django_url\nfrom log.models import Post, Tag\nfrom nose.tools import assert_is_not_none, assert_equals, assert_true, assert_greater\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom terrain import checkNode\n\n\n@step('Given There are following posts in the database:')\ndef given_there_following_posts_in_the_database(step):\n\tfor posts_dict in step.hashes:\n\t\targs = dict(posts_dict)\n\n\t\t# Check if has to be edited.\n\t\ttry:\n\t\t\tedited = args['edited'] == 'yes'\n\t\t\tdel args['edited']\n\t\texcept KeyError:\n\t\t\tedited = False\n\n\t\t# Check tags.\n\t\ttry:\n\t\t\ttags = args['tags'].split(',')\n\t\t\tdel args['tags']\n\t\texcept KeyError:\n\t\t\ttags = []\n\n\t\tpost = Post(**args)\n\t\tpost.save()\n\n\t\t# Add tags.\n\t\tt = (tag.strip() for tag in tags if len(tag.strip()) > 0)\n\t\tfor tagName in t:\n\t\t\ttry:\n\t\t\t\ttag = Tag.objects.get(name=tagName)\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\ttag = Tag(name=tagName)\n\t\t\t\ttag.save()\n\t\t\tpost.tags.add(tag)\n\n\t\tif edited:\n\t\t\tpost.save()\n\n\n@step(u'Then I see post \"([^\"]*)\" with introduction \"([^\"]*)\" that \"([^\"]*)\" edited')\n@step(u'And I see post \"([^\"]*)\" with introduction \"([^\"]*)\" that \"([^\"]*)\" edited')\ndef then_i_see_post_with_introduction_and_tags(step, title, introduction, was_edited):\n\theader = world.browser.find_by_xpath('//article/header/h2[text()=\"' + title + 
'\"]')\n\tcheckNode(header)\n\n\tactual_introduction = world.browser.find_by_xpath('//article/header/h2[text()=\"' + title + '\"]/../../div[contains(., \"' + introduction + '\")]')\n\tcheckNode(actual_introduction)\n\n\tif was_edited == 'was':\n\t\tamount = 2\n\telse:\n\t\tamount = 1\n\n\ttimes = world.browser.find_by_xpath('//article/header/h2[text()=\"' + title + '\"]/../..//time')\n\tcheckNode(times)\n\tassert_equals(len(times), amount)\n\n\n@step(u'And post \"([^\"]*)\" has tag \"([^\"]*)\" with label \"([^\"]*)\"')\n@step(u'And post \"([^\"]*)\" has tags \"([^\"]*)\" with label \"([^\"]*)\"')\ndef and_post_has_tags_with_label(step, title, tags, label):\n\ttags = [tag.strip() for tag in tags.split(',') if len(tag.strip()) > 0]\n\ttags_to_check = list(tags)\n\n\tif len(tags):\n\t\tactual_tags = world.browser.find_by_xpath('//article/header/h2[text()=\"%s\"]/../../div[@class=\"tags\"]/a' % title)\n\t\tcheckNode(actual_tags)\n\t\tadditional = []\n\t\tfor actual_tag in actual_tags:\n\t\t\ttry:\n\t\t\t\tindex = tags_to_check.index(actual_tag.text)\n\t\t\t\tdel tags_to_check[index]\n\t\t\texcept ValueError:\n\t\t\t\tadditional.append(actual_tag.text)\n\n\t\tassert len(additional) == 0, 'There are unexpected tags (%s).' % ', '.join(['\"' + str(t) + '\"' for t in additional])\n\t\tassert len(tags_to_check) == 0, 'There are missing tags (%s).' % ', '.join(['\"' + str(t) + '\"' for t in tags_to_check])\n\telse:\n\t\traise RuntimeError('No tags given.')\n\n\n@step(u'And post \"([^\"]*)\" has no tags')\ndef and_post_has_no_tags(step, title):\n\ttags = world.browser.find_by_xpath('//article/header/h2[text()=\"%s\"]/../../div[@class=\"tags\"]/a' % title)\n\tassert len(tags) == 0, 'Tags container for \"%s\" is present, but should not.' % title\n\n\n@step(u'And I click \"more\" link for \"([^\"]*)\"')\ndef and_i_click_link_for_post(step, title):\n\tlink = world.browser.find_by_xpath('//article/header/h2[text()=\"' + title + '\"]/../..//a[@class=\"more\"]')\n\tcheckNode(link)\n\tlink.click()\n\n\n@step(u'Then I see post \"([^\"]*)\" with introduction \"([^\"]*)\" and content \"([^\"]*)\" that \"([^\"]*)\" edited')\ndef then_i_see_post_with_introduction_and_content(step, title, introduction, content, was_edited):\n\tthen_i_see_post_with_introduction_and_tags(step, title, introduction, was_edited)\n\tactual_content = world.browser.find_by_xpath('//article/header/h2[text()=\"' + title + '\"]/../../div[contains(., \"' + content + '\")]')\n\tcheckNode(actual_content)\n\n\n","sub_path":"log/features/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175692639","text":"from math import *\nimport numpy as np\nimport scipy.spatial.distance as distance\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.arrays import ArrayDatatype as ADT\nfrom PyQt5 import QtGui, QtCore, QtOpenGL,QtWidgets, QtOpenGL\nimport time\n## Program imports\nimport constants\nimport fileReaders\nimport openGl\nimport calculations\n\n##Constant#\n\nATOMS = constants.element_parser()\nATOM_SCALE = 0.5\nCYLINDER_RADIUS = 0.2\nH_RADIUS = 0.18\nATOMIC_COVA_SCALE = 0.2\n\nTRANSLATE_X_DIV = 24.9\nTRANSLATE_Y_DIV = 17.75\nROTATE_X_DIV = 60\nROTATE_Y_DIV = 75\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\n\nclass Window(QtWidgets.QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n\n self.dockView = []\n\n 
self.init_main_window()\n\n self.init_toolbar()\n self.init_info_window()\n # self.init_view_window()\n self.init_menu()\n\n #### Init definitions ####\n def init_main_window(self):\n self.setGeometry(1260, 50, 1280, 720)\n self.setWindowTitle(\"Structure viewer and editor\")\n self.setWindowIcon(QtGui.QIcon('favicon.png'))\n self.statusBar()\n self.setUnifiedTitleAndToolBarOnMac(True)\n\n def init_info_window(self):\n ## Infolist widget\n self.infoList = QtWidgets.QListWidget()\n self.infoList.addItem(\"Open Structure to display info\")\n self.infoList.setFont(QtGui.QFont(\"Verdana\", 12))\n self.infoList.setWordWrap(True)\n self.dockInfo = QtWidgets.QDockWidget(\"Structure info\", self)\n self.dockInfo.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)\n self.dockInfo.setMaximumWidth(300)\n self.dockInfo.setWidget(self.infoList)\n self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dockInfo)\n\n ## View widget\n\n def init_view_window(self):\n if self.dockView != []:\n return\n self.dockView = QtWidgets.QDockWidget(\"View options\")\n self.viewOptions = QtWidgets.QWidget()\n self.dockView.setMaximumWidth(300)\n self.dockView.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)\n\n verticalLayout = QtWidgets.QVBoxLayout()\n groupBox = self.create_images_groupbox()\n verticalLayout.setAlignment(QtCore.Qt.AlignTop)\n verticalLayout.addWidget(groupBox)\n self.viewOptions.setLayout(verticalLayout)\n self.dockView.setWidget(self.viewOptions)\n self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dockView)\n\n def create_images_groupbox(self):\n\n groupBox = QtWidgets.QGroupBox('Images')\n groupBox.setMaximumHeight(200)\n\n gridLayout = QtWidgets.QGridLayout()\n\n label1 = QtWidgets.QLabel('-', groupBox)\n label1.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignHCenter)\n gridLayout.addWidget(label1, 1, 1, 1, 1)\n\n label2 = QtWidgets.QLabel('+', groupBox)\n label2.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignHCenter)\n gridLayout.addWidget(label2, 1, 2, 1, 1)\n\n xLabel = QtWidgets.QLabel('X', groupBox)\n xLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\n gridLayout.addWidget(xLabel, 3, 0, 1, 1)\n\n yLabel = QtWidgets.QLabel('Y', groupBox)\n yLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\n gridLayout.addWidget(yLabel, 4, 0, 1, 1)\n\n zLabel = QtWidgets.QLabel('Z', groupBox)\n zLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\n gridLayout.addWidget(zLabel, 5, 0, 1, 1)\n\n xMinBox = QtWidgets.QSpinBox(groupBox)\n xMinBox.setAccelerated(False)\n xMinBox.setProperty(\"value\", 0)\n xMinBox.valueChanged[int].connect(self.glWidget.set_negximage_counter)\n gridLayout.addWidget(xMinBox, 3, 1, 1, 1)\n\n xPlusBox = QtWidgets.QSpinBox(groupBox)\n xPlusBox.valueChanged[int].connect(self.glWidget.set_posximage_counter)\n gridLayout.addWidget(xPlusBox, 3, 2, 1, 1)\n\n yMinBox = QtWidgets.QSpinBox(groupBox)\n yMinBox.valueChanged[int].connect(self.glWidget.set_negyimage_counter)\n gridLayout.addWidget(yMinBox, 4, 1, 1, 1)\n\n yPlusBox = QtWidgets.QSpinBox(groupBox)\n yPlusBox.valueChanged[int].connect(self.glWidget.set_posyimage_counter)\n gridLayout.addWidget(yPlusBox, 4, 2, 1, 1)\n\n zMinBox = QtWidgets.QSpinBox(groupBox)\n zMinBox.valueChanged[int].connect(self.glWidget.set_negzimage_counter)\n gridLayout.addWidget(zMinBox, 5, 1, 1, 1)\n\n zPlusBox = QtWidgets.QSpinBox(groupBox)\n 
zPlusBox.valueChanged[int].connect(self.glWidget.set_poszimage_counter)\n gridLayout.addWidget(zPlusBox, 5, 2, 1, 1)\n\n groupBox.setLayout(gridLayout)\n return groupBox\n\n def init_menu(self):\n ## Menu Actions\n extractAction = QtWidgets.QAction(\"&Quit\", self)\n extractAction.setStatusTip('Leave The App')\n extractAction.triggered.connect(self.close_application)\n\n openFileAction = QtWidgets.QAction(\"&Open File\", self)\n openFileAction.setStatusTip('Open Structure')\n openFileAction.setShortcut(\"Ctrl+O\")\n openFileAction.triggered.connect(self.file_open)\n\n saveFileAction = QtWidgets.QAction(\"&Save File\", self)\n saveFileAction.setStatusTip('Save Structure')\n saveFileAction.setShortcut(\"Ctrl+S\")\n saveFileAction.triggered.connect(self.file_save)\n\n saveFileAsAction = QtWidgets.QAction(\"&Save File As\", self)\n saveFileAsAction.setStatusTip('Save Structure')\n saveFileAsAction.setShortcut(\"Ctrl+Alt+S\")\n saveFileAsAction.triggered.connect(self.file_save_as)\n\n closeFileAction = QtWidgets.QAction(\"&Close\", self)\n closeFileAction.setStatusTip('Close File')\n closeFileAction.setShortcut(\"Ctrl+C\")\n closeFileAction.triggered.connect(self.file_close)\n\n ## Construct Menu\n self.mainMenu = self.menuBar()\n\n self.fileMenu = self.mainMenu.addMenu('&File')\n self.fileMenu.addAction(openFileAction)\n self.fileMenu.addAction(saveFileAction)\n self.fileMenu.addAction(saveFileAsAction)\n self.fileMenu.addAction(closeFileAction)\n self.fileMenu.addAction(extractAction)\n\n self.viewMenu = self.mainMenu.addMenu('&View')\n self.viewMenu.addAction(self.dockInfo.toggleViewAction())\n\n def init_toolbar(self):\n ## Toolbar actions\n extractAction1 = QtWidgets.QAction(QtGui.QIcon('icons/cross.xpm'), 'flee the scene', self)\n extractAction1.triggered.connect(self.close_application)\n openFileAction = QtWidgets.QAction(QtGui.QIcon('icons/folder.xpm'), 'open structure', self)\n openFileAction.triggered.connect(self.file_open)\n\n ## Build Toolbar\n self.toolbar = self.addToolBar(\"Toolbar\")\n self.toolbar.addAction(openFileAction)\n self.toolbar.addAction(extractAction1)\n\n #### Command/Action definitions ####\n def file_open(self):\n file = QtWidgets.QFileDialog.getOpenFileName(self, 'Open Structure')[0]\n filePath, fileExtension = os.path.splitext(file)\n if fileExtension == '.in':\n reader = fileReaders.QEReader(file)\n atoms = reader.get_atoms()\n cell = reader.get_cell_param()\n self.structure = Structure(atoms, cell)\n self.display_info()\n self.raise_opengl_widget(self.structure)\n self.init_view_window()\n\n elif fileExtension == '.out':\n fileData = open(file, 'r')\n self.editor()\n with fileData:\n text = fileData.read()\n self.textEdit.setText(text)\n self.textEdit.setDocumentTitle(file)\n\n else:\n msgBox = QtWidgets.QMessageBox()\n msgBox.setText(\"\"\"Error: invalid file,\\nPlease select a file with extension .in or .out\"\"\")\n msgBox.exec()\n\n def display_info(self):\n self.infoList.clear()\n totalAt = self.structure.get_amount_atoms()\n atoms = self.structure.get_atoms()\n symbols = []\n self.infoList.addItem(\"Number of atoms: \" + str(totalAt))\n for at in atoms:\n symbols.append(at.get_symbol())\n for symbol in symbols:\n self.infoList.addItem(symbol)\n\n self.dockInfo.setWidget(self.infoList)\n\n def raise_opengl_widget(self, structure):\n qgl_format = QtOpenGL.QGLFormat()\n qgl_format.setVersion(4, 1)\n qgl_format.setProfile(QtOpenGL.QGLFormat.CompatibilityProfile)\n qgl_format.setSampleBuffers(True)\n self.glWidget = 
structWidget(structure,format=qgl_format)\n self.setCentralWidget(self.glWidget)\n\n def file_save(self):\n\n if type(self.centralWidget()) == QtWidgets.QTextEdit:\n name = self.textEdit.documentTitle()\n file = open(name, 'w')\n text = self.textEdit.toPlainText()\n file.write(text)\n file.close()\n self.textEdit.close()\n\n else:\n pass\n\n def file_save_as(self):\n\n if type(self.centralWidget()) == QtWidgets.QTextEdit:\n name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Structure')\n file = open(name, 'w')\n text = self.textEdit.toPlainText()\n file.write(text)\n file.close()\n self.textEdit.close()\n\n else:\n pass\n\n def file_close(self):\n\n if type(self.centralWidget()) == QtWidgets.QTextEdit:\n self.textEdit.close()\n self.raise_opengl_widget()\n else:\n pass\n\n def editor(self):\n self.textEdit = QtWidgets.QTextEdit()\n self.setCentralWidget(self.textEdit)\n\n def close_application(self):\n choice = QtWidgets.QMessageBox.question(self, 'Extract!', \"Exit\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n if choice == QtWidgets.QMessageBox.Yes:\n print(\"Extracting\")\n sys.exit()\n else:\n pass\n\n\nclass Structure:\n def __init__(self, inAtoms, cell):\n self._atoms = []\n self._cell = cell\n self.pop_atoms(inAtoms)\n self.calculate_origin()\n\n def pop_atoms(self, inAtoms):\n i = 0\n while i < np.shape(inAtoms)[0]:\n element = ATOMS[inAtoms[i, 0]]\n x = inAtoms[i, 1]\n y = inAtoms[i, 2]\n z = inAtoms[i, 3]\n at = Atom(element, x, y, z)\n self._atoms.append(at)\n i += 1\n\n def calculate_origin(self):\n tot_x = 0\n tot_y = 0\n tot_z = 0\n for atom in self._atoms:\n tot_x += atom.x\n tot_y += atom.y\n tot_z += atom.z\n mean_x = tot_x / len(self._atoms)\n mean_y = tot_y / len(self._atoms)\n mean_z = tot_z / len(self._atoms)\n self.origin = np.array([mean_x, mean_y, mean_z])\n\n def get_atoms(self):\n return self._atoms\n\n def get_cell(self):\n return self._cell\n\n def get_amount_atoms(self):\n return len(self._atoms)\n\n\nclass Atom:\n def __init__(self, element, x, y, z):\n self.element = element\n self.x = float(x)\n self.y = float(y)\n self.z = float(z)\n\n def get_coord(self):\n return np.array([self.x, self.y, self.z])\n\n def get_name(self):\n return self.element.get_name()\n\n def get_symbol(self):\n return self.element.get_symbol()\n\n def get_cova(self):\n return self.element.get_cova()\n\n def get_color(self):\n return self.element.get_color()\n\n def set_coord(self, coord):\n self.x = coord[0]\n self.y = coord[1]\n self.z = coord[2]\n\n\nclass structWidget(QtOpenGL.QGLWidget):\n def __init__(self, structure, format=None):\n super(structWidget, self).__init__(format,None)\n self.lastPos = QtCore.QPoint()\n self.scaling = 1\n self.structure = structure\n self.atoms = self.structure.get_atoms()\n self.cell = self.structure.get_cell()\n self.origin = structure.origin\n self.glOrigin = self.origin\n self.atCoord = []\n self.cellVertices = self.gen_full_cell()\n self.primAtoms = []\n\n self.axes = np.ndarray((3, 3), dtype=np.float, buffer=np.array([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0]]))\n self.glBonds = []\n self.glCells = []\n\n self.secAtoms = []\n self.secAtPos = []\n\n self.xPosImageCounter = 0\n self.xPosImages = []\n self.xNegImageCounter = 0\n self.xNegImages = []\n\n self.yPosImageCounter = 0\n self.yPosImages = []\n self.yNegImageCounter = 0\n self.yNegImages = []\n\n self.zPosImageCounter = 0\n self.zPosImages = []\n self.zNegImageCounter = 0\n self.zNegImages = []\n\n def gen_full_cell(self):\n cell = np.vstack((np.array([0, 0, 
0]), self.cell))\n        extraCoord = np.array(\n            [[self.cell[0, 0] + self.cell[1, 0], self.cell[0, 1] + self.cell[1, 1], self.cell[0, 2] + self.cell[1, 2]],\n             [self.cell[0, 0] + self.cell[2, 0], self.cell[0, 1] + self.cell[2, 1], self.cell[0, 2] + self.cell[2, 2]],\n             [self.cell[1, 0] + self.cell[2, 0], self.cell[1, 1] + self.cell[2, 1], self.cell[1, 2] + self.cell[2, 2]],\n             [self.cell[0, 0] + self.cell[1, 0] + self.cell[2, 0], self.cell[0, 1] + self.cell[1, 1] + self.cell[2, 1],\n              self.cell[0, 2] + self.cell[1, 2] + self.cell[2, 2]]])\n        return np.vstack((cell, extraCoord)) - np.dot(np.ones((8, 1)), np.reshape(self.origin, (1, 3)))\n\n    def minimumSizeHint(self):\n        return QtCore.QSize(50, 50)\n\n    def rotate(self, dx, dy):\n\n        self.axes = np.dot(self.axes,\n                           np.array([[cos(dx), sin(dx), 0], [-sin(dx), cos(dx), 0], [0, 0, 1]]))\n\n        self.axes = np.dot(self.axes,\n                           np.array([[1, 0, 0], [0, cos(dy), sin(dy)], [0, -sin(dy), +cos(dy)]]))\n\n        self.primAtoms[:][2] = [np.dot(coord, self.axes) for coord in self.atCoord]\n        self.glOrigin = np.dot(self.origin, self.axes)\n        # t1 = time.time()\n        self.updateGL()\n        # print(\"total updateGl took: {}\".format(time.time() - t1))\n\n    def normalize_angle(self, angle):\n        while (angle < 0):\n            angle += 360 * 16\n\n        while (angle > 360 * 16):\n            angle -= 360 * 16\n\n        return angle\n\n    def resizeGL(self, width, height):\n        # print('resize',width, height)\n        side = min(width, height)\n        if side < 0:\n            return\n\n        glViewport((width - side) // 2, (height - side) // 2, side, side)\n\n        glMatrixMode(GL_PROJECTION)\n        glLoadIdentity()\n        glFrustum(-1.0, +1.0, -1.0, 1.0, 5.0, 1000.0)\n        glMatrixMode(GL_MODELVIEW)\n        glLoadIdentity()\n        # glTranslated(0.0, 0.0, -80.0)\n        gluLookAt(0, -100, 0, 0, 0, 0, 0, 0, 1)\n\n    def mousePressEvent(self, event):\n        self.lastPos = event.pos()\n        #self.select_event()\n\n    def mouseMoveEvent(self, event):\n\n        dx = event.x() - self.lastPos.x()\n        dy = event.y() - self.lastPos.y()\n        ## here I can map all the actions onto the different mouse buttons\n        if event.buttons() & QtCore.Qt.MiddleButton:\n            glTranslatef(dx / TRANSLATE_X_DIV, 0, -dy / TRANSLATE_Y_DIV)\n            self.updateGL()\n        elif event.buttons() & QtCore.Qt.RightButton:\n            self.rotate(dx / ROTATE_X_DIV, dy / ROTATE_Y_DIV)\n\n        self.lastPos = event.pos()\n\n    def wheelEvent(self, QWheelEvent):\n        if self.scaling > 0.0001:\n            self.scaling += 12 / QWheelEvent.delta()\n\n        if self.scaling < 0.001:\n            self.scaling = 0.001\n        print(self.scaling)\n        self.updateGL()\n\n    def initializeGL(self):\n        # print('initialize')\n        ## main drawing setup\n        glClearColor(0.0, 0.0, 0.0, 1.0)\n\n        glFrontFace(GL_CCW)\n        glEnable(GL_LIGHTING)\n        glEnable(GL_DEPTH_TEST)\n        glDepthFunc(GL_LESS)\n        glEnable(GL_CULL_FACE)\n        glShadeModel(GL_SMOOTH)\n        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n        glLineWidth(1)\n\n        ## Set Up default light sources\n        glLightfv(GL_LIGHT0, GL_POSITION, (-1, 1, -1, 0))\n        glLightfv(GL_LIGHT0, GL_AMBIENT, (0.2, 0.2, 0.2, 1))\n        glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.9, 0.9, 0.9, 1))\n        glLightfv(GL_LIGHT0, GL_SPECULAR, (0.9, 0.9, 0.9, 1))\n        glEnable(GL_LIGHT0)\n\n        ## Set up material attributes\n        glEnable(GL_COLOR_MATERIAL)\n        glMaterial(GL_FRONT, GL_SHININESS, 20)\n        glMaterial(GL_FRONT, GL_DIFFUSE, (0.8, 0.8, 0.8, 1))\n        glMaterial(GL_FRONT, GL_SPECULAR, (0.8, 0.8, 0.8, 1))\n        glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE)\n\n        ##Set up Camera\n        glEnable(GL_NORMALIZE)\n\n        ##set up the atoms\n        for atom in self.atoms:\n            coord = atom.get_coord() - self.origin\n            at = self.make_atom(atom)\n            self.primAtoms.append((atom, at, coord))\n\n        
self.make_images()\n self.make_posximages()\n self.make_bonds()\n self.make_cells()\n\n\n def select_event(self):\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)\n glDisable(GL_COLOR_MATERIAL)\n glShadeModel(GL_FLAT)\n glPolygonMode(GL_FRONT, GL_FILL)\n glLoadIdentity()\n #glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glPushMatrix()\n glScaled(self.scaling, self.scaling, self.scaling)\n for image in self.xPosImages:\n for atom, position,color in zip(image[0], image[2],image[3]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_simple_atom(position1[0, 0], position1[0, 1], position1[0, 2],color,atom.get_cova()*ATOM_SCALE)\n for image in self.xNegImages:\n for atom, position, color in zip(image[0], image[2], image[3]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_simple_atom(position1[0, 0], position1[0, 1], position1[0, 2], color,\n atom.get_cova() * ATOM_SCALE)\n for image in self.yPosImages:\n for atom, position, color in zip(image[0], image[2], image[3]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_simple_atom(position1[0, 0], position1[0, 1], position1[0, 2], color,\n atom.get_cova() * ATOM_SCALE)\n for image in self.yNegImages:\n for atom, position, color in zip(image[0], image[2], image[3]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_simple_atom(position1[0, 0], position1[0, 1], position1[0, 2], color,\n atom.get_cova() * ATOM_SCALE)\n for image in self.zPosImages:\n for atom, position, color in zip(image[0], image[2], image[3]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_simple_atom(position1[0, 0], position1[0, 1], position1[0, 2], color,\n atom.get_cova() * ATOM_SCALE)\n for image in self.zNegImages:\n for atom, position, color in zip(image[0], image[2], image[3]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_simple_atom(position1[0, 0], position1[0, 1], position1[0, 2], color,\n atom.get_cova() * ATOM_SCALE)\n\n glReadBuffer(GL_FRONT)\n print(glReadPixels(self.lastPos.x(), self.lastPos.y(), 1, 1, GL_RGB, GL_FLOAT))\n glPopMatrix()\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glEnable(GL_COLOR_MATERIAL)\n glShadeModel(GL_SMOOTH)\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n self.updateGL()\n\n def make_images(self):\n for atom in self.primAtoms:\n self.secAtoms.append((atom[0], atom[2] + self.cell[0]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[1]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[1]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] + self.cell[1]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] + self.cell[1]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] - self.cell[1]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] - self.cell[1]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[1] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[1] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[1] - self.cell[2]))\n 
self.secAtoms.append((atom[0], atom[2] - self.cell[1] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] + self.cell[1] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] + self.cell[1] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] - self.cell[1] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] + self.cell[1] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] + self.cell[0] - self.cell[1] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] + self.cell[1] - self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] - self.cell[1] + self.cell[2]))\n self.secAtoms.append((atom[0], atom[2] - self.cell[0] - self.cell[1] - self.cell[2]))\n\n def paintGL(self):\n try:\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n except:\n pass\n\n glPushMatrix()\n\n glScaled(self.scaling, self.scaling, self.scaling)\n ##Draw atoms, cell and bonds\n try:\n #t1 = time.time()\n for image in self.xPosImages:\n for atom, position in zip(image[1], image[2]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_atom(atom, position1[0, 0], position1[0, 1], position1[0, 2])\n for image in self.xNegImages:\n\n for atom, position in zip(image[1], image[2]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_atom(atom, position1[0, 0], position1[0, 1], position1[0, 2])\n for image in self.yPosImages:\n\n for atom, position in zip(image[1], image[2]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_atom(atom, position1[0, 0], position1[0, 1], position1[0, 2])\n for image in self.yNegImages:\n\n for atom, position in zip(image[1], image[2]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_atom(atom, position1[0, 0], position1[0, 1], position1[0, 2])\n for image in self.zPosImages:\n for atom, position in zip(image[1], image[2]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_atom(atom, position1[0, 0], position1[0, 1], position1[0, 2])\n for image in self.zNegImages:\n for atom, position in zip(image[1], image[2]):\n position1 = np.dot(np.reshape(position, (1, 3)), self.axes)\n self.draw_atom(atom, position1[0, 0], position1[0, 1], position1[0, 2])\n\n #print(\"drawing atoms took: {}\".format(time.time() - t1))\n #t1 = time.time()\n openGl.draw_cells(self.glCells, self.axes)\n #print(\"drawing cells took:{}\".format(time.time() - t1))\n #t1 = time.time()\n updated_bonds = calculations.update_bonds(self.glBonds, self.axes)\n for bond in updated_bonds:\n self.draw_bond(bond[0], bond[1], bond[2], bond[3])\n\n #print('drawing bonds took: {}'.format(time.time() - t1))\n except:\n pass\n\n glPopMatrix()\n\n def make_atom(self, atom):\n genList = glGenLists(1)\n glNewList(genList, GL_COMPILE)\n\n quadratic = gluNewQuadric()\n gluQuadricNormals(quadratic, GLU_SMOOTH)\n gluQuadricTexture(quadratic, GL_TRUE)\n color = atom.get_color()\n if 'Hydrogen' in atom.get_name():\n radius = atom.get_cova() + H_RADIUS\n else:\n radius = atom.get_cova()\n\n glColor3f(color[0], color[1], color[2])\n gluSphere(quadratic, radius * ATOM_SCALE, 32, 32)\n\n glEndList()\n return genList\n\n def draw_atom(self, atom, dx, dy, dz):\n glPushMatrix()\n glTranslated(dx, dy, dz)\n glCallList(atom)\n glPopMatrix()\n\n def draw_simple_atom(self,dx,dy,dz,color,radius):\n glPushMatrix()\n glTranslated(dx,dy,dz)\n genList = glGenLists(1)\n glNewList(genList, GL_COMPILE)\n quadratic = gluNewQuadric()\n 
gluQuadricNormals(quadratic, GLU_FLAT)\n gluQuadricTexture(quadratic, GL_FALSE)\n glColor3f(color[0], color[1], color[2])\n gluSphere(quadratic, radius+1, 10, 10)\n glEndList()\n glCallList(genList)\n glPopMatrix()\n\n def make_cells(self):\n self.glCells=[]\n edges = ((0, 1),\n (0, 2),\n (0, 3),\n (1, 4),\n (1, 5),\n (2, 6),\n (2, 4),\n (3, 6),\n (5, 3),\n (6, 7),\n (5, 7),\n (4, 7))\n self.glCells.append((self.cellVertices, edges))\n i = 1\n while i <= self.xPosImageCounter:\n ##TODO If performance dips due to double vertices from translation do something about it\n self.glCells.append((self.cellVertices + i * np.dot(np.ones((8, 1)), np.reshape(self.cell[0],(1,3))), edges))\n i += 1\n i = 1\n while i <= self.xNegImageCounter:\n self.glCells.append((self.cellVertices - i * np.dot(np.ones((8, 1)), np.reshape(self.cell[0],(1,3))), edges))\n i += 1\n i = 1\n while i <= self.yPosImageCounter:\n for cell in self.glCells[:self.xPosImageCounter+self.xNegImageCounter+1]:\n vertices = cell[0]+i * np.dot(np.ones((8,1)),np.reshape(self.cell[1],(1,3)))\n self.glCells.append((vertices,edges))\n i += 1\n i=1\n while i <= self.yNegImageCounter:\n for cell in self.glCells[:self.xPosImageCounter+self.xNegImageCounter+1]:\n vertices = cell[0]-i * np.dot(np.ones((8,1)),np.reshape(self.cell[1],(1,3)))\n self.glCells.append((vertices,edges))\n i += 1\n i=1\n while i <= self.zPosImageCounter:\n for cell in self.glCells[:self.xPosImageCounter+self.xNegImageCounter+(self.xPosImageCounter+self.xNegImageCounter+1)*self.yPosImageCounter+(self.xPosImageCounter+self.xNegImageCounter+1)*self.yNegImageCounter+1]:\n vertices = cell[0] + i*np.dot(np.ones((8,1)),np.reshape(self.cell[2],(1,3)))\n self.glCells.append((vertices,edges))\n i += 1\n i = 1\n while i <= self.zNegImageCounter:\n for cell in self.glCells[:self.xPosImageCounter + self.xNegImageCounter + (self.xPosImageCounter+self.xNegImageCounter+1)*self.yPosImageCounter + (self.xPosImageCounter+self.xNegImageCounter+1)*self.yNegImageCounter + 1]:\n vertices = cell[0] - i * np.dot(np.ones((8, 1)), np.reshape(self.cell[2],(1,3)))\n self.glCells.append((vertices, edges))\n i += 1\n\n def draw_cells(self):\n glPushMatrix()\n glBegin(GL_LINES)\n glColor3f(10,10,10)\n cell1 = self.glCells[0]\n vertices1 = np.dot(cell1[0],self.axes)\n edges1 = cell1[1]\n for edge in edges1:\n for vertex in edge:\n glVertex3fv(vertices1[vertex])\n glEnd()\n glBegin(GL_LINES)\n glColor3f(1,1,1)\n for cell in self.glCells[1:]:\n vertices = np.dot(cell[0],self.axes)\n edges = cell[1]\n for edge in edges:\n for vertex in edge:\n glVertex3fv(vertices[vertex])\n glEnd()\n glPopMatrix()\n\n def make_bonds(self):\n self.glBonds = []\n image1 = self.xPosImages[0]\n i = 0\n atoms = image1[0]\n positions = image1[2]\n while i < len(atoms):\n atom1 = atoms[i]\n pos1 = positions[i]\n\n i1 = i + 1\n while i1 < len(atoms):\n atom2 = atoms[i1]\n pos2 = positions[i1]\n cov1 = atom1.get_cova()\n cov2 = atom2.get_cova()\n\n dist = distance.euclidean(pos1, pos2)\n if cov1 + cov2 + ATOMIC_COVA_SCALE > dist:\n color1 = atom1.get_color()\n color2 = atom2.get_color()\n self.glBonds.append((pos1, pos2, self.make_cylinder(dist / 2, color1)))\n self.glBonds.append((pos2, pos1, self.make_cylinder(dist / 2, color2)))\n self.translate_bonds_prim(pos1, pos2, color1, color2, dist)\n\n i1 += 1\n i += 1\n ########## intercell bonds############\n\n atoms1 = image1[0]\n positions1 = image1[2]\n for atom1, pos1 in zip(atoms1, positions1):\n for atom in self.secAtoms:\n atom2 = atom[0]\n pos2 = atom[1]\n cov1 = 
atom1.get_cova()\n        cov2 = atom2.get_cova()\n\n        dist = distance.euclidean(pos1, pos2)\n        if cov1 + cov2 + ATOMIC_COVA_SCALE > dist:\n            color = atom1.get_color()\n            glBond = self.make_cylinder(dist / 2, color)\n            self.glBonds.append((pos1, pos2, glBond))\n            self.translate_bonds_sec(pos1, pos2, glBond)\n\n    def iter_image_offsets(self):\n        # Yield every non-zero lattice translation spanned by the visible periodic\n        # images: the full cartesian product of the positive and negative image\n        # counters along the three cell vectors, excluding the home cell.\n        for i in range(-self.xNegImageCounter, self.xPosImageCounter + 1):\n            for j in range(-self.yNegImageCounter, self.yPosImageCounter + 1):\n                for k in range(-self.zNegImageCounter, self.zPosImageCounter + 1):\n                    if i == 0 and j == 0 and k == 0:\n                        continue\n                    yield i * self.cell[0] + j * self.cell[1] + k * self.cell[2]\n\n    def translate_bonds_prim(self, pos1, pos2, color1, color2, dist):\n        # Replicate a two-coloured bond (one half-length cylinder per atom colour)\n        # into every periodic image of the cell.\n        for offset in self.iter_image_offsets():\n            self.glBonds.append((pos1 + offset, pos2 + offset,\n                                 self.make_cylinder(dist / 2, color1)))\n            self.glBonds.append((pos2 + offset, pos1 + offset,\n                                 self.make_cylinder(dist / 2, color2)))\n\n    def translate_bonds_sec(self, pos1, pos2, bond):\n        # Replicate a single-coloured bond display list into every periodic image.\n        for offset in self.iter_image_offsets():\n            self.glBonds.append((pos1 + offset, pos2 + offset, bond))\n\n    def draw_bond(self, coord1, angle, rotax, bond):\n\n        glPushMatrix()\n\n        glTranslatef(coord1[0], coord1[1], coord1[2])\n        glRotatef(angle, rotax[0], rotax[1], rotax[2])\n        glCallList(bond)\n        glPopMatrix()\n\n    def make_cylinder(self, height, color):\n        genList = glGenLists(1)\n        glNewList(genList, GL_COMPILE)\n\n        quadratic = gluNewQuadric()\n        glColor3f(color[0], color[1], color[2])\n        gluQuadricNormals(quadratic, GLU_FLAT)\n        gluQuadricTexture(quadratic, GL_TRUE)\n        gluCylinder(quadratic, CYLINDER_RADIUS, CYLINDER_RADIUS, height, 20, 20)\n\n        glEndList()\n        return genList\n\n    def set_posximage_counter(self, amount):\n        self.xPosImageCounter = amount\n        self.make_posximages()\n        self.make_posyimages()\n        self.make_negyimages()\n        self.make_poszimages()\n        self.make_negzimages()\n        self.make_bonds()\n        self.make_cells()\n        self.updateGL()\n\n    def make_posximages(self):\n        self.xPosImages = []\n        i = 0\n        while i <= self.xPosImageCounter:\n            atomList = []\n            glAtomList = []\n            posList = []\n            idList = []\n            for atom in self.primAtoms:\n                atomList.append(atom[0])\n                glAtomList.append(atom[1])\n                posList.append(atom[2] + i * self.cell[0])\n                idList.append((i/256,0,0))\n            self.xPosImages.append((atomList, glAtomList, posList,idList))\n\n            i += 1\n\n    def set_negximage_counter(self, amount):\n        self.xNegImageCounter = amount\n        self.make_negximages()\n        self.make_posyimages()\n        self.make_negyimages()\n        self.make_poszimages()\n        self.make_negzimages()\n        self.make_bonds()\n        self.make_cells()\n        self.updateGL()\n\n    def make_negximages(self):\n        self.xNegImages = []\n        i = 1\n        while i <= self.xNegImageCounter:\n            atomList = []\n            glAtomList = []\n            posList = []\n            idList = []\n            for atom in self.primAtoms:\n                atomList.append(atom[0])\n                glAtomList.append(atom[1])\n                posList.append(atom[2] - i * self.cell[0])\n                idList.append(((127+i)/256,0,0))\n            self.xNegImages.append((atomList, glAtomList, posList,idList))\n            i += 1\n\n    def set_posyimage_counter(self, amount):\n        self.yPosImageCounter = amount\n        self.make_posyimages()\n        self.make_poszimages()\n        self.make_negzimages()\n        self.make_bonds()\n        self.make_cells()\n        self.updateGL()\n\n    def make_posyimages(self):\n        self.yPosImages = []\n        i = 1\n        while i <= self.yPosImageCounter:\n            atomList = []\n            glAtomList = []\n            posList = []\n            idList = []\n            for image in self.xPosImages:\n                for atom, glAtom, position,id in zip(image[0], image[1], image[2], image[3]):\n                    atomList.append(atom)\n                    glAtomList.append(glAtom)\n                    posList.append(position + i * self.cell[1])
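\n                    # The id triple is presumably a colour-picking id: the\n                    # positive-y image index is packed into the green channel as i/256.\n                    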
idList.append((id[0],i/256,0))\n for image in self.xNegImages:\n for atom, glAtom, position,id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position + i * self.cell[1])\n idList.append((id[0], i/256, 0))\n self.yPosImages.append((atomList, glAtomList, posList,idList))\n i += 1\n\n def set_negyimage_counter(self, amount):\n self.yNegImageCounter = amount\n self.make_negyimages()\n self.make_poszimages()\n self.make_negzimages()\n self.make_bonds()\n self.make_cells()\n self.updateGL()\n\n def make_negyimages(self):\n self.yNegImages = []\n i = 1\n while i <= self.yNegImageCounter:\n atomList = []\n glAtomList = []\n posList = []\n idList = []\n for image in self.xPosImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position - i * self.cell[1])\n idList.append((id[0],(127+i)/256,0))\n for image in self.xNegImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position - i * self.cell[1])\n idList.append((id[0],(127+i)/256,0))\n self.yNegImages.append((atomList, glAtomList, posList, idList))\n i += 1\n\n def set_poszimage_counter(self, amount):\n self.zPosImageCounter = amount\n self.make_poszimages()\n self.make_bonds()\n self.make_cells()\n self.updateGL()\n\n def make_poszimages(self):\n self.zPosImages = []\n i = 1\n while i <= self.zPosImageCounter:\n atomList = []\n glAtomList = []\n posList = []\n idList=[]\n for image in self.xPosImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position + i * self.cell[2])\n idList.append((id[0],id[1],i/256))\n for image in self.xNegImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position + i * self.cell[2])\n idList.append((id[0], id[1], i/256))\n for image in self.yPosImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position + i * self.cell[2])\n idList.append((id[0], id[1], i/256))\n for image in self.yNegImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position + i * self.cell[2])\n idList.append((id[0], id[1], i/256))\n self.zPosImages.append((atomList, glAtomList, posList, idList))\n i += 1\n\n def set_negzimage_counter(self, amount):\n self.zNegImageCounter = amount\n self.make_negzimages()\n self.make_bonds()\n self.make_cells()\n self.updateGL()\n\n def make_negzimages(self):\n self.zNegImages = []\n i = 1\n while i <= self.zNegImageCounter:\n atomList = []\n glAtomList = []\n posList = []\n idList = []\n for image in self.xPosImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position - i * self.cell[2])\n idList.append((id[0],id[1],(127+i)/256))\n for image in self.xNegImages:\n for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n atomList.append(atom)\n glAtomList.append(glAtom)\n posList.append(position - i * self.cell[2])\n idList.append((id[0], id[1], (127+i)/256))\n for image in 
self.yPosImages:\n                for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n                    atomList.append(atom)\n                    glAtomList.append(glAtom)\n                    posList.append(position - i * self.cell[2])\n                    idList.append((id[0], id[1], (127+i)/256))\n            for image in self.yNegImages:\n                for atom, glAtom, position, id in zip(image[0], image[1], image[2], image[3]):\n                    atomList.append(atom)\n                    glAtomList.append(glAtom)\n                    posList.append(position - i * self.cell[2])\n                    idList.append((id[0], id[1], (127+i)/256))\n            self.zNegImages.append((atomList, glAtomList, posList, idList))\n            i += 1\n\n    def get_vertices(self,atom,pos,q):\n        # Triangle-fan outline of the atom disc: centre vertex first, then q\n        # points evenly spaced around the circle.\n        i = 0\n        radius = atom.get_cova()*ATOM_SCALE\n        vertices = [pos[0],pos[2],pos[1]]\n        while i < 360:\n            a = pi/180 * i  # i degrees converted to radians\n            vertices.append(radius*cos(a)+pos[0])\n            vertices.append(pos[2])\n            vertices.append(radius*sin(a)+pos[1])\n            i += 360/q\n        return vertices\n\n    def select_atom(self):\n        windowSize = (self.size().width(),self.size().height())\n        print(windowSize,self.lastPos.x(),self.lastPos.y())\n        x = (self.lastPos.x()-windowSize[0]/2-30*(self.scaling-1))/24.04\n        z = (-self.lastPos.y()+windowSize[1]/2-45*(self.scaling-1))/17.1\n        print(x,z)\n        for image in self.xPosImages:\n            for atom , pos in zip(image[0],image[2]):\n                glPos = np.dot(np.reshape(pos,(1,3)),self.axes)\n                print(glPos)\n                if x < glPos[0,0]+atom.get_cova()*ATOM_SCALE/2 and x> glPos[0,0]-atom.get_cova()*ATOM_SCALE/2 and z< glPos[0,2]+atom.get_cova()*ATOM_SCALE/2 and z>glPos[0,2]-atom.get_cova()*ATOM_SCALE/2:\n                    print('ping',atom.get_name())\n\n\ndef run():\n    global app\n    app = QtWidgets.QApplication(sys.argv)\n    GUI = Window()\n    GUI.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":63747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"121783686","text":"# Solution to https://www.hackerrank.com/challenges/list-comprehensions/\n\ndef myfunction(x, y, z, n):\n    # Build the coordinate list with a single comprehension, skipping the\n    # combinations whose coordinates sum to n.\n    print([[i, j, k]\n           for i in range(0, x)\n           for j in range(0, y)\n           for k in range(0, z)\n           if i + j + k != n])\n\n\nif __name__ == '__main__':\n    x = int(input()) + 1\n    y = int(input()) + 1\n    z = int(input()) + 1\n    n = int(input())\n    myfunction(x, y, z, n)\n","sub_path":"basic/ListComprehensions.py","file_name":"ListComprehensions.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"536527307","text":"from fractions import Fraction\r\nfrom math import gcd\r\n\r\nt = 1\r\nN = t*t + t + 4\r\nXp = -4*(t*t+t+1)*(t*t+t+1)\r\nYp = 4*(2*t+1)*(t*t+t+1)*(3*t*t+3*t+7)\r\n\r\nXq = 0\r\nYq = 0\r\n\r\na3 = 1\r\na2 = 4*N*N + 12*N - 3\r\na1 = 32*(N+3)\r\n\r\n# y^2 = a3*x^3 + a2*x^2 + a1*x\r\n# Given two points P(Xp, Yp) and Q(Xq, Yq) on the graph,\r\n# M() calculates the slope of PQ\r\ndef M(Xp, Yp, Xq, Yq, a2, a1):\r\n    if Xp == Xq and Yp == Yq:\r\n        f1 = 3*Xp*Xp + 2*a2*Xp + a1\r\n        f2 = 2*Yp\r\n        return Fraction(f1, f2)\r\n    else:\r\n        f1 = Yq - Yp\r\n        f2 = Xq - Xp\r\n        return Fraction(f1, f2)\r\n\r\n# y^2 = a3*x^3 + a2*x^2 + a1*x\r\n# Given two points P(Xp, Yp) and Q(Xq, Yq) on the graph,\r\n# R() returns the point R=P+Q and the corresponding (a,b,c) of the R point\r\ndef R(Xp, Yp, Xq, Yq, a2, a1):\r\n    m = M(Xp, Yp, Xq, Yq, a2, a1)\r\n    Xr = m*m - a2 - Xp - Xq\r\n    Yr = m*(Xp-Xr) - Yp\r\n
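\r\n    # Map the curve point (Xr, Yr) back to a candidate triple (a, b, c); by the\r\n    # identity checked in verify() below, an all-positive triple solves\r\n    # a/(b+c) + b/(a+c) + c/(a+b) = N, which the loop searches for by repeated\r\n    # point addition.\r\n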
    a = Fraction(8*(N+3)-Xr+Yr, 2*(4-Xr)*(N+3))\r\n    b = Fraction(8*(N+3)-Xr-Yr, 2*(4-Xr)*(N+3))\r\n    c = Fraction(-4*(N+3)-(N+2)*Xr, (4-Xr)*(N+3))\r\n\r\n    return a,b,c,Xr,Yr\r\n\r\n\r\nrounds = 1\r\nwhile True:\r\n    rounds = rounds+1\r\n    #print(\"rounds =\", rounds)\r\n    a, b, c, Xr, Yr = R(Xp, Yp, Xq, Yq, a2, a1)\r\n    if (a>0) and (b>0) and (c>0):\r\n        break\r\n    else:\r\n        Xq = Xr\r\n        Yq = Yr\r\n\r\ndef verify(a,b,c,Xr,Yr):\r\n    print(\"Xr =\", Xr)\r\n    print(\"Yr =\", Yr)\r\n    print(\"\")\r\n\r\n    print(\"Verify 0:\")\r\n    v1 = N*(a+b)*(b+c)*(c+a)\r\n    v2 = a*(a+b)*(a+c) + b*(b+a)*(b+c) + c*(c+a)*(c+b)\r\n    print(\"v1 =\", v1)\r\n    print(\"v2 =\", v2)\r\n    print(\"\")\r\n\r\nGCD1 = gcd(a.denominator, b.denominator)\r\nLCM1 = a.denominator * b.denominator // GCD1\r\nGCD = gcd(LCM1, c.denominator)\r\nLCM = LCM1 * c.denominator // GCD\r\na = a*LCM\r\nb = b*LCM\r\nc = c*LCM\r\n#verify(a,b,c,Xr,Yr)\r\ntotal = a+b+c\r\n#print(total)\r\n\r\nn = input()\r\ndiv = 1000 ** int(n)\r\nout = total//div % 1000\r\n\r\nprint(\"%03d\" % out)","sub_path":"2019SampleSolutions/06永恆的力量 Eternal Power.py","file_name":"06永恆的力量 Eternal Power.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"34658714","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nfrom .base import *\n\nSECRET_KEY = 'SECRET_KEY'\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),  # ':memory:',\n        'USER': '',\n        'PASSWORD': '',\n        'HOST': '',\n        'PORT': '',\n    }\n}\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n        'LOCATION': 'pyreader_default_cache',\n    },\n    'session': {\n        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n        'LOCATION': 'pyreader_session_cache',\n    }\n}\n","sub_path":"pyreader/pyreader/settings/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"216624055","text":"# MIT License\n#\n# Copyright (c) 2018-2019 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport inspect\nimport logging\nimport shlex\nimport subprocess\nfrom enum import Enum\nfrom pathlib import Path\n\nfrom _pytest.python import Function\n\nfrom requre.constants import RELATIVE_TEST_DATA_DIRECTORY, DEFAULT_SUFIX\nfrom requre.exceptions import PersistentStorageException\n\nlogger = logging.getLogger(__name__)\n\n\nclass StorageMode(Enum):\n    default = 0\n    read = 1\n    write = 2\n    append = 3\n\n\ndef run_command(cmd, error_message=None, cwd=None, fail=True, output=False):\n    \"\"\"\n    subprocess wrapper, copied from packit, for higher level handling of executing commands\n    :param cmd:\n    :param error_message:\n    :param cwd:\n    :param fail:\n    :param output:\n    :return:\n    \"\"\"\n    if not isinstance(cmd, list):\n        cmd = shlex.split(cmd)\n\n    logger.debug(\"cmd = '%s'\", \" \".join(cmd))\n\n    cwd = cwd or str(Path.cwd())\n    error_message = error_message or f\"Command {cmd} failed.\"\n\n    shell = subprocess.run(\n        cmd,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n        shell=False,\n        cwd=cwd,\n        universal_newlines=True,\n    )\n\n    if not output:\n        # output is returned, let the caller process it\n        logger.debug(\"%s\", shell.stdout)\n    stderr = shell.stderr.strip()\n    if stderr:\n        logger.error(\"%s\", shell.stderr)\n\n    if shell.returncode != 0:\n        logger.error(\"Command %s failed\", shell.args)\n        logger.error(\"%s\", error_message)\n        if fail:\n            raise PersistentStorageException(\n                f\"Command {shell.args!r} failed: {error_message}\"\n            )\n        success = False\n    else:\n        success = True\n\n    if not output:\n        return success\n    return shell.stdout\n\n\nclass Replacement:\n    def __init__(self, name, key, parent, one_filter, replacement) -> None:\n        self.name = name\n        self.key = key\n        self.parent = parent\n        self.filter = one_filter\n        self.replacement = replacement\n\n\ndef get_module_of_previous_context():\n    current_ctx = inspect.currentframe()\n    while True:\n        current_ctx = current_ctx.f_back\n        frameinfo_args = (current_ctx,) + inspect.getframeinfo(current_ctx, 1)\n        frameinfo = inspect.FrameInfo(*frameinfo_args)\n        module = inspect.getmodule(frameinfo[0])\n        if module and not module.__name__.startswith(\"requre\"):\n            return module\n\n\ndef get_class_that_defined_method(meth):\n    \"\"\"\n    return the class in which the given bound method meth is defined\n\n    :param meth: method whose defining class we want to find\n    :returns: class or None\n    \"\"\"\n    # https://stackoverflow.com/questions/961048/get-class-that-defined-method\n    for cls in inspect.getmro(meth.__self__.__class__):\n        if meth.__name__ in cls.__dict__:\n            return cls\n    return None\n\n\ndef get_datafile_filename(obj, suffix=DEFAULT_SUFIX):\n    \"\"\"\n    get default path for data files.\n    It consists of 3 pieces: \"location of test\"/test_data/test_file_name/\"test_id or function name\"\n\n    :param obj: object from which try to guess name of file and function\n    :return: str with path where to store data_file\n    \"\"\"\n\n    try:\n        if isinstance(obj, Function):\n            # pytest fixture\n            current_fn_file_name = obj.module.__file__\n        else:\n            # try to get filename via class if possible (pytest way)\n            current_fn_file_name = inspect.getfile(obj.__class__)\n    except (AttributeError, TypeError):\n        # try to get filename from object\n        current_fn_file_name = inspect.getfile(obj)\n\n
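    # The resulting path is assembled below as\n    # <module dir>/<RELATIVE_TEST_DATA_DIRECTORY>/<test file name>/<test name>.<suffix>\n    real_path_dir = 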
Path(current_fn_file_name).parent.absolute()\n test_file_name = Path(current_fn_file_name).name.rsplit(\".\", 1)[0]\n try:\n # try to use object.id() function (it is defined inside pytest unittests)\n test_name = obj.id()\n except AttributeError:\n try:\n if isinstance(obj, Function):\n # pytest fixture\n test_name = obj.name\n else:\n # try to use __name__ of the object (typically name of function)\n test_name = obj.__name__\n except AttributeError:\n # if not possible, use this name as name of data file\n test_name = \"static_test_data_name\"\n testdata_dirname = real_path_dir / RELATIVE_TEST_DATA_DIRECTORY / test_file_name\n\n return testdata_dirname / f\"{test_name}.{suffix}\"\n","sub_path":"requre/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411209635","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport _pickle as pickle\n\nbaseFilename = \"cifar-10-batches-py/data_batch_\"\ndata = []\nlabels = []\nfilenames = []\n\ndef loadCIFAR10():\n for i in range(1,2):\n with open(baseFilename + str(i), \"rb\") as f:\n out = pickle.load(f, encoding=\"latin1\")\n data.extend(out[\"data\"])\n filenames.extend(out[\"filenames\"])\n global labels\n for label in out[\"labels\"]:\n z = [0]*10\n z[label] = 1\n labels.append(z)\n\ndef sig(x): return 1/(1+np.exp(-x))\ndef dSig(x): return x*(1-x)\n\ndef relu(x): return np.maximum(0, x)\ndef drelu(x): return np.minimum((np.maximum(0, x)), 1)\n\ndef softmax(x): return np.exp(x)/np.sum(np.exp(x))\n\ndef main():\n loadCIFAR10()\n\n # Output Labels (50, 10)\n y = labels[0:1000]\n\n # Input Images (50, 3072)\n x = data[0:1000]\n\n # Center on 0 (max pixel value is 255)\n x = np.subtract(x, 128.)\n\n # Normalize data (max centered value is 128)\n x = np.divide(x, 128.)\n\n # Create small, random weights\n W1 = (2.*(np.random.rand(3072, 10)) - 1.) 
* 1e-2\n\n    print(\"W1:\", W1)\n\n    for i in range(1000):\n        # Multiply by weights\n        H1 = np.matmul(x, W1)\n\n        # Rectify\n        H1 = sig(H1)\n        # H1 = relu(H1)\n\n        # Create mask (inverted dropout)\n        # M = (np.random.rand(*H1.shape) < p) / p\n        # H1 = np.multiply(H1, M)\n\n        # Find Error\n        err = np.subtract(y, H1)\n\n        # Calculate delta\n        delta = err * dSig(H1) * 1e-2\n        # delta = err * drelu(H1) * 1e-5\n\n        W1 += np.dot(x.T, delta)\n\n        if i % 100 == 0:\n            print(i, \":\", H1[0])\n\n    np.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n    print(\"final:\", H1[0])\n    print(\"actual:\", y[0])\n\n    print(\"final:\", H1[100])\n    print(\"actual:\", y[100])\n\n    ind = 0\n    out = np.matmul(np.divide(np.subtract(data[ind], 128), 128), W1)\n    print(out)\n    print(softmax(out))\n    print(labels[ind])\n\n    # np.isclose()\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"cifar-10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"188149971","text":"import RPi.GPIO as GPIO\nimport time\nimport threading\n\nclass SonicSonar(threading.Thread):\n    soundSpeed = 17150.0\n\n    def __init__(self, threadId, name, trigPin, echoPin, settleTime = 0.1):\n        threading.Thread.__init__(self)\n        self.threadId = threadId\n        self.name = name\n        self.trigPin = trigPin\n        self.echoPin = echoPin\n        self.settleTime = settleTime\n        self.lastDistance = None\n\n    def run(self):\n        print(\"Distance Measurement In Progress\")\n        self.loop = True\n        GPIO.setup(self.trigPin,GPIO.OUT)\n        GPIO.setup(self.echoPin,GPIO.IN)\n        GPIO.output(self.trigPin, False)\n        self.sonarLoop()\n        print(\"Distance Measurement ended\")\n\n    def stop(self):\n        self.loop = False\n\n    def getDistance(self):\n        return self.lastDistance\n\n    def sonarLoop(self):\n        while self.loop:\n            time.sleep(self.settleTime)\n            GPIO.output(self.trigPin, True)\n            time.sleep(0.00001)\n            GPIO.output(self.trigPin, False)\n\n            pulse_start = time.time()\n\n            while GPIO.input(self.echoPin)==0:\n                pulse_start = time.time()\n\n            while GPIO.input(self.echoPin)==1:\n                pulse_end = time.time()\n\n            pulse_duration = pulse_end - pulse_start\n            distance = pulse_duration * self.soundSpeed\n            self.lastDistance = round(distance, 2)\n\n\nif __name__ == \"__main__\":\n    GPIO.setmode(GPIO.BCM)\n    TRIG = 28\n    ECHO = 30\n    sonarThread = SonicSonar(1, \"Front Sonar\", TRIG, ECHO)\n\n    try:\n        sonarThread.start()\n\n        while True:\n            distance = sonarThread.getDistance()\n            print (\"Distance: \",distance,\"cm\")\n            time.sleep(0.5)\n    except KeyboardInterrupt:\n        sonarThread.stop()\n        sonarThread.join()\n    finally:\n        GPIO.cleanup()\n\n\n\n","sub_path":"Sources/sonicSonarTests/sonicSonar.py","file_name":"sonicSonar.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"479194147","text":"from typing import List\r\n\r\n#Given a sorted array, remove the duplicates in place\r\n#such that each element can appear at most twice and return the new length.\r\nclass Solution:\r\n    # @param A : list of integers\r\n    # @return an integer\r\n    def removeDuplicates(self, A):\r\n        i=0\r\n        j=0\r\n        n=len(A)\r\n        while i<n:\r\n            t=A[i]\r\n            count=0\r\n            # advance i to the last copy of t, counting the extra copies\r\n            while i+1<n and A[i+1]==t:\r\n                count=count+1\r\n                i=i+1\r\n            if count>=1:\r\n                A[j]=t\r\n                j=j+1\r\n            A[j]=t\r\n            j=j+1\r\n\r\n            i=i+1\r\n\r\n        return j\r\n
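\r\n# For example, A = [1,1,1,2,2,3] is rewritten so that A[:5] == [1,1,2,2,3]\r\n# and the method returns 5.\r\n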
j\r\n","sub_path":"ZZZ/Remove_Duplicates.py","file_name":"Remove_Duplicates.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594581106","text":"#!/usr/bin/env python3\n\ndef f1():\n X = 88\n def f2():\n print(X)\n return f2\n\nprint(f1)\n#Вызываем функци f1 и возвращаем функцию f2\naction = f1()\n\nprint(action)\n#Вызываем подфункцию f2\naction()\n\n\n# Страница 492\ndef makeActions():\n acts = []\n for i in range(5):\n acts.append(lambda x: i ** x)\n return acts\n\nacts = makeActions()\nprint(acts[0])\n\n#Поиск в объемлющей области видимости (enclose)\n#происходит только при вызове, а не в цикле функции\n#поэтому у всех будет последнее значение i\nprint(acts[0](2))\nprint(acts[2](2))\n\n#Для решения этой ситуации нужно задать во \n# вложенной фукции значение по умолчанию\ndef makeActions():\n acts = []\n for i in range(5):\n #i=i задаёт значение по умолчанию\n acts.append(lambda x, i=i: i ** x)\n return acts\n\nacts = makeActions()\nprint(acts[0])\n\nprint(acts[0](2))\nprint(acts[2](2))\n\n","sub_path":"script-09.py","file_name":"script-09.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"142130661","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom scipy import ndimage\nimport random\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\n# In[77]:\n\n\nfrom google.colab import files\nuploaded = files.upload()\n\n\n# In[ ]:\n\n\nimgnames = ['x_satImage_088.png']\nx_train = np.array([np.asarray(cv2.imread(imgname)) for imgname in imgnames], dtype=np.float32)\nimgnames = ['y_satImage_088.png']\ny_train = np.array([np.asarray(cv2.imread(imgname)) for imgname in imgnames], dtype=np.float32)\nimgx = x_train[0]\nimgy = y_train[0]\n\n\n# In[81]:\n\n\nplt.subplot(121)\nplt.imshow(imgx.astype(int))\nplt.subplot(122)\nplt.imshow(imgy.astype(int))\nplt.show()\n\n\n# In[89]:\n\n\nsmallx = cv2.resize(imgx, dsize=(263, 263), interpolation=cv2.INTER_CUBIC)\nresizedx = cv2.resize(smallx, dsize=(400, 400), interpolation=cv2.INTER_CUBIC)\nsmally = cv2.resize(imgy, dsize=(263, 263), interpolation=cv2.INTER_CUBIC)\nresizedy = cv2.resize(smally, dsize=(400, 400), interpolation=cv2.INTER_CUBIC)\nplt.subplot(121)\nplt.imshow(resizedx.astype(int))\nplt.subplot(122)\nplt.imshow(resizedy.astype(int))\nplt.show()\n\n\n# In[82]:\n\n\nrotx = ndimage.rotate(imgx, angle=90, order=1, reshape=False, axes=(0,1))\nroty = ndimage.rotate(imgy, angle=90, order=1, reshape=False, axes=(0,1))\nplt.subplot(121)\nplt.imshow(rotx.astype(int))\nplt.subplot(122)\nplt.imshow(roty.astype(int))\nplt.show()\n\n\n# In[83]:\n\n\nflipx = np.flipud(imgx)\nflipy = np.flipud(imgy)\nplt.subplot(121)\nplt.imshow(flipx.astype(int))\nplt.subplot(122)\nplt.imshow(flipy.astype(int))\nplt.show()\n\n\n# In[84]:\n\n\ncrop_len = 100\nzoomx = cv2.resize(imgx[crop_len:-crop_len,crop_len:-crop_len], dsize=(400, 400), interpolation=cv2.INTER_CUBIC)\nzoomy = cv2.resize(imgy[crop_len:-crop_len,crop_len:-crop_len], dsize=(400, 400), interpolation=cv2.INTER_CUBIC)\nplt.subplot(121)\nplt.imshow(zoomx.astype(int))\nplt.subplot(122)\nplt.imshow(zoomy.astype(int))\nplt.show()\n\n\n# In[85]:\n\n\ncrop_len = 60\nrotx_ = ndimage.rotate(imgx, angle=45, order=1, reshape=False, axes=(0,1))\nzrotx = cv2.resize(rotx_[crop_len:-crop_len,crop_len:-crop_len], dsize=(400, 400), interpolation=cv2.INTER_CUBIC)\nroty_ = ndimage.rotate(imgy, angle=45, 
order=1, reshape=False, axes=(0,1))\nzroty = cv2.resize(roty_[crop_len:-crop_len,crop_len:-crop_len], dsize=(400, 400), interpolation=cv2.INTER_CUBIC)\nplt.subplot(121)\nplt.imshow(zrotx.astype(int))\nplt.subplot(122)\nplt.imshow(zroty.astype(int))\nplt.show()\n\n","sub_path":"demos/data-aug-demo.py","file_name":"data-aug-demo.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"379440135","text":"from django import forms\nfrom .models import Comment\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Column\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ('ratings','content',)\n helper = FormHelper()\n helper.layout = Layout(\n Div(\n Div('ratings', style='text-align:center;', css_class='form-group col-xs'),\n Div('content',style='text-align:center; width: 100%;', css_class='form-group col-8'),\n css_class='form-row align-items-container'\n ),\n Submit('submit', 'Submit', style='width: 100%;', css_class='col-sm-6 btn-info'),\n\n )\n","sub_path":"products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268841440","text":"# uncompyle6 version 3.3.5\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]\n# Embedded file name: c:\\Jenkins\\live\\output\\win_64_static\\Release\\python-bundle\\MIDI Remote Scripts\\_Framework\\Capabilities.py\n# Compiled at: 2018-11-30 15:48:11\nfrom __future__ import absolute_import, print_function, unicode_literals\nGENERIC_SCRIPT_KEY = b'generic_script'\nPORTS_KEY = b'ports'\nCONTROLLER_ID_KEY = b'controller_id'\nTYPE_KEY = b'surface_type'\nFIRMWARE_KEY = b'firmware_version'\nAUTO_LOAD_KEY = b'auto_load'\nVENDORID = b'vendor_id'\nPRODUCTIDS = b'product_ids'\nMODEL_NAMES = b'model_names'\nDIRECTIONKEY = b'direction'\nPORTNAMEKEY = b'name'\nMACNAMEKEY = b'mac_name'\nPROPSKEY = b'props'\nHIDDEN = b'hidden'\nSYNC = b'sync'\nSCRIPT = b'script'\nNOTES_CC = b'notes_cc'\nREMOTE = b'remote'\nPLAIN_OLD_MIDI = b'plain_old_midi'\n\ndef __create_port_dict(direction, port_name, mac_name, props):\n assert isinstance(direction, basestring)\n assert isinstance(port_name, basestring)\n assert props == None or type(props) is list\n if props:\n for prop in props:\n if not isinstance(prop, basestring):\n raise AssertionError\n\n assert mac_name == None or isinstance(mac_name, basestring)\n capabilities = {DIRECTIONKEY: direction, PORTNAMEKEY: port_name, PROPSKEY: props}\n if mac_name:\n capabilities[MACNAMEKEY] = mac_name\n return capabilities\n\n\ndef inport(port_name=b'', props=[], mac_name=None):\n \"\"\" Generate a ...\"\"\"\n return __create_port_dict(b'in', port_name, mac_name, props)\n\n\ndef outport(port_name=b'', props=[], mac_name=None):\n \"\"\" Generate a ...\"\"\"\n return __create_port_dict(b'out', port_name, mac_name, props)\n\n\ndef controller_id(vendor_id, product_ids, model_name):\n \"\"\" Generate a hardwareId dict\"\"\"\n assert type(vendor_id) is int\n assert type(product_ids) is list\n for product_id in product_ids:\n if not type(product_id) is int:\n raise AssertionError\n\n assert isinstance(model_name, (basestring, list))\n if isinstance(model_name, basestring):\n model_names = [\n model_name]\n else:\n model_names = model_name\n return {VENDORID: vendor_id, 
PRODUCTIDS: product_ids, MODEL_NAMES: model_names}","sub_path":"MIDI Remote Scripts/_Framework/Capabilities.py","file_name":"Capabilities.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"219150608","text":"import cx_Oracle\nimport lec08_darabase.oracle_comfig as cfg\n\nwith cx_Oracle.connect(cfg.user, cfg.pwd, cfg.dsn) as connection:\n with connection.cursor() as cursor:\n sql_insert = \"insert into dept2(deptno, dname, loc) values(91, '강의장10번', 'Seoul')\"\n cursor.execute(sql_insert)\n\n sql_select = 'select * from dept2'\n cursor.execute(sql_select)\n for row in cursor:\n print(row)\n connection.commit()","sub_path":"lec08_darabase/ex05_insert.py","file_name":"ex05_insert.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"471774726","text":"import zmq\nimport time\nfrom threading import Thread\nfrom queue import Queue\n\n\ndef client_msg_receive(q):\n context = zmq.Context()\n sock = context.socket(zmq.REP)\n sock.bind(\"tcp://127.0.0.1:5677\")\n while True:\n message = str(sock.recv().decode())\n sock.send_string(\"Echo: \" + message)\n q.put(message)\n\ndef client_msg_broadcast(q):\n context = zmq.Context()\n sock = context.socket(zmq.PUB)\n sock.bind(\"tcp://127.0.0.1:5678\")\n while True:\n while q.qsize() != 0:\n current_message = q.get()\n sock.send_string(current_message)\n q.task_done()\n\nq = Queue(maxsize=0)\n\ninput = (Thread(target=client_msg_receive, args=(q, )))\ninput.start()\n\nbroadcast = (Thread(target=client_msg_broadcast, args=(q, )))\nbroadcast.start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"607570188","text":"import markdown\nfrom django.core import serializers\nfrom django.core.serializers.base import DeserializationError\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom oldp.apps.courts.models import Court\nfrom oldp.apps.laws.models import *\nfrom oldp.apps.search.models import RelatedContent, SearchableContent\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\nclass Case(models.Model, SearchableContent):\n title = models.CharField(\n max_length=255,\n default='',\n blank=True,\n help_text='Title (currently not used due to copyright issues)'\n )\n slug = models.SlugField(\n max_length=200,\n unique=True,\n db_index=True,\n help_text='Used to urls (consists of court, date, file number)',\n )\n court = models.ForeignKey(\n Court,\n default=Court.DEFAULT_ID,\n help_text='Responsible court entity',\n on_delete=models.SET_DEFAULT\n )\n court_raw = models.CharField(\n max_length=255,\n default='{}',\n help_text='Raw court information from crawler (JSON)'\n ) # JSON field\n court_chamber = models.CharField(\n max_length=150,\n null=True,\n blank=True,\n help_text='Court chamber (e.g. 1. 
Senat)'\n )\n date = models.DateField(\n null=True,\n db_index=True,\n help_text='Publication date as in source'\n )\n created_date = models.DateTimeField(\n auto_now_add=True,\n help_text='Entry is created at this date time'\n )\n updated_date = models.DateTimeField(\n auto_now=True,\n help_text='Date time of last change'\n )\n file_number = models.CharField(\n max_length=100,\n null=True,\n blank=True,\n help_text='File number as defined by court'\n )\n type = models.CharField(\n max_length=100,\n null=True,\n blank=True,\n help_text='Type of decision (Urteil, Beschluss, ...)'\n )\n pdf_url = models.URLField(\n # TODO Maybe we should store PDF files locally as well\n null=True,\n blank=True,\n max_length=255,\n help_text='URL to original PDF file (not in use)'\n )\n source_url = models.URLField(\n max_length=255,\n help_text='Path to source of crawler'\n )\n source_homepage = models.URLField(\n max_length=200,\n help_text='Link to source homepage'\n )\n source_name = models.CharField(\n max_length=100,\n help_text='Name of source (crawler class)'\n )\n private = models.BooleanField(\n default=False,\n db_index=True,\n help_text='Private content is hidden in production for non-staff users'\n )\n raw = models.TextField(\n null=True,\n blank=True,\n help_text='Raw content (HTML) from crawler that can used to reconstruct all case information'\n )\n content = models.TextField(\n help_text='Case full-text formatted in Legal Markdown'\n )\n annotations = models.TextField(\n blank=True\n )\n ecli = models.CharField(\n max_length=255,\n blank=True,\n help_text='European Case Law Identifier'\n )\n # source_path = None\n reference_markers = None\n references = None\n\n # Define files that will be excluded in JSON export / Elasticsearch document\n es_fields_exclude = ['content', 'raw']\n es_type = 'case'\n\n class Meta:\n unique_together = ((\"court\", \"file_number\"),)\n\n def is_private(self):\n return self.private\n\n def get_filename(self, ext='json'):\n return '%s.%s' % (self.slug, ext)\n\n def get_topics(self):\n # TODO\n return _('Unknown topic')\n\n def get_court(self) -> Court:\n return Court.objects.get(pk=self.court_id)\n\n def get_court_raw(self):\n return json.loads(self.court_raw)\n\n def get_relevant_laws(self):\n # TODO\n return []\n\n def get_references(self):\n \"\"\"\n Get reference with custom query (grouped by to_hash).\n :return:\n \"\"\"\n if self.references is None:\n from oldp.apps.references.models import CaseReference, CaseReferenceMarker\n\n query = '''\n SELECT *, COUNT(*) as `count`\n FROM ''' + CaseReference._meta.db_table + ''' as r, ''' + CaseReferenceMarker._meta.db_table + ''' as m\n WHERE r.marker_id = m.id AND m.referenced_by_id = %(source_id)s\n GROUP BY `to_hash`\n ORDER BY `count` DESC'''\n self.references = CaseReference.objects.raw(query, {'source_id': self.pk})\n\n # self.references = CaseReference.objects\\\n # .filter(marker__referenced_by=self)\\\n # .annotate(count=Count('to'))\\\n # .order_by('-count')\n\n return self.references\n\n def get_reference_markers(self):\n if self.reference_markers is None:\n from oldp.apps.references.models import CaseReferenceMarker\n self.reference_markers = CaseReferenceMarker.objects.filter(referenced_by=self)\n return self.reference_markers\n\n def get_type(self):\n return self.__class__.__name__\n\n def get_id(self):\n return self.id\n\n def get_content_as_html(self):\n return markdown.markdown(self.content, extensions=[\n 'oldp.apps.lib.legal_md.extensions.line_numbers',\n # 'markdown.extensions.meta'\n # 
'markdown.extensions.tables',\n # 'markdown.extensions.footnotes'\n ])\n\n def get_text(self) -> str:\n \"\"\" Case content as plain text\n\n :return: plain-text\n \"\"\"\n\n # if self.text != '':\n # return self.text\n\n raise NotImplementedError('get_text missing')\n\n def get_source_url(self) -> str:\n return self.source_url\n\n def get_title(self) -> str:\n\n try:\n court_name = self.get_court().name\n\n # Attach chamber if available\n if self.court_chamber is not None and self.court_chamber != '':\n court_name += ' (' + self.court_chamber + ')'\n except Court.DoesNotExist:\n court_name = '(no court)'\n\n return '%s vom %s - %s' % (self.get_case_type(), court_name, self.file_number)\n\n def get_short_title(self, max_length=75) -> str:\n title = self.get_title()\n if len(title) > max_length:\n return title[:max_length] + '...'\n else:\n return title\n\n def get_case_type(self):\n return self.type\n\n def get_date(self, date_format='%Y-%m-%d'):\n return self.date.strftime(date_format)\n\n def get_related(self, n=5):\n \"\"\"\n Related items that are pre-computed with \"generate_related_cases\" command.\n\n :param n: number of items\n :return:\n \"\"\"\n items = []\n for item in RelatedCase.objects.filter(seed_content=self).order_by('-score')[:n]:\n items.append(item.related_content)\n return items\n\n def get_url(self):\n if self.slug is None or self.slug == '':\n self.slug = 'no-slug'\n\n return reverse('cases:case', args=(self.slug,))\n\n def get_admin_url(self):\n return reverse('admin:cases_case_change', args=(self.pk, ))\n\n def get_es_url(self):\n return settings.ES_URL + '/case/%s' % self.id\n\n def get_search_snippet(self, max_length=100):\n if self.search_snippet is None:\n text = self.get_text()\n\n from oldp.apps.references.models import CaseReferenceMarker\n text = CaseReferenceMarker.remove_markers(text)\n\n return text[:max_length]\n else:\n return self.search_snippet\n\n def set_slug(self):\n # Transform date to string\n if isinstance(self.date, datetime.date):\n date_str = self.date.strftime('%Y-%m-%d')\n else:\n date_str = '%s' % self.date\n\n self.slug = self.court.slug + '-' + date_str+ '-' + slugify(self.file_number)\n\n def set_ecli(self):\n \"\"\"Generate ECLI from court code and file number\n\n See ECLI definition:\n\n Consists of:\n - ‘ECLI’: to identify the identifier as being a European Case Law Identifier;\n - the country code;\n - the code of the court that rendered the judgment;\n - the year the judgment was rendered;\n - an ordinal number, up to 25 alphanumeric characters, in a format that is decided upon by each Member State.\n Dots are allowed, but not other punctuation marks.\n\n \"\"\"\n self.ecli = 'ECLI:de:' + self.court.code + ':' + str(self.date.year) + ':' + slugify(self.file_number)\n\n def save_reference_markers(self):\n \"\"\"\n Save references markers generated by ExtractRefs processing step\n\n :return: None\n \"\"\"\n from oldp.apps.references.models import CaseReferenceMarker\n\n if self.reference_markers:\n for ref in self.reference_markers:\n marker = CaseReferenceMarker().from_ref(ref, self)\n marker.save()\n # logger.debug('Saved: %s' % marker)\n\n marker.set_references(marker.ids)\n marker.save_references()\n\n else:\n # logger.debug('No reference markers to save')\n pass\n\n def __str__(self):\n return 'Case(title=%s, file_number=%s)' % (self.get_title(), self.file_number)\n\n def to_json(self, file_path=None) -> str:\n json_str = serializers.serialize(\"json\", [self])\n\n if file_path is not None:\n with open(file_path, 'w') as f:\n 
f.write(json_str)\n\n return json_str\n\n @staticmethod\n def from_json_file(file_path):\n with open(file_path) as f:\n out = serializers.deserialize(\"json\", f.read()) # , ignorenonexistent=True)\n # print(len(out))\n\n try:\n for o in out:\n return o.object\n except DeserializationError:\n pass\n\n raise ValueError('Cannot deserialize: %s' % file_path)\n\n # MySQL utf8mb4 bugfix\n # if instance.raw is not None:\n # instance.raw = ''.join([char if ord(char) < 128 else '' for char in instance.raw])\n #\n # if instance.text is not None:\n # instance.text = ''.join([char if ord(char) < 128 else '' for char in instance.text])\n #\n # return instance\n\n @staticmethod\n def from_hit(hit):\n try:\n court = Court.objects.get(pk=hit['court'])\n except Court.DoesNotExist:\n court = None\n\n obj = Case(title=hit['title'], slug=hit['slug'], court=court, date=hit['date'], file_number=hit['file_number'],\n type=hit['type'], source_url=hit['source_url'], pdf_url=hit['pdf_url'], content=hit['text'])\n return obj\n\n @staticmethod\n def get_queryset(request=None):\n # TODO superuser?\n if settings.DEBUG:\n return Case.objects.all()\n else:\n # production\n # hide private content\n return Case.objects.filter(private=False)\n\n\ndef jsonfy_model_fields(obj, fields):\n for field in fields:\n items = getattr(obj, field)\n # print(type(items))\n if isinstance(items, list):\n if len(items) > 0 and isinstance(items[0], JSONSerializableObject):\n _items = []\n for item in items:\n _items.append(item.to_dict())\n setattr(obj, field, json.dumps(_items))\n else:\n setattr(obj, field, json.dumps(items))\n\n # try:\n # setattr(obj, field, json.dumps(items))\n # except TypeError as e:\n # if len(items) > 0 and isinstance(items[0], JSONSerializableObject):\n # _items = []\n # for item in items:\n #\n # _items.append(item.to_dict())\n # setattr(obj, field, json.dumps(_items))\n return obj\n\n\n@receiver(pre_save, sender=Case)\ndef pre_save_case(sender, instance: Case, *args, **kwargs):\n\n # Is private content?\n # logger.info('Determining if private: %s ' % instance)\n instance.private = 'jportal' in instance.source_url or 'juris' in instance.source_url\n\n if instance.slug is None or instance.slug == \"\":\n instance.set_slug()\n\n\nclass JSONSerializableObject(object):\n def to_json(self, file_path: str=None, indent=4) -> str:\n if file_path is None:\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=indent)\n else:\n with open(file_path, 'w') as f:\n json_str = self.to_json()\n f.write(json_str)\n f.close()\n return json_str\n\n def from_dict(self, _dict):\n self.__dict__ = _dict\n\n def to_dict(self):\n _dict = self.__dict__\n\n for k in self.__dict__:\n v = self.__dict__[k]\n # print('%s === %s' % (k, isinstance(v, JSONSerializableObject)))\n\n if isinstance(v, JSONSerializableObject):\n setattr(self, k, v.to_dict())\n\n # print('%s === list %s' % (k, isinstance(v, list)))\n\n if isinstance(v, list) and len(v) > 0 and isinstance(v[0], JSONSerializableObject):\n _v = []\n for i in v:\n _v.append(i.to_dict())\n setattr(self, k, _v)\n\n # print(type(v))\n\n # print(self.__dict__)\n\n return self.__dict__\n\n def from_json_file(self, file_path):\n return self.from_json(open(file_path).read())\n\n def from_json(self, json_str: str):\n self.__dict__ = json.loads(json_str)\n return self\n\n # @staticmethod\n # def remove_inner_padding(title):\n # \"\"\"\n # Remove title whitespaces (e.g. G r ü n d e : -> Gründe:). 
Do not remove whitespaces with I I I.?\n # :param title:\n # :return:\n # \"\"\"\n # m = re.findall(r'([^\\s])\\s', title)\n # if len(m) > 3 and m[0] != m[1] and m[1] != m[2]:\n # title = re.sub(r'([^\\s])\\s', '\\\\1', title)\n # return title\n\n\nclass RelatedCase(RelatedContent):\n seed_content = models.ForeignKey(Case, related_name='seed_id', on_delete=models.CASCADE)\n related_content = models.ForeignKey(Case, related_name='related_id', on_delete=models.CASCADE)\n\n","sub_path":"oldp/apps/cases/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259947271","text":"# -*- coding: utf-8 -*-\n# __author__ = 'qinjincheng'\n\ndef timethis(func):\n def wrapper(*args, **kwargs):\n print('INFO: begin of the function ({}) at {}'.format(func.__name__, __name__))\n result = func(*args, **kwargs)\n print('INFO: final of the function ({}) at {}'.format(func.__name__, __name__))\n return result\n return wrapper\n\nclass Test:\n @staticmethod\n def timethis(func):\n def wrapper(*args, **kwargs):\n print(dir(func))\n for i in dir(func):\n print('{} -> {}'.format(i, getattr(func, i)))\n print('INFO: begin of the function ({}) at {}'.format(func.__name__, func))\n result = func(*args, **kwargs)\n print('INFO: final of the function ({}) at {}'.format(func.__name__, func))\n return result\n\n return wrapper\n\nclass Count:\n @Test.timethis\n def countdown(self, n):\n while n> 0:\n n -= 1\n\nif __name__ == '__main__':\n c = Count()\n c.countdown(1000)\n","sub_path":"test/test_wrapper.py","file_name":"test_wrapper.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430106104","text":"import random\n\nboard = []\nfor x in range(0,10):\n board.append([\"-\"] * 10)\n\ndef print_board(board):\n # Prints numbers at the top and leaves a space first\n print(\" \",\" \".join(\"0123456789\"))\n # Create row numbers on the side\n for num, row in zip(\"0123456789\", board):\n print(num, \" \".join(row))\n\n'''\nis_open_sea(row, column, fleet) -- checks if the square given by row and column neither contains \nnor is adjacent (horizontally, vertically, or diagonally) to some ship in fleet. Returns Boolean\nTrue if so and False otherwise\n'''\ndef is_open_sea(row,column,fleet):\n if fleet == []:\n return True\n all_ship_coordinates = set()\n for elem in fleet:\n hor = elem[2]\n l = elem[3]\n all_ship_coordinates.add((elem[0],elem[1]))\n i = 1\n if hor == True:\n while i < l:\n all_ship_coordinates.add((elem[0],elem[1]+i))\n i += 1\n else:\n while i < l:\n all_ship_coordinates.add((elem[0]+i,elem[1]))\n i += 1\n\n for elem in all_ship_coordinates:\n if abs(row-elem[0]) <= 1 and abs(column - elem[1]) <= 1:\n return False\n return True \n'''\nok_to_place_ship_at(row, column, horizontal, length, fleet)-- checks if addition of a \nship, specified by row, column, horizontal, and length as in ship representation above, \nto the fleet results in a legal arrangement (see the figure above). If so, the function\nreturns Boolean True and it returns False otherwise. 
This function makes use of the\nfunction is_open_sea\n'''\ndef ok_to_place_ship_at(row, column, horizontal, length, fleet):\n    new_ship_coords = {(row,column)}\n    if horizontal == True:\n        if column + length > 10:\n            return False\n        for i in range(1,length):\n            new_ship_coords.add((row,column + i))\n    else:\n        if row + length > 10:\n            return False\n        for i in range(1,length):\n            new_ship_coords.add((row + i,column))\n\n    for elem in new_ship_coords:\n        if is_open_sea(elem[0],elem[1],fleet) == False:\n            return False\n\n    return True\n\n'''\nplace_ship_at(row, column, horizontal, length, fleet) -- returns the fleet that results\nfrom adding a ship, specified by row, column, horizontal, and length as in ship representation above,\nto fleet (the list is modified in place). It may be assumed that the resulting arrangement of the\nnew fleet is legal\n'''\ndef place_ship_at(row, column, horizontal, length, fleet):\n    fleet.append((row,column,horizontal,length,set()))\n    return fleet\n\n'''\nrandomly_place_all_ships() -- returns a fleet that is a result of a random legal arrangement of\nthe 10 ships in the ocean. This function makes use of the functions ok_to_place_ship_at and place_ship_at\n'''\ndef randomly_place_all_ships():\n    fleet = []\n    ship_lengths = [1,2,3,4]\n    # Place battleship (retry until the bounds check passes, so it cannot run off the board)\n    placed = False\n    while not placed:\n        row = random.randint(0,9)\n        col = random.randint(0,9)\n        horizontal = bool(random.getrandbits(1))\n        if ok_to_place_ship_at(row, col, horizontal, ship_lengths[3], fleet):\n            place_ship_at(row,col,horizontal,ship_lengths[3],fleet)\n            placed = True\n\n    # Place cruisers\n    i = 0\n    while i != 2:\n        row = random.randint(0,9)\n        col = random.randint(0,9)\n        horizontal = bool(random.getrandbits(1))\n        if ok_to_place_ship_at(row, col, horizontal, ship_lengths[2], fleet):\n            place_ship_at(row,col,horizontal,ship_lengths[2],fleet)\n            i += 1\n\n    # Place destroyers\n    i = 0\n    while i != 3:\n        row = random.randint(0,9)\n        col = random.randint(0,9)\n        horizontal = bool(random.getrandbits(1))\n        if ok_to_place_ship_at(row, col, horizontal, ship_lengths[1], fleet):\n            place_ship_at(row,col,horizontal,ship_lengths[1],fleet)\n            i += 1\n\n    # Place submarines\n    i = 0\n    while i != 4:\n        row = random.randint(0,9)\n        col = random.randint(0,9)\n        horizontal = bool(random.getrandbits(1))\n        if ok_to_place_ship_at(row, col, horizontal, ship_lengths[0], fleet):\n            place_ship_at(row,col,horizontal,ship_lengths[0],fleet)\n            i += 1\n\n    return fleet\n\n'''\nship_type(ship) -- returns one of the strings \"Battleship\", \"Cruiser\", \"Destroyer\", or \"Submarine\" identifying\nthe type of ship\n'''\ndef ship_type(ship):\n    ships = {4:'Battleship', 3: 'Cruiser', 2: \"Destroyer\", 1: \"Submarine\"}\n    return ships[ship[3]]\n\n'''\nis_sunk(ship) -- returns Boolean value, which is True if ship is sunk and False otherwise\n'''\ndef is_sunk(ship):\n    if ship[3] == len(ship[4]):\n        return True\n    else: return False\n\n'''\ncheck_if_hits(row, column, fleet) -- returns Boolean value, which is True if the shot of the human player at\nthe square represented by row and column hits any of the ships of fleet, and False otherwise\n'''\ndef check_if_hits(row, column, fleet):\n    for elem in fleet:\n        ship_coords = ship_coordinates(elem[0], elem[1],elem[2],elem[3])\n        if (row,column) in ship_coords:\n            return True\n    return False\n\n'''\nship_coordinates(row,column,horizontal, length) -- returns all the coordinates of the ship specified by row, column,\nhorizontal and length\n'''\n
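# For example, ship_coordinates(2, 3, True, 3) gives [(2, 3), (2, 4), (2, 5)].\ndef ship_coordinates(row,column,horizontal, length):\n    coords = [(row,column)]\n    if horizontal == True:\n        for i in range(1,length):\n            coords.append((row,column + i))\n    else:\n        for i in range(1,length):\n            coords.append((row + i,column))\n    return 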
coords\n\n'''\nhit(row, column, fleet) -- returns a tuple (fleet1, ship) where ship is the ship from the fleet fleet that \nreceives a hit by the shot at the square represented by row and column, and fleet1 is the fleet resulting \nfrom this hit. It may be assumed that shooting at the square row, column results in a hit on some ship in fleet\n'''\ndef hit(row, column, fleet):\n    for i in range(len(fleet)):\n        ship_coords = ship_coordinates(fleet[i][0], fleet[i][1], fleet[i][2], fleet[i][3])\n        if (row, column) in ship_coords:\n            fleet[i][4].add((row, column))\n            ship = fleet[i]\n    return (fleet, ship)\n\n'''\nare_unsunk_ships_left(fleet) -- returns Boolean value, which is True if there are ships in the fleet that are still \nnot sunk, and False otherwise\n'''\ndef are_unsunk_ships_left(fleet):\n    # sunk ships are removed from the fleet by main(), so any remaining ship is unsunk\n    return fleet != []\n\n'''\nmain() -- returns nothing. It prompts the user to call out rows and columns of shots and outputs the responses of \nthe computer iteratively until the game stops.\n'''\ndef main():\n    current_fleet = randomly_place_all_ships()\n    game_over = False\n    shots = 0\n    hits = []\n    while not game_over:\n        print_board(board)\n        loc_str = input(\"Enter row and column to shoot (separated by space): \").split()\n        valid = False\n        # Check that the user input is valid; entering 'q' lets the user quit the game\n        while not valid:\n            if len(loc_str) != 2:\n                if loc_str == ['q']:\n                    break\n                else:\n                    print(\"Error! Please enter integers between 0 and 9 (separated by space) to continue\")\n                    print(\"If you want to quit, type q\")\n                    loc_str = input().split()\n            else:\n                try:\n                    current_row = int(loc_str[0])\n                    current_column = int(loc_str[1])\n                    if current_row < 0 or current_row > 9 or current_column < 0 or current_column > 9:\n                        print(\"Error! Please enter integers between 0 and 9 (separated by space) to continue\")\n                        print(\"If you want to quit, type q\")\n                        loc_str = input().split()\n                    else: valid = True\n\n                except ValueError:\n                    print(\"Error! Please enter integers between 0 and 9 (separated by space) to continue\")\n                    print(\"If you want to quit, type q\")\n                    loc_str = input().split()\n\n        if loc_str == ['q']:\n            break\n\n        shots += 1\n        if (current_row, current_column) in hits:\n            # shooting a square that was already hit counts as a miss\n            print(\"You missed!\")\n        elif check_if_hits(current_row, current_column, current_fleet):\n            print(\"You have a hit!\")\n            hits.append((current_row, current_column))\n            board[current_row][current_column] = 'X'\n            (current_fleet, ship_hit) = hit(current_row, current_column, current_fleet)\n            if is_sunk(ship_hit):\n                print(\"You sank a \" + ship_type(ship_hit) + \"!\")\n                # Change the board to show the type of ship sunk\n                for elem in ship_hit[4]:\n                    if ship_type(ship_hit) == \"Destroyer\":\n                        board[elem[0]][elem[1]] = 'D'\n                    elif ship_type(ship_hit) == 'Cruiser':\n                        board[elem[0]][elem[1]] = 'C'\n                    elif ship_type(ship_hit) == 'Battleship':\n                        board[elem[0]][elem[1]] = 'B'\n                    else:\n                        board[elem[0]][elem[1]] = 'S'\n\n                current_fleet.remove(ship_hit)\n        else:\n            print(\"You missed!\")\n            # Change the board to show the missed shot\n            board[current_row][current_column] = 'o'\n\n        if not are_unsunk_ships_left(current_fleet): game_over = True\n\n    print(\"Game over! 
You required\", shots, \"shots.\")\n\n\nif __name__ == '__main__': #keep this in\n main()\n","sub_path":"extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":9396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"321720885","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAUTHOR\n\n Sébastien Le Maguer \n\nDESCRIPTION\n\n Package contains the renderer to produced STRAIGHT-based audio signal and the EMA results\n\nLICENSE\n This script is in the public domain, free from copyrights or restrictions.\n Created: 10 October 2016\n\"\"\"\nimport logging\n\nfrom rendering.emarenderer import EMARenderer\nfrom rendering.straightrenderer import STRAIGHTRenderer\n\n###############################################################################\n# Functions\n###############################################################################\nclass STRAIGHTEMARenderer(STRAIGHTRenderer, EMARenderer):\n \"\"\"Composite renderer to produce STRAIGHT audio signal and EMA related results\n \"\"\"\n def __init__(self, conf, nb_proc, preserve):\n \"\"\"Constructor\n\n :param conf: the configuration object\n :param nb_proc: the number of process to run\n :param preserve: switch to preserve intermediate files or not\n :returns: None\n :rtype:\n\n \"\"\"\n\n self.conf = conf\n self.logger = logging.getLogger(\"STRAIGHTEMARenderer\")\n self.nb_proc = nb_proc\n self.preserve = preserve\n self.MATLAB=\"matlab\"\n\n def render(self, out_path, gen_labfile_base_lst):\n \"\"\"Rendering\n\n :param out_path: the output directory path\n :param gen_labfile_base_lst: the file containing the list of utterances\n :returns: None\n :rtype:\n\n \"\"\"\n STRAIGHTRenderer.render(self, out_path, gen_labfile_base_lst)\n EMARenderer.render(self, out_path, gen_labfile_base_lst)\n","sub_path":"rendering/straightemarenderer.py","file_name":"straightemarenderer.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176811879","text":"# Circle clicking problem\n\n###################################################\n# Student should enter code below\n\nimport simpleguitk as simplegui\nimport math\n\n'''\nModify the program template below so that clicking inside any of the three displayed circles \nprints the color of the clicked circle to the console. 
\n\nHint: Use the supplied function dist to compute the distance between the center of each circle and the mouse click.\n'''\n\n\n# define global constants\nRADIUS = 20\nRED_POS = (50, 100)\nGREEN_POS = (150, 100)\nBLUE_POS = (250, 100)\n\n# define helper function\ndef dist(p, q):\n    return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)\n\n# define mouseclick handler\ndef click(pos):\n    # if the coordinates of the click are within a circle's radius, print that circle's color\n    # use dist to check whether the click is within RADIUS of a circle's center\n\n\n    if dist(RED_POS, pos) <= RADIUS:\n        print('You\\'ve hit the red btn')\n    elif dist(GREEN_POS, pos) <= RADIUS:\n        print('You\\'ve hit the green btn')\n    elif dist(BLUE_POS, pos) <= RADIUS:\n        print('You\\'ve hit the blue btn')\n    else:\n        print('The deep space of the canvas')\n\n\n# define draw\ndef draw(canvas):\n    canvas.draw_circle(RED_POS, RADIUS, 1, \"Red\", \"Red\")\n    canvas.draw_circle(GREEN_POS, RADIUS, 1, \"Green\", \"Green\")\n    canvas.draw_circle(BLUE_POS, RADIUS, 1, \"Blue\", \"Blue\")\n\n# create frame and register handlers\nframe = simplegui.create_frame(\"Echo click\", 300, 200)\nframe.set_mouseclick_handler(click)\nframe.set_draw_handler(draw)\n\n# start frame\nframe.start()\n\n\n###################################################\n# Sample output\n\n#Clicked red ball\n#Clicked green ball\n#Clicked blue ball\n#Clicked green ball\n#Clicked red ball\n#Clicked green ball\n#Clicked blue ball\n","sub_path":"Area52/Resources/MouseClickExersise.py","file_name":"MouseClickExersise.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"147723149","text":"# connect to yelp api\nfrom yelp.client import Client\nfrom yelp.oauth1_authenticator import Oauth1Authenticator\nimport os\n\n# pass yelp creds\nauth = Oauth1Authenticator(\n    consumer_key=os.environ['CONSUMER_KEY'],\n    consumer_secret=os.environ['CONSUMER_SECRET'],\n    token=os.environ['TOKEN'],\n    token_secret=os.environ['TOKEN_SECRET']\n)\n\ndef get_businesses(location):\n    # create a connection to yelp\n    client = Client(auth)\n    # pass in search terms\n    params = {\n        'term': 'restaurants',\n        'lang': 'en',\n        'limit': 3\n    }\n    response = client.search(location, **params)\n    # create a list to hold API results\n    businesses = []\n    # loop over the results and pull out the fields we need\n    for business in response.businesses:\n        # print(business.name, business.rating)\n        businesses.append({\"name\": business.name,\n                           \"display_phone\": business.display_phone,\n                           \"rating_img_url\": business.rating_img_url,\n                           \"snippet_text\": business.snippet_text,\n                           \"url\": business.url,\n                           \"review_count\": business.review_count,\n                           \"image_url\": business.image_url\n                           })\n    return businesses\n","sub_path":"yelp_API.py","file_name":"yelp_API.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251786720","text":"# Basic Pandas samples, dataframes\n# https://docs.google.com/spreadsheets/d/1NpzJARb8qkofgUC0I_rkzun1pmHG7s14tg1M0H11TFY/export?format=csv\n# https://docs.google.com/spreadsheets/d/1NpzJARb8qkofgUC0I_rkzun1pmHG7s14tg1M0H11TFY/edit#gid=0\n\n# The numpy library is very useful for many operations: algebra, Fourier, etc.\nimport numpy as np\nimport pandas as pd\n\n\n# RUTA_GSHEETS = \"https://docs.google.com/spreadsheets/d/1NpzJARb8qkofgUC0I_rkzun1pmHG7s14tg1M0H11TFY/export?format=csv\"\nRUTA_GSHEETS = 
\"https://docs.google.com/spreadsheets/d/1NpzJARb8qkofgUC0I_rkzun1pmHG7s14tg1M0H11TFY/export?format=xlsx\"\n\n\ndef main():\n\tdataframe = pd.read_excel(RUTA_GSHEETS)\n\tmedia = np.mean(dataframe[\"Puntos\"])\n\tfilaMax = np.argmax(dataframe[\"Puntos\"])\n\tfilaMin = np.argmin(dataframe[\"Puntos\"])\n\n\tprint(dataframe)\n\tprint(media)\n\tprint(filaMax)\n\tprint(filaMin)\n\n\t\nif (__name__ == \"__main__\"):\n\tmain()\n","sub_path":"semana12/pandas_excel.py","file_name":"pandas_excel.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"95711761","text":"#!/usr/bin/env python\n# (C) 2017 OpenEye Scientific Software Inc. All rights reserved.\n#\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\nfrom __future__ import print_function\nimport sys\nimport math\nfrom openeye import oechem\n\n\ndef DropLigandFromProtein(prot, lig):\n \"\"\"delete atoms from the protein w/same coords as the ligand\n as well as any waters\"\"\"\n\n approximatelyTheSame = 0.05\n nn = oechem.OENearestNbrs(prot, approximatelyTheSame)\n\n # mark ligand atoms for deletion\n bv = oechem.OEBitVector(prot.GetMaxAtomIdx())\n for nbrs in nn.GetNbrs(lig):\n r1 = oechem.OEAtomGetResidue(nbrs.GetBgn())\n r2 = oechem.OEAtomGetResidue(nbrs.GetEnd())\n if r1.GetModelNumber() == r2.GetModelNumber():\n bv.SetBitOn(nbrs.GetBgn().GetIdx())\n\n # mark waters for deletion too\n for atom in prot.GetAtoms():\n res = oechem.OEAtomGetResidue(atom)\n if oechem.OEGetResidueIndex(res) == oechem.OEResidueIndex_HOH:\n bv.SetBitOn(atom.GetIdx())\n\n pred = oechem.OEAtomIdxSelected(bv)\n for atom in prot.GetAtoms(pred):\n prot.DeleteAtom(atom)\n\n\ndef LigandProteinCloseContacts(prot, lig, maxgap):\n \"\"\"atoms in the protein within maxgap Angstroms of the ligand\"\"\"\n\n oechem.OESuppressHydrogens(prot)\n oechem.OESuppressHydrogens(lig)\n\n DropLigandFromProtein(prot, lig)\n\n nn = oechem.OENearestNbrs(prot, maxgap)\n\n return list(nn.GetNbrs(lig))\n\n\ndef PrintCloseContacts(prot, lig, maxgap):\n \"\"\"print atoms in the protein within maxgap Angstroms of the ligand\"\"\"\n\n contacts = LigandProteinCloseContacts(prot, lig, maxgap)\n\n if len(contacts) > 0:\n print(\"%s: %d contacts within %.2fA\" % (prot.GetTitle(), len(contacts), maxgap))\n for nbrs in contacts:\n pat = nbrs.GetBgn()\n lat = nbrs.GetEnd()\n rp = oechem.OEAtomGetResidue(pat)\n rl = oechem.OEAtomGetResidue(lat)\n print(\"%6.2fA:%5s %4s%s %s %s %4s%s:%5s %4s%s %s %s %4s%s\" %\n (math.sqrt(nbrs.GetDist2()),\n rp.GetSerialNumber(), pat.GetName(), rp.GetAlternateLocation(),\n rp.GetName(), rp.GetChainID(), rp.GetResidueNumber(), 
rp.GetInsertCode(),\n                   rl.GetSerialNumber(), lat.GetName(), rl.GetAlternateLocation(),\n                   rl.GetName(), rl.GetChainID(), rl.GetResidueNumber(), rl.GetInsertCode()))\n        print()\n\n\ndef main(argv=[__name__]):\n    if len(argv) != 4:\n        oechem.OEThrow.Usage(\"%s <protein> <ligand> <maxgap>\" % argv[0])\n\n    ifs = oechem.oemolistream()\n    if not ifs.open(argv[1]):\n        oechem.OEThrow.Fatal(\"Unable to open protein %s for reading\" % argv[1])\n\n    prot = oechem.OEGraphMol()\n    oechem.OEReadMolecule(ifs, prot)\n    if not oechem.OEHasResidues(prot):\n        oechem.OEPerceiveResidues(prot, oechem.OEPreserveResInfo_All)\n\n    ifs = oechem.oemolistream()\n    if not ifs.open(argv[2]):\n        oechem.OEThrow.Fatal(\"Unable to open ligand %s for reading\" % argv[2])\n\n    lig = oechem.OEGraphMol()\n    oechem.OEReadMolecule(ifs, lig)\n    if not oechem.OEHasResidues(lig):\n        oechem.OEPerceiveResidues(lig, oechem.OEPreserveResInfo_All)\n\n    maxgap = float(argv[3])\n\n    PrintCloseContacts(prot, lig, maxgap)\n\n\nif __name__ == \"__main__\":\n    sys.exit(main(sys.argv))\n","sub_path":"venv/Scripts/closecontacts.py","file_name":"closecontacts.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"636386634","text":"class Solution:\n    def removeInvalidParentheses(self, s: str):\n        stack = []\n        for c in s:\n            if c == ')' and stack and stack[-1] == '(':\n                stack.pop()\n            elif c in \"()\":\n                stack.append(c)\n        if not stack:\n            return [s]\n        n = len(s)\n        m = len(stack)\n        res = set()\n\n        def backtrack(a, left, right, i, j):\n            if right > left:\n                return\n            if i == n:\n                if left == right:\n                    res.add(''.join(a))\n                    return\n                return\n            if s[i] == '(':\n                backtrack(a + [s[i]], left + 1, right, i + 1, j)\n                if j < m and stack[j] == s[i]:\n                    backtrack(a, left, right, i + 1, j + 1)\n            elif s[i] == ')':\n                if left > right:\n                    backtrack(a + [s[i]], left, right + 1, i + 1, j)\n                if j < m and stack[j] == s[i]:\n                    backtrack(a, left, right, i + 1, j + 1)\n            else:\n                backtrack(a + [s[i]], left, right, i + 1, j)\n\n        backtrack([], 0, 0, 0, 0)\n        return list(res)\n\n\ns = Solution()\nprint(s.removeInvalidParentheses(\"()\"))\nprint(s.removeInvalidParentheses(\"()())()\"))\nprint(s.removeInvalidParentheses(\"(a)())()\"))\n","sub_path":"leetcode/2021/remove-invalid-parentheses.py","file_name":"remove-invalid-parentheses.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"279244455","text":"n,m = map(int, input().split(\" \"))\narr = [[0]*m for _ in range(n)]\nfor i in range(n):\n    tmp = input()\n    for j in range(m):\n        arr[i][j]=tmp[j]\n    arr[i]=list(map(int,arr[i]))\n\n\n# First idea: for every node with value 1, expand down, right and diagonally while counting squares.\n# I tried to use BFS for the distance - here, the side length of the square.\n# That approach times out: 1000*1000 cells times the BFS complexity.\n\n# def square(i,j):\n#     global arr, m, n\n#     que = [[i,j]]\n#     d = [[-1]*m for _ in range(n)]\n#     d[i][j]=1\n#     check = [(1,0),(0,1),(1,1)]\n#     length = 0\n#     while que:\n#         x,y = que.pop(0)\n#         for c in check:\n#             dx, dy = c[0],c[1]\n#             ...\n\n# DP instead: arr[i][j] becomes the side length of the largest square whose bottom-right corner is (i,j)\nans = 0\nfor i in range(n):\n    for j in range(m):\n        if arr[i][j]==1 and i-1>=0 and j-1>=0:\n            arr[i][j]+=min(arr[i][j-1], arr[i-1][j], arr[i-1][j-1])\n        # I wanted to write ans = max(ans, arr[i][j]*arr[i][j]) here, but since the if-condition\n        # above also restricts arr[i][j]==1 locally, the case ans = 0 with arr[i][j] = 1\n        # would never be reflected as ans = 1.\n        if arr[i][j]*arr[i][j]>ans:\n            ans = arr[i][j]*arr[i][j]\nprint(ans)\n","sub_path":"백준_1915_가장 큰 정사각형.py","file_name":"백준_1915_가장 큰 정사각형.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"593762191","text":"#Part2\nimport json\nfrom collections import Counter\n\n#Seattle,WA,GHCND:US1WAKG0038\nseattle_prec = []\nmonthly_precip = []\n\nwith open('precipitation.json') as file:\n precipitations = json.load(file)\nsum_month_precip = Counter() \n\nfor measurement in precipitations:\n if measurement['station'] == 'GHCND:US1WAKG0038': #This only gives me the dictionaries with that code\n split_date_month = measurement['date'].split(\"-\")[1]\n measurement['date'] = split_date_month #split_date_month only month \n sum_month_precip[measurement['date']] += measurement['value']\n\nprint(sum_month_precip)\n\ntotal_year_seattle = 0\nfor month_rain in sum_month_precip:\n print(sum_month_precip[month_rain])\n total_year_seattle += sum_month_precip[month_rain]\n\nprint(f'The total yearly rainfall for Seattle is {total_year_seattle}')\n\n\nwith open('precipitationSE.json', 'w') as file:\n json.dump(precipitations, file, indent=4)\n","sub_path":"part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558718575","text":"import operator\nimport os\nimport shutil\n\nimport file_manager\nimport tabu\nfrom data_analysis import clustering\nfrom optimiser import optimise\n\n\ndef check_stop_signal():\n if os.path.exists('stop') or os.path.exists('STOP'):\n print(\"Found stop file, in \", os.getcwd())\n return 1\n\n\ndef aggregate(seeds, monomer, aggregate_size=2, hm_orientations=8,\n method=None):\n \"\"\"\n Input: a list of seed molecules, a monomer Molecule objects\n \"\"\"\n if check_stop_signal():\n print(\"Function: aggregate\")\n return StopIteration\n\n if hm_orientations == 'auto':\n number_of_orientations = 8\n else:\n number_of_orientations = hm_orientations\n\n starting_directory = os.getcwd()\n print(\"Starting Aggregation in\\n {}\".format(starting_directory))\n for aggregation_counter in range(2, aggregate_size + 2):\n aggregate_id = \"{:03d}\".format(aggregation_counter)\n aggregate_home = 'aggregate_' + aggregate_id\n file_manager.make_directories(aggregate_home)\n os.chdir(aggregate_home)\n\n print(\" Starting aggregation cycle: {}\".format(aggregation_counter))\n seeds = add_one(aggregate_id, seeds, monomer, number_of_orientations, method)\n print(\" Aggregation cycle: {} completed\\n\".format(aggregation_counter))\n\n if hm_orientations == 'auto' and number_of_orientations <= 256:\n number_of_orientations *= 2\n os.chdir(starting_directory)\n return\n\n\ndef add_one(aggregate_id, seeds, monomer, hm_orientations, method):\n \"\"\"\n :type aggregate_id str\n :type seeds list of Molecules\n :type monomer Molecule.Molecule\n :type hm_orientations int how many orientations\n :type method dict containing charge, multiplicity, software\n \"\"\"\n if check_stop_signal():\n print(\"Function: add_one\")\n return StopIteration\n print(' There are', len(seeds), 'seed molecules')\n cwd = os.getcwd()\n\n dict_of_optimized_molecules = {}\n for seed_count, each_seed in enumerate(seeds):\n if check_stop_signal():\n print(\"Function: add_one\")\n return\n print(' Seed: {}'.format(seed_count))\n seed_id = \"{:03d}\".format(seed_count)\n seeds_home = 'seed_' + seed_id\n file_manager.make_directories(seeds_home)\n os.chdir(seeds_home)\n each_seed.mol_to_xyz('seed.xyz')\n monomer.mol_to_xyz('monomer.xyz')\n mol_id = '{0}_{1}'.format(seed_id, aggregate_id)\n\n all_orientations = tabu.generate_orientations(mol_id, seeds[seed_count], monomer, hm_orientations)\n for name, 
molecule in sorted(all_orientations.items(), key=operator.itemgetter(0)):\n            o_status = optimise(molecule, method)\n            if o_status is True:\n                print(\"  E(%10s): %12.7f\" % (name, molecule.energy))\n                dict_of_optimized_molecules[name] = molecule\n            else:\n                print('  Optimisation failed:', name, 'will be discarded')\n        os.chdir(cwd)\n    if len(dict_of_optimized_molecules) < 2:\n        return list(dict_of_optimized_molecules.values())\n    print(\"  Clustering\")\n    selected_seeds = clustering.choose_geometries(dict_of_optimized_molecules)\n    file_manager.make_directories('selected')\n    for each_file in selected_seeds.values():\n        xyz_file = 'seed_' + each_file.name[4:7] + '/result_' + each_file.name + '.xyz'\n        shutil.copy(xyz_file, 'selected/')\n    return list(selected_seeds.values())\n\ndef main():\n    pass\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447946020","text":"#!/usr/bin/python3.6\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport sys\n\n\nsubtle_link = {}  # Dict. for holding movie names\n\n\nprint(\"\\n\")\n\n# Take movie name\n\nmoviename = input(\"\\nEnter movie name : \")\n\nif moviename == '':\n    print(\"You did not enter anything!\\n\")\n    sys.exit()\n# moviename = 'spider man'\n\n# URLS\nmainURL = \"https://opensubtitles.co\"\nsearchurl = \"https://opensubtitles.co/search?q=\" + moviename.replace(\" \", \"+\")\n\nprint(\"\\n\\nSearching movies.... \\n\\n\")  # + searchurl + \"\\n\")\n\n# Request to webpage\n\nr = requests.get(searchurl).text\n\nsoup = BeautifulSoup(r, 'html5lib')\n\n\n# finding links and names\n\n\nscratch_link = soup.find_all(\"a\", class_=\"list-group-item\")\n\n\nname_Count = 0\n\nfor moviename in scratch_link[:-5]:\n    name_Count += 1\n    print(f\"[{str(name_Count)}] {moviename['href'].split('/')[-1]}\")\n    subtle_link[name_Count] = moviename['href']  # adding values to \"subtle_link\" Dictionary\n\n\nif len(subtle_link) == 0:\n    print(\" Zero search results :( !!!\\n\")\n    sys.exit()\nprint()\n\n\ntry:\n    Users_Choice = int(input(\"Enter your choice : \"))\n    if Users_Choice > len(subtle_link):\n        print(\" \\n INVALID CHOICE \\n\")\n        sys.exit()\n\n    MV_name = subtle_link[Users_Choice].split('/')[-1]  # actual movie name\n\n# except ValueError:\n#     print(\"\\nInvalid choice\\n\")\n#     sys.exit\n\n    r2 = requests.get(subtle_link[Users_Choice]).text\n\n    sub2 = BeautifulSoup(r2, 'html5lib')\n\n    links = sub2.find('ul', class_=\"list-group\").a['href']\n\n    # print(links)\n\n    r3 = requests.get(links).text\n\n    sub3 = BeautifulSoup(r3, 'html5lib')\n\n    half_Link = sub3.find('a', class_=\"btn btn-danger\")['href']\n\n    # print(\"\\n \", half_Link)\n\n    full_Link = mainURL + half_Link\n\n    # print(full_Link)\n\n    generateDown = requests.get(full_Link)\n\n    with open(f\"{MV_name}.srt\", 'wb') as file:\n        file.write(generateDown.content)\n    print(\"\\n\" + MV_name + \" successfully downloaded in > \" + os.getcwd() + \" \\n\")\n\nexcept ValueError:\n    print(\"\\nInvalid choice\\n\")\n","sub_path":"subtle.py","file_name":"subtle.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"414856283","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAnalysis of strategies.\n\"\"\"\n\nimport math\nimport time\nimport game\nimport rows\nimport symmetry\nimport scores\n\n\nmemoization = {}\n\n\ndef init():\n    \"\"\"(Re)init 
data-structures for a new negamax search.\"\"\"\n global memoization\n memoization = {}\n\n\n# --- BELOW: PROVIDED FUNCTIONS THAT YOU MUST NOT MODIFY ! ---\n# NB: these functions are used by validation scripts.\n\n\ndef enum_move():\n \"\"\"Enumerate valid columns of the board\"\"\"\n for j in range(game.width):\n if game.valid(j):\n yield j # add j in the enumeration\n\n\ndef negamax(depth):\n \"\"\"Basic implementation of negamax\"\"\"\n if rows.winner != None:\n # current player has lost !\n return -scores.best\n if game.is_full():\n return 0\n if depth == 0:\n return scores.approx()\n alpha = -scores.best # minimal score of current player\n for j in enum_move():\n game.move(j)\n val = -negamax(depth-1)\n game.undo()\n if val > alpha:\n alpha = val\n if alpha == scores.best:\n # the score can not be improved !\n return alpha\n return alpha\n\n\ndef perfect_score():\n \"\"\"Computes the perfect score of the current player\n (a good test of select_perfect_move)\"\"\"\n assert rows.winner == None\n j0 = select_perfect_move()\n if j0 == None:\n return -scores.best\n game.move(j0)\n j1 = select_perfect_move()\n game.undo()\n if j1 == None:\n return scores.best\n return 0\n\n\ndef check_alphabeta(alpha, beta, effective, exact):\n \"\"\"Check that the 'effective' result for alpha-beta algorithm\n is a correct approximation of the 'exact' result\"\"\"\n assert (alpha < beta)\n if alpha < effective and effective < beta:\n assert (effective == exact)\n return\n if effective <= alpha:\n assert (exact <= effective)\n return\n # else:\n assert (beta <= effective)\n assert (effective <= exact)\n return\n\n\ndef scored_select_move():\n \"\"\"'select_move' without negamax, but only based on score\n (useful to debug or tune scoring)\"\"\"\n for j in scored_enum_move():\n return j # return the first element in the enumeration\n # return None if the current player is probably losing !\n\n\n# --- ADD YOUR OWN NEGAMAX FUNCTIONS BELOW ---\n\ndef alphabeta_negamax(alpha, beta, depth):\n if rows.winner != None:\n return -scores.best\n if game.is_full():\n return 0\n if depth == 0:\n return scores.approx()\n for j in enum_move():\n game.move(j)\n score = -memoized_alphabeta_negamax(-beta, -alpha, depth - 1)\n game.undo()\n if score > alpha :\n alpha = score\n if alpha >= beta:\n return alpha\n return alpha\n\ndef memoized_alphabeta_negamax(alpha, beta, depth):\n if symmetry.get_code() in memoization:\n a = memoization[symmetry.get_code()][0]\n b = memoization[symmetry.get_code()][1]\n score = memoization[symmetry.get_code()][2]\n if a < score < b:\n return score\n elif score >= b :\n if score >= beta:\n return score\n else:\n score = alphabeta_negamax(score, beta, depth)\n memoization[symmetry.get_code()] = [a, beta, score]\n return score\n elif score <= a:\n if score <= alpha:\n return score\n else:\n score = alphabeta_negamax(alpha, score, depth)\n memoization[symmetry.get_code()] = [alpha, b, score]\n return score\n score = alphabeta_negamax(alpha, beta, depth)\n memoization[symmetry.get_code()] = [alpha, beta, score]\n return score\n \ndef memoized_negamax(depth):\n if symmetry.get_code() in memoization:\n return memoization[symmetry.get_code()]\n score = rec_negamax(depth)\n memoization[symmetry.get_code()] = score\n return score\n\ndef rec_negamax(depth):\n \"\"\"Basic implementation of negamax with calls to memoized_negamax \n rather than rec_negamax\"\"\"\n if rows.winner != None:\n # current player has lost !\n return -scores.best\n if game.is_full():\n return 0\n if depth == 0:\n return 
scores.approx()\n    alpha = -scores.best  # minimal score of current player\n    for j in enum_move():\n        game.move(j)\n        val = -memoized_negamax(depth-1)\n        game.undo()\n        if val > alpha:\n            alpha = val\n            if alpha == scores.best:\n                # the score cannot be improved!\n                return alpha\n    return alpha\n\ndef scored_enum_move():\n    \"\"\"Enumerate valid columns of the board leading to smallest opponent scores\n    (by increasing order)\"\"\"\n    if not game.move_list:\n        # first move (on an empty board)\n        if game.width+1 >= 2*game.lim:\n            # enumerating a single column (the middle one)\n            # is sufficient to decide the perfect score.\n            yield (game.width // 2)\n            return\n        # else: enumerate all columns of the board!\n        yield from enum_move()\n        return\n    # other cases\n    scored_moves = []\n\n    for i in enum_move():\n        game.move(i)\n        if rows.winner != None:\n            game.undo()\n            # a winning move is yielded immediately; skip the scoring below,\n            # which would otherwise undo the previous move a second time\n            yield i\n            continue\n        scored_moves.append([scores.approx(), i])\n        game.undo()\n    is_positive = False\n    for s, j in reversed(sorted(scored_moves)):\n        if s >= 0:\n            is_positive = True\n            yield j\n    # When we're in a perfect losing game, we could have only negative scores from approx, hence the need to treat\n    # some negative cases to choose the least bad move\n    if not is_positive:\n        maximum = -scores.best\n        best_move = None\n        for s, j in scored_moves:\n            if s > maximum:\n                maximum = s\n                best_move = j\n        if maximum == -scores.best:\n            yield None\n        yield best_move\n\n\n# -------\n# WARNING: for the sake of \"validation\" script,\n# set in \"current_negamax\" the current version of the recursive negamax\n# used by \"select_perfect_move\" below.\n\ncurrent_negamax = memoized_alphabeta_negamax\n\n# Tips: keep \"current_negamax\" as the negamax in \"select_perfect_move\"\n# However, you may need to adapt the parameters of \"current_negamax\".\n\n\n# ------ Alpha-beta checking (in \"validation\" script) ----\n\n\n# \"find_alpha_beta\" should have the same parameters (in the same order)\n# as your (memoized) alpha-beta negamax stored in current_negamax above.\n# It must return a triple (alpha, beta, depth) in this order.\n# Modify it if needed!\ndef find_alpha_beta(alpha, beta, depth):\n    return (alpha, beta, depth)\n\n\n# USE THIS ONE for the memoized version of the negamax (without alpha-beta)\n#def find_alpha_beta(depth):\n#    return (-scores.best-1, scores.best+1, depth)\n\n\n# ---------------------------------\n# select_move functions\n\n\ndef select_perfect_move(depth=-1):\n    \"\"\"Returns 'None' on a perfect losing game in less than 'depth' moves.\n    Otherwise, it returns a perfect move (modulo 'depth').\"\"\"\n    assert rows.winner == None and depth != 0\n    init()\n    alpha = -scores.best\n    score = alpha\n    move = None\n    for i in enum_move():\n        game.move(i)  # We simulate multiple moves, and we call negamax to have the\n        score = -current_negamax(-scores.best, -alpha, depth - 1)  # best score for each one\n        game.undo()\n        if score > alpha:\n            alpha = score\n            move = i\n        if score == scores.best:  # If we already achieved a perfect score, no need for further calls\n            return i  # to negamax.\n    return move\n\n\ndef select_move():\n    \"\"\"Automatic player invoked by 'gui'.\"\"\"\n    assert rows.winner == None\n    if len(game.move_list) == 0 and game.width+1 >= 2*game.lim:\n        # returns quickly a \"good\" first move in this special case!\n        return game.width // 2\n    # TO BE CHANGED!\n    move = select_perfect_move(10)\n    depth = 6\n    while move == None and depth > 0:\n        move = select_perfect_move(depth)\n        depth -= 1\n    return 
move\n","sub_path":"strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":8140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"279931831","text":"# -*- coding: utf-8 -*-\nimport collections\nimport os\n\nfrom dbaccess import DbAccess\nfrom dictdiff import DictDiffer\nfrom gidhelper import GidHelper\nfrom openpyxl.reader.excel import load_workbook\n\nfrom vnc import Vnc\n\n\nclass vncDiff:\n def __init__(\n self,\n excel_files_dir=r\"vnc_input_for_diff/\",\n db_schema=\"whcomm\",\n vnc_to_process=['Retouren', 'Otto Germany', 'Alba Moda', 'bon prix Germany', 'Corso',\n 'Heinrich Heine GmbH Germany', 'baumarkt direkt', 'Schwab Versand Germany',\n 'Universal', 'Intrastat', 'EK_Relevanz'\n ]\n ):\n self.excel_data = collections.OrderedDict()\n self.excel_data_new = []\n self.excel_data_new_vncid_wtid = []\n self.excel_data_dupkey = []\n self.db_data = collections.OrderedDict()\n self.excel_files = []\n self.gidhelper = GidHelper()\n self.db_schema = db_schema\n self.vnc_to_process = vnc_to_process\n self.is_excel_valid = True\n\n os.environ[\"NLS_LANG\"] = \"GERMAN_GERMANY.WE8ISO8859P1\"\n self.db_access = DbAccess(\"oracle:DAYSNAP:noa:noa\")\n\n for file in vnc_to_process:\n self.excel_files.append(excel_files_dir + file + \".xlsx\")\n\n self.read_excel_data()\n self.read_db_data()\n\n def read_excel_data(self):\n for file in self.excel_files:\n self.is_excel_valid = True\n wb = load_workbook(file, data_only=True)\n ws = wb.get_sheet_by_name(wb.get_sheet_names()[0])\n rows = ws.get_highest_row()\n columns = ws.get_highest_column()\n\n headerdata_list = self.gidhelper.get_excel_header(ws, columns)\n\n for row in range(2, rows + 1):\n row_dict = self.gidhelper.parse_excel_row(ws, headerdata_list, columns, row)\n if row_dict is not None:\n # workaround if the excel-doc has empty rows\n row_has_value = False\n for key, value in row_dict.items():\n if value is not None:\n row_has_value = True\n if row_has_value is False:\n continue\n\n for key, value in row_dict.items():\n # removing None keys in row_dict which shouldnt be there in the first place(buggy excel-reader?)\n if key is None:\n del row_dict[None]\n\n self.validate_excel_row(row, row_dict)\n\n if row_dict['vncid'] is None and row_dict['wtid'] is None:\n self.excel_data_new_vncid_wtid.append(row_dict)\n elif row_dict['vncid'] is None:\n self.excel_data_new.append(row_dict)\n elif row_dict['vncid'] in self.excel_data:\n self.excel_data_dupkey.append(row_dict)\n\n else:\n self.excel_data[row_dict['vncid']] = row_dict\n\n def read_db_data(self):\n try:\n self.db_access.parse_sql(Vnc.get_attributes())\n basesql = Vnc.get_base_sql_diff() + \" FROM \" + self.db_schema + \"\"\".WHBUSINESSTRANSACTION join \"\"\" + self.db_schema + \"\"\".VouchernumberCatalog on WHBUSINESSTRANSACTION.id = VouchernumberCatalog.ID_WHBUSINESSTRANSACTION left join sysp.company co on co.id = VouchernumberCatalog.id_company_bdf\"\"\"\n\n appsql = []\n\n if \"Retouren\" in self.vnc_to_process:\n appsql.append(\"WHBUSINESSTRANSACTION.ID_WHBUSINESSTRANSACTIONTYPE = 1\")\n self.vnc_to_process.remove(\"Retouren\")\n elif \"EK_Relevanz\" in self.vnc_to_process:\n appsql.append(\"WHBUSINESSTRANSACTION.ID_WHBUSINESSTRANSACTIONTYPE = 2\")\n self.vnc_to_process.remove(\"EK_Relevanz\")\n elif \"Intrastat\" in self.vnc_to_process:\n appsql.append(\"WHBUSINESSTRANSACTION.ID_WHBUSINESSTRANSACTIONTYPE = 4\")\n self.vnc_to_process.remove(\"Intrastat\")\n else:\n 
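# fall-through: any remaining catalog is queried as the generic transaction type 6 (the postprocessing SQL below maps all other catalogs to type 6)\n                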
appsql.append(\"WHBUSINESSTRANSACTION.ID_WHBUSINESSTRANSACTIONTYPE = 6\")\n\n for vnc in self.vnc_to_process:\n if vnc == \"Universal\":\n vnc = \"Baur Versand Germany\"\n appsql.append(\"co.name = '\" + vnc + \"'\")\n\n extsql = \"\"\n for i, d in enumerate(appsql):\n if i == 0:\n extsql += \" where \" + d\n else:\n extsql += \" and \" + d\n\n basesql = basesql + extsql\n self.db_access.execute(basesql)\n rows = self.db_access.fetchall()\n if rows == []:\n raise ValueError\n\n for row in rows:\n row_dict = self.db_access.parse_db_row(row)\n self.db_data[row_dict['vncid']] = row_dict\n except Exception as e:\n raise e\n\n def spit_out_differences(self):\n key_set_excel = set(self.excel_data.keys())\n key_set_db = set(self.db_data.keys())\n insertUpdateWhbu = {'inserts': {}, 'updates': []}\n\n if not self.is_excel_valid:\n return\n\n # New Excel rows (vncid is None)\n print(\"-- Inserts --\")\n insertUpdateWhbu = self.spit_out_inserts(insertUpdateWhbu)\n\n # duplicate keys\n print(\"-- Dupkeys --\")\n for diff in self.excel_data_dupkey:\n print(diff)\n\n # keys in excel but not in db (cannot be insert, because vncid is not None)\n print(\"-- Zeilen im Excel aber nicht in der Datenbank --\")\n diff_set1 = key_set_excel.difference(key_set_db)\n if len(diff_set1) != 0:\n for diff in diff_set1:\n print(self.excel_data[diff])\n\n # keys in db but not in excel (delete)\n print(\"-- Deletes --\")\n diff_set2 = key_set_db.difference(key_set_excel)\n if len(diff_set2) != 0:\n for diff in diff_set2:\n self.db_access.generate_delete(diff)\n\n # key in db and excel but different value (updates)\n print(\"-- Updates --\")\n self.spit_out_updates(diff_set1, diff_set2, insertUpdateWhbu)\n print('\\n')\n\n def spit_out_inserts(self, insertUpdateWhbu):\n # generate inserts in vouchernumbercatalog AND whbusinesstransaction (vnc_id and wtid == None)\n for diff in self.excel_data_new_vncid_wtid:\n self.db_access.generate_insert_vnc_and_whbu(diff, self.db_data)\n\n # generate inserts in vouchernumbercatalog (vnc_id == None and wtid != None)\n for diff in self.excel_data_new:\n self.db_access.generate_insert_vnc(diff)\n\n # generate inserts in whbusinesstransaction (wtid is empty)\n for key in self.excel_data:\n diff_dict = DictDiffer(self.excel_data[key], self.db_data[key]).changed()\n if diff_dict is not None and u'Geschäftsvorfall' in diff_dict['excel'] is not None:\n res = self.db_access.generate_insert_whbu(diff_dict, self.excel_data, self.db_data)\n if res:\n insertUpdateWhbu['inserts'] = dict(insertUpdateWhbu['inserts'], **res['inserts'])\n insertUpdateWhbu['updates'] = insertUpdateWhbu['updates'] + res['updates']\n\n return insertUpdateWhbu\n\n def spit_out_updates(self, diff_set1, diff_set2, insertUpdateWhbu):\n for key in self.excel_data:\n if key in diff_set1 or key in diff_set2: # already printed\n continue\n try:\n d = DictDiffer(self.excel_data[key], self.db_data[key])\n\n diff = d.changed()\n if diff is not None:\n # print(\"Changed: \", diff)\n if u'Geschäftsvorfall' in diff['excel']:\n self.db_access.generate_update_whbu(diff, insertUpdateWhbu)\n else:\n self.db_access.generate_update_vnc(diff)\n\n except KeyError:\n print(\"Key nicht gefunden = \" + key)\n\n def validate_excel_row(self, rowCount, row_dict):\n to_be_validated_rows = {\n \"Einzelbelegnummernkennzeichen\",\n \"Belegnummer von\",\n \"Belegnummer bis\",\n \"Intrastat\",\n u\"Gültig ab\",\n u\"Gültig bis\",\n \"Aufnehmen in das Soreextract\",\n u\"Geschäftsvorfall\"\n }\n\n for row in to_be_validated_rows:\n if row_dict[row] is 
None:\n print(' Zeile: ' + str(rowCount))\n print(\"Oh nein! Das Feld \" + row + \" ist null obwohl es nicht nullable ist!\")\n self.is_excel_valid = False\n\n def spit_out_postprocessing(self):\n print(\"/*------ Dies immer NACH jeder Transaction, die das \",\n \"Belegnummernverzeichnis ändert, ausführen ------*/\")\n print(\"\"\"\n /* Setze SAP-Konten auf gültige Werte */\n \n update WHCOMM.VOUCHERNUMBERCATALOG set accountsap1 = 0 where accountsap1 is null;\n update WHCOMM.VOUCHERNUMBERCATALOG set accountsap2 = 0 where accountsap2 is null;\n\n /* Setze YN_SOREEXCLUDE in der Übergangsphase auf N wenn null */\n update whcomm.vouchernumbercatalog set YN_SOREEXCLUDE = 'N'\n where YN_SOREEXCLUDE is null;\n\n /* Setze SUMEXCLUDE auf gültige Werte */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_SUMEXCLUDE = 'N';\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_SUMEXCLUDE = 'Y'\n where\n (VNC.ACCOUNTSAP1 = 0 and VNC.ACCOUNTSAP2 = 0)\n or vnc.vouchernumber_from between 872000 and 872999 \n or vnc.vouchernumber_from between 874000 and 874999 \n or vnc.vouchernumber_from between 950000 and 959999; \n /* YN_Sales auf gültige Werte setzen */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_SALES = 'N';\n \n /* Nur die belegnummernkreise 873x and Y setzen */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_SALES = 'Y'\n where VNC.VOUCHERNUMBER_FROM between 873000 and 873999;\n\n /* YN_WJINCLUDE auf gültige Werte setzen */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_WJINCLUDE = 'Y';\n\n /* nun den richtigen Wert setzen */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_WJINCLUDE = 'N'\n where\n VNC.VOUCHERNUMBER_FROM between 872000 and 872999\n or\n VNC.VOUCHERNUMBER_FROM between 874000 and 874999;\n \n /* YN_STOCK auf gültige Werte setzen */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_STOCK = 'Y';\n\n /* nun den Wert setzen */\n update WHCOMM.VOUCHERNUMBERCATALOG vnc set VNC.YN_STOCK = 'N'\n where\n (vnc.ACCOUNTSAP1 = 0 and vnc.ACCOUNTSAP2 = 0)\n or\n (vnc.ACCOUNTSAP1 != 0 and vnc.ACCOUNTSAP2 != 0\n and vnc.ACCOUNTSAP1 != vnc.ACCOUNTSAP2);\n\n /* setze den dafault für EK-Relevanz */\n update vouchernumbercatalog set YN_EKRELEVANT = 'N' where YN_EKRELEVANT is null;\n\n /* Setze EK-Relevanz Kennzeichen und Referenz auf whbusinesstrasactiontype */\n update whbusinesstransaction set id_whbusinesstransactiontype = 2\n where id in\n (select id_whbusinesstransaction from vouchernumbercatalog vnc\n where vnc.YN_EKRELEVANT = 'Y');\n\n /* Setze Intrastat Kennzeichen und Referenz auf whbusinesstrasactiontype */\n update whbusinesstransaction set id_whbusinesstransactiontype = 4\n where id in\n (select id_whbusinesstransaction from vouchernumbercatalog vnc\n where vnc.YN_INTRASTATRELEVANT = 'Y');\n \n /* Setze ROM Referenz auf whbusinesstrasactiontype */\n update whbusinesstransaction set id_whbusinesstransactiontype = 1\n where enumname like 'ROM_%';\n\n /* Setze Referenz auf whbusinesstrasactiontype an allen anderen Belegnummernverzeichnissen */\n update whbusinesstransaction set id_whbusinesstransactiontype = 6\n where id_whbusinesstransactiontype not in\n (1,2,3,4,5,7) and id_whbusinesstransactiontype != 6\n or id_whbusinesstransactiontype is null;\n \n /* Setze Referenz auf whbusinesstrasactiontype an allen anderen Belenummernverzeichnissen */\n update vouchernumbercatalog set YN_INTRASTATRELEVANT = 'N',\n YN_EKRELEVANT = 'N', YN_OBLIGORELEVANT = 'N' where id in\n (select vnc.id from vouchernumbercatalog vnc\n join whbusinesstransaction wt\n on wt.id = 
VNC.ID_WHBUSINESSTRANSACTION\n where wt.id_whbusinesstransactiontype = 6);\n \n /* whbusinesstransaction ohne eintrag in vouchernumercatalog */\n delete from whbusinesstransaction where id not in\n (\n select vnc.id_whbusinesstransaction from vouchernumbercatalog vnc join whbusinesstransaction wt on wt.id = VNC.ID_WHBUSINESSTRANSACTION\n );\n \"\"\")\n\n\nif __name__ == \"__main__\":\n vnc_to_process = []\n vnc_to_process.append(\"Otto Germany\")\n vnc_to_process.append(\"Schwab Versand Germany\")\n vnc_to_process.append(\"Heinrich Heine GmbH Germany\")\n vnc_to_process.append(\"Eddie Bauer\")\n vnc_to_process.append(\"Corso\")\n vnc_to_process.append(\"bon prix Germany\")\n vnc_to_process.append(\"Alba Moda\")\n# vnc_to_process.append(\"baumarkt direkt\")\n vnc_to_process.append(\"Universal\")\n vnc_to_process.append(\"Retouren\")\n# vnc_to_process.append(\"Intrastat\")\n vnc_to_process.append(\"EK_Relevanz\")\n for vnc in vnc_to_process:\n print(\"/*------ \" + vnc + \" ------*/\")\n vcn_diff = vncDiff(vnc_to_process=[vnc])\n vcn_diff.spit_out_differences()\n\n vcn_diff.spit_out_postprocessing()\n","sub_path":"vnc_diff_excel_and_db.py","file_name":"vnc_diff_excel_and_db.py","file_ext":"py","file_size_in_byte":14459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154607255","text":"from hoomd_script import *\r\nimport sys\r\nsys.dont_write_bytecode = True\r\nN = 100\r\ns = init.read_xml(filename=\"coor.xml\")\r\n\r\nlj = pair.lj(r_cut=2.5)\r\nwca_r_cut = 2**(1.0/6.0)\r\nlj.pair_coeff.set('P','P', epsilon=1.0, sigma=1.0, r_cut=wca_r_cut);\r\nlj.pair_coeff.set('P','X', epsilon=1.0, sigma=1.0, r_cut=wca_r_cut);\r\nlj.pair_coeff.set('X','X', epsilon=1.0, sigma=1.0, r_cut=wca_r_cut);\r\nlj.pair_coeff.set('P','C', epsilon=1.0, sigma=0.75, r_cut=2.5);\r\nlj.pair_coeff.set('X','C', epsilon=1.0, sigma=0.75, r_cut=2.5);\r\nlj.pair_coeff.set('P','S', epsilon=1.0, sigma=0.5, r_cut=0.5*wca_r_cut);\r\nlj.pair_coeff.set('X','S', epsilon=1.0, sigma=0.5, r_cut=0.5*wca_r_cut);\r\nlj.pair_coeff.set('C','C', epsilon=1.0, sigma=0.5, r_cut=0.5*wca_r_cut);\r\nlj.pair_coeff.set('S','C', epsilon=1.0, sigma=0.5, r_cut=0.5*wca_r_cut);\r\nlj.pair_coeff.set('S','S', epsilon=1.0, sigma=0.5, r_cut=0.5*wca_r_cut);\r\n\r\nfene = bond.fene()\r\nfene.bond_coeff.set('P-P', k=30.0, r0=1.5, epsilon=1.0, sigma=1.0)\r\nfene.bond_coeff.set('X-X', k=30.0, r0=1.5, epsilon=1.0, sigma=1.0)\r\nfene.bond_coeff.set('X-P', k=30.0, r0=1.5, epsilon=1.0, sigma=1.0)\r\nfene.bond_coeff.set('P-X', k=30.0, r0=1.5, epsilon=1.0, sigma=1.0)\r\n\r\nnlist.set_params(r_buff = 0.1, check_period = 1)\r\n# integrate at constant temperature\r\nall = group.all()\r\nintegrate.mode_standard(dt=0.001)\r\nintegrator = integrate.langevin(group=all, T=0.5, seed=5)\r\n#integrate.nvt(group=_all, T=1.0, tau=0.1)\r\n# integrate at constant temperature,momentum\r\nzeroer= update.zero_momentum(period=1000)\r\n#\r\nsorter = update.sort()\r\nsorter.set_period(100)\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n@nb.jit\r\ndef pbc(a, box):\r\n return a - box * np.round(a/box)\r\n\r\nr_balance = 1.40544\r\n\r\n# add cross-linking bond type into the system types group\r\na = s.take_snapshot()\r\nnptps = np.array(a.particles.types)[a.particles.typeid[:N]]\r\nnpmas = a.particles.mass[:N]\r\nsites = np.argwhere((a.particles.typeid==a.particles.types.index('X'))==True).flatten()\r\nNPBonds = [ (b.type, b.a, b.b) for b in s.bonds if max(b.a, b.b) < N ]\r\n#NPangles = [ (a.type, a.a, a.b, a.c) for a in s.angles if 
max(a.a,a.b,a.c) < N ]\r\nused = np.zeros((sites.shape[0],), dtype=bool)\r\nidx = np.arange(sites.shape[0])\r\nBonds = []\r\n#for _ in s.bonds:\r\n# if _.type == '33':\r\n# used[idx[sites == _.a]]=True\r\n# used[idx[sites == _.b]]=True\r\n# Bonds.append(('33',_.a,_.b))\r\ndef crosslink(timestep):\r\n a = s.take_snapshot()\r\n nppos = a.particles.position[:N]\r\n psites = nppos[sites] # '3' is crosslink site type\r\n npimg = a.particles.image[:N]\r\n box = np.array([a.box.Lx,a.box.Ly, a.box.Lz])\r\n cnt = 0\r\n for _ in range(5): # copy from ChenTao's method, try 5 times or all sites are connected\r\n if used.all() == True:\r\n break\r\n for i in range(sites.shape[0]-1):\r\n if used[i]:\r\n continue\r\n pi = psites[i]\r\n for j in range(i+1,sites.shape[0]):\r\n if used[j] or used[i]: # Adding `or used[i]` To avoid Pair (True False) goes into crosslink process\r\n continue\r\n pj = psites[j]\r\n dp = pbc(pj-pi,box)\r\n dr = (dp.dot(dp)) ** 0.5\r\n if abs(dr - r_balance) <0.1 and abs(sites[j]-sites[i])>14: # close to balance and interval between 2 sites farther than 14\r\n a, b = int(min(sites[i], sites[j])), int(max(sites[i], sites[j]))\r\n Bonds.append(('X-X', a, b))\r\n s.bonds.add('X-X', a, b)\r\n cnt += 1\r\n used[i] = True\r\n used[j] = True\r\n if cnt == 0:\r\n return None\r\n #print(Bonds)\r\n npcm = pbc(np.array([ pbc(_ - nppos[0], box) for _ in nppos ]).sum(axis=0)/N + nppos[0], box)\r\n nppos_ = np.array([ pbc(_ - npcm, box) for _ in nppos ])\r\n o = open('NP-step-2.5-%.10d-%.5d.xml' % (timestep, len(Bonds)*2),'w')\r\n o.write(\"\\n\\n\\n\\n\\n\" % (N, box[0], box[1], box[2], N))\r\n o.write('\\n'.join([\"%s %s %s\" % (str(p[0]), str(p[1]), str(p[2])) for p in nppos_]))\r\n o.write(\"\\n\\n\")\r\n o.write(\"\\n\" % (N))\r\n o.write('\\n'.join(list(nptps)))\r\n o.write(\"\\n\\n\")\r\n o.write(\"\\n\" % (N))\r\n o.write('\\n'.join(list([ str(_) for _ in npmas])))\r\n o.write(\"\\n\\n\")\r\n bds = [ (_.type, _.a, _.b) for _ in s.bonds if max(_.a, _.b)\\n\" % (len(bds)))\r\n o.write('\\n'.join([ \"%s %s %s\" % (str(p[0]), str(p[1]), str(p[2])) for p in bds ]))\r\n o.write(\"\\n\\n\")\r\n #o.write(\"\" % (len(NPangles)))\r\n #o.write('\\n'.join([ \"%s %s %s %s\" % (str(p[0]), str(p[1]), str(p[2]), str(p[3])) for p in NPangles ]))\r\n #o.write('\\n')\r\n o.write(\"\\n\\n\")\r\n\r\n\r\n#xml = dump.xml(filename=\"particle\", period=1E7)\r\n#xml.set_params(position=True, image=True, bond=True, velocity=True,type=True,angle=True,mass=True)\r\n#run(int(2e7+1)) # Equilibrium time\r\nanalyze.callback(callback=crosslink, period=int(160))\r\nrun(int(8e5+1))\r\n","sub_path":"tmp_script/crosslink.py","file_name":"crosslink.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234649321","text":"\"\"\"\n SimpleSQL Connections & Database Management\n\n Full-Documentation on : https://github.com/Thomas-SBE/pythonsqlconnector\n\n\"\"\"\n\nimport mysql.connector\nimport colorama\n\n__version__ = \"0.0.1\"\n\n# ----------------------------------\n# Simple coloured messages\n# ----------------------------------\n\ndef succ(s):\n return colorama.Fore.GREEN + str(s) + colorama.Style.RESET_ALL\ndef err(s):\n return colorama.Fore.RED + str(s) + colorama.Style.RESET_ALL\ndef warn(s):\n return colorama.Fore.YELLOW + str(s) + colorama.Style.RESET_ALL\n\n\n# ----------------------------------\n# Custom Exception Messages\n# ----------------------------------\n\nclass SQLPermissionNotGranted:\n def __init__(self, 
accessed, perm, debugmode):\n if debugmode: print(\"{} [ {} ] - You cannot do this as the permission {} does not allow it !\".format(err(\"-\"), err(\"pSQL → \" + accessed), warn(perm)))\nclass SQLActionResult:\n def __init__(self, action, table, amount, debugmode):\n if debugmode: print(\"{} [ {} ] - {} affected {} of {}.\".format(succ(\"+\"), succ(\"pSQL → \" + action), action, warn(amount), warn(table)))\nclass SQLAlreadyExist:\n def __init__(self, typ, action, name, debugmode):\n if debugmode: print(\"{} [ {} ] - Cannot {} {} because the name {} already exists !\".format(err(\"-\"), err(\"pSQL → \" + action + typ), warn(action), warn(typ), warn(name)))\n\n\n# ----------------------------------\n# SQL Framework\n# ----------------------------------\n\nclass SQLPermissions:\n \n ALL_ACCESSES = \"#999\"\n READ_ONLY = \"#000\"\n WRITE_AND_READ = \"#050\"\n TABLE_ONLY = \"#005\"\n \n def __init__(self):\n self.SelectPermission = False\n self.UpdatePermission = False\n self.InsertPermission = False\n self.DeletePermission = False\n self.CreateDatabasePermission = False\n self.CreateTablePermission = False\n self.DropTablePermission = False\n self.DropDatabasePermission = False\n\n def preset(self, mode):\n if mode == SQLPermissions.ALL_ACCESSES: \n self.SelectPermission = True\n self.UpdatePermission = True\n self.InsertPermission = True\n self.DeletePermission = True\n self.CreateDatabasePermission = True\n self.CreateTablePermission = True\n self.DropTablePermission = True\n self.DropDatabasePermission = True\n elif mode == SQLPermissions.READ_ONLY:\n self.SelectPermission = True\n self.UpdatePermission = False\n self.InsertPermission = False\n self.DeletePermission = False\n self.CreateDatabasePermission = False\n self.CreateTablePermission = False\n self.DropTablePermission = False\n self.DropDatabasePermission = False\n elif mode == SQLPermissions.WRITE_AND_READ:\n self.SelectPermission = True\n self.UpdatePermission = True\n self.InsertPermission = True\n self.DeletePermission = True\n self.CreateDatabasePermission = False\n self.CreateTablePermission = False\n self.DropTablePermission = False\n self.DropDatabasePermission = False\n elif mode == SQLPermissions.TABLE_ONLY:\n self.SelectPermission = True\n self.UpdatePermission = True\n self.InsertPermission = True\n self.DeletePermission = True\n self.CreateDatabasePermission = False\n self.CreateTablePermission = True\n self.DropTablePermission = True\n self.DropDatabasePermission = False\n return self\n\nclass SQLConnection:\n def __init__(self, debug = False, custom_permissions = None):\n self.debugmode = debug\n colorama.init()\n self.config = { \"host\": None, \"username\": None, \"password\": None }\n self.permissions = SQLPermissions()\n self.permissions.preset(SQLPermissions.ALL_ACCESSES)\n if custom_permissions != None and type(custom_permissions) is type(SQLPermissions()): self.permissions = custom_permissions\n\n def CONNECT(self, config):\n if self.debugmode: print(\"{} [ {} ] - Attempting connection to {} with user {} ...\".format(succ(\"+\"), succ(\"pSQL\"), warn(config[\"host\"]), warn(config[\"username\"])))\n self.db = mysql.connector.connect(host=config[\"host\"], user=config[\"username\"], passwd=config[\"password\"])\n if self.debugmode: print(\"{} [ {} ] - Connection to {} established successfully !\".format(succ(\"+\"), succ(\"pSQL\"), warn(config[\"host\"])))\n self.config = config\n self.db_cursor = self.db.cursor()\n return self\n\n def DB_CONNECT(self, db_name):\n _db = SQLDatabase(self)\n 
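# INIT opens a dedicated connection to the named database, reusing this connection's host credentials (see SQLDatabase.INIT below)\n        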
_db.INIT(str(db_name))\n        return _db\n\n    def CREATE_DATABASE(self, db_name):\n        if not self.permissions.CreateDatabasePermission: SQLPermissionNotGranted(\"CREATE DATABASE\", \"CreateDatabasePermission\", self.debugmode);return\n        # pass self.debugmode along, since SQLAlreadyExist.__init__ requires it\n        if db_name in self.GET_DATABASES(): SQLAlreadyExist(\"DATABASE\", \"CREATE\", db_name, self.debugmode);return\n        self.db_cursor.execute(\"CREATE DATABASE \" + str(db_name))\n        SQLActionResult(\"CREATE DATABASE\", db_name, 1, self.debugmode)\n\n    def GET_DATABASES(self):\n        self.db_cursor.execute(\"SHOW DATABASES\")\n        return [x[0] for x in self.db_cursor]\n\n    def DROP_DATABASE(self, db_name):\n        if not self.permissions.DropDatabasePermission: SQLPermissionNotGranted(\"DROP DATABASE\", \"DropDatabasePermission\", self.debugmode);return\n        affected = 0\n        if db_name in self.GET_DATABASES(): affected = 1\n        self.db_cursor.execute(\"DROP DATABASE IF EXISTS \" + str(db_name))\n        SQLActionResult(\"DROP DATABASE\", db_name, affected, self.debugmode)\n\nclass SQLDatabase:\n    def __init__(self, master):\n        self.master = master\n\n    def INIT(self, db_name):\n        self.mastername = db_name\n        if self.master.debugmode: print(\"{} [ {} ] - Creating database connection to {} from {} ...\".format(succ(\"+\"), succ(\"pSQL\"), warn(db_name), warn(self.master.config[\"host\"])))\n        self.db = mysql.connector.connect(host=self.master.config[\"host\"], user=self.master.config[\"username\"], passwd=self.master.config[\"password\"], database=db_name)\n        if self.master.debugmode: print(\"{} [ {} ] - Connection to {} from {} established !\".format(succ(\"+\"), succ(\"pSQL\"), warn(db_name), warn(self.master.config[\"host\"])))\n        self.cursor = self.db.cursor()\n\n    def SHOW_COLUMNS(self, table):\n        self.cursor.execute(\"SHOW COLUMNS FROM \`\" + str(table) + \"\`\")\n        res = self.cursor.fetchall()\n        final = [i[0] for i in res]\n        return tuple(final)\n\n    def SELECT_WHERE(self, table, key, value):\n        if not self.master.permissions.SelectPermission: SQLPermissionNotGranted(\"SELECT\", \"SelectPermission\", self.master.debugmode);return\n        self.cursor.execute(\"SELECT * FROM \`\" + str(table) + \"\` WHERE \" + str(key) + \"='\" + str(value) + \"'\")\n        ret = self.cursor.fetchall()\n        if len(ret) <= 0: return {}\n        columns = self.SHOW_COLUMNS(table)\n        final = {}\n        for x in range(len(ret[0])):\n            final[columns[x]] = ret[0][x]\n        SQLActionResult(\"SELECT\", str(table), len(ret), self.master.debugmode)\n        return final\n\n    def SELECT_ALL(self, table, ref = None):\n        if not self.master.permissions.SelectPermission: SQLPermissionNotGranted(\"SELECT\", \"SelectPermission\", self.master.debugmode);return\n        self.cursor.execute(\"SELECT * FROM \`\" + str(table) + \"\` WHERE 1\")\n        res = self.cursor.fetchall()\n        columns = self.SHOW_COLUMNS(table)\n        refindex = -1\n        if ref != None:\n            for i in range(len(columns)):\n                if columns[i] == ref: refindex = i; break\n        final = {}\n        for x in range(len(res)):\n            _temp = {}\n            for w in range(len(res[x])):\n                _temp[columns[w]] = res[x][w]\n            if refindex != -1:\n                final[res[x][refindex]] = _temp\n            else:\n                final[x] = _temp\n        SQLActionResult(\"SELECT\", str(table), len(res), self.master.debugmode)\n        return final\n\n    def INSERT(self, table, keyvalues):\n        if not self.master.permissions.InsertPermission: SQLPermissionNotGranted(\"INSERT\", \"InsertPermission\", self.master.debugmode);return\n        keys, values = \"(\", \"(\"\n        for k,v in keyvalues.items():\n            keys += \"\`\"+str(k)+\"\`,\"\n            values += '\"'+str(v)+'\",'\n        self.cursor.execute(\"INSERT INTO \`\" + str(table) + \"\`\" + keys[:-1] + \")\" + \" VALUES \" + values[:-1] + \")\" + \"\")\n        
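# commit immediately so the inserted row is persisted and visible to other connections\n        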
self.db.commit()\n SQLActionResult(\"INSERT\", str(table), self.cursor.rowcount, self.master.debugmode)\n\n def UPDATE(self, table, keyvalues, references):\n if not self.master.permissions.UpdatePermission: SQLPermissionNotGranted(\"UPDATE\", \"UpdatePermission\", self.master.debugmode);return\n strtemp, reftemp = \"\", \"\"\n for k,v in keyvalues.items():\n strtemp += \"`\"+str(k)+'`=\"'+str(v)+'\",'\n for k,v in references.items():\n reftemp += \"`\" + str(k) + '`=\"' + str(v) + '\" &&'\n self.cursor.execute(\"UPDATE `\" +str(table)+ \"` SET \" +strtemp[:-1]+ \" WHERE \" + reftemp[:-2])\n self.db.commit()\n SQLActionResult(\"UPDATE\", str(table), self.cursor.rowcount, self.master.debugmode)\n\n def DELETE(self, table, references):\n if not self.master.permissions.DeletePermission: SQLPermissionNotGranted(\"DELETE\", \"DeletePermission\", self.master.debugmode);return\n reftemp = \"\"\n for k,v in references.items():\n reftemp += \"`\" + str(k) + '`=\"' + str(v) + '\" &&'\n self.cursor.execute(\"DELETE FROM `\" +str(table)+ \"` WHERE \" + reftemp[:-2])\n self.db.commit()\n SQLActionResult(\"DELETE\", str(table), self.cursor.rowcount, self.master.debugmode)\n\n def CREATE_TABLE(self, table_name, columns):\n if not self.master.permissions.CreateTablePermission: SQLPermissionNotGranted(\"CREATE TABLE\", \"CreateTablePermission\", self.master.debugmode);return\n if table_name in self.GET_TABLES(): SQLAlreadyExist(\"TABLE\", \"CREATE\", table_name, self.master.debugmode);return\n formatd = \"(\"\n for v in columns:\n formatd += str(v) + \" TEXT,\"\n self.cursor.execute(\"CREATE TABLE \" + str(table_name) + formatd[:-1] + \")\")\n SQLActionResult(\"CREATE TABLE\", self.mastername, 1, self.master.debugmode)\n\n def GET_TABLES(self):\n self.cursor.execute(\"SHOW TABLES\")\n return [x[0] for x in self.cursor]\n\n def DROP_TABLE(self, table_name):\n if not self.master.permissions.DropTablePermission: SQLPermissionNotGranted(\"DROP TABLE\", \"DropTablePermission\", self.master.debugmode);return\n affected = 0\n if table_name in self.GET_TABLES(): affected = 1\n self.cursor.execute(\"DROP TABLE IF EXISTS \" + str(table_name))\n SQLActionResult(\"DROP TABLE\", self.mastername, affected, self.master.debugmode)\n\n","sub_path":"install/simplesql.py","file_name":"simplesql.py","file_ext":"py","file_size_in_byte":10956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555373044","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as Data\nfrom torch.autograd import Variable \nimport numpy as np\nfrom torch.utils.data import TensorDataset, DataLoader\nimport sklearn.preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_absolute_error\nfrom datasets import SyntheticSplit\nfrom datasets import SyntheticDataset\n\n\nclass NN(nn.Module):\n def __init__(self):\n super(NN, self).__init__()\n # params\n self.inputSize = 2005\n self.hiddenSize1 = 1000\n self.hiddenSize2 = 500\n self.outputSize = 3\n \n self.linear1 = nn.Linear(self.inputSize, self.hiddenSize1)\n self.linear2 = nn.Linear(self.hiddenSize1, self.hiddenSize2)\n self.linear3 = nn.Linear(self.hiddenSize2, self.outputSize)\n\n def forward(self, x):\n x = self.linear1(x)\n x = F.relu(x)\n x = self.linear2(x)\n x = F.relu(x)\n x = self.linear3(x)\n return x\n\ndef main():\n gen = SyntheticDataset()\n data = gen.split()\n trainPercent = 0.7\n predPercent = 0.2\n validPercent = 0.1\n\n 
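# Assumption from the indexing below: SyntheticDataset.split() returns the (train, test, validation) subsets in that order.\n    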
# train\n trainInputs, b = data[0].tensors()\n tInputs = torch.FloatTensor(trainInputs)\n tInputs = tInputs.view(int(gen.__len__() * trainPercent), -1)\n \n trainTargets = []\n for a in range(int(gen.__len__() * trainPercent)):\n resultTuple = (data[0].quantitative_tensor('infectiousness')[a], data[0].quantitative_tensor('i_out')[a], data[0].quantitative_tensor('i_rec_prop')[a])\n trainTargets.append(resultTuple)\n tTargets = torch.FloatTensor(trainTargets)\n\n # test\n predInputs, c = data[1].tensors()\n pInputs = torch.FloatTensor(predInputs)\n pInputs = pInputs.view(int(gen.__len__() * predPercent), -1)\n \n predTargets = []\n for a in range(int(gen.__len__() * predPercent)):\n resultTuple = (data[1].quantitative_tensor('infectiousness')[a], data[1].quantitative_tensor('i_out')[a], data[1].quantitative_tensor('i_rec_prop')[a])\n predTargets.append(resultTuple)\n pTargets = torch.FloatTensor(predTargets)\n \n # validation\n '''validInputs, c = data[2].tensors()\n vInputs = torch.FloatTensor(validInputs)\n vInputs = vInputs.view(int(gen.__len__() * validPercent), -1)\n \n validTargets = []\n for a in range(int(gen.__len__() * validPercent)):\n resultTuple = (data[2].quantitative_tensor('infectiousness')[a], data[2].quantitative_tensor('i_out')[a], data[2].quantitative_tensor('i_rec_prop')[a])\n validTargets.append(resultTuple)\n vTargets = torch.FloatTensor(validTargets)'''\n\n model = NN()\n\n opt = optim.SGD(model.parameters(), lr=1e-8)\n loss_fn = F.mse_loss\n\n loss = loss_fn(model(tInputs), tTargets)\n\n\n for epoch in range(500):\n # Generate predictions\n pred = model(tInputs)\n loss = loss_fn(pred, tTargets)\n # Perform gradient descent\n loss.backward()\n opt.step()\n opt.zero_grad()\n\n print('Training loss: ', loss_fn(model(tInputs), tTargets))\n\n preds = model(pInputs)\n print(mean_absolute_error(pTargets.detach().numpy(), preds.detach().numpy()))\n \"\"\"sumIOut=0\n sumIRecProp=0\n sumInfectiousness=0\n for a in range(3):\n print(\"i_out: \",(float(preds[a][0]), \" \", (float(pTargets[a][0]))))\n print(\"i_rec_prop: \",(float(preds[a][1]), \" \", (float(pTargets[a][1]))))\n print(\"infectiousness: \",(float(preds[a][2]), \" \", (float(pTargets[a][2]))))\n\n q=100 * (abs((float(preds[a][0]) - float(pTargets[a][0])))/float(pTargets[a][0]))\n p=100 * (abs((float(preds[a][1]) - float(pTargets[a][1])))/float(pTargets[a][1]))\n s=100 * (abs((float(preds[a][2]) - float(pTargets[a][2])))/float(pTargets[a][2]))\n sumIOut+=q\n sumIRecProp+=p\n sumInfectiousness+=s\n print('i_out error for this case: ',q)\n print('i_rec_prop error for this case: ',p)\n print('infectiousness error for this case: ',s)\n print()\"\"\"\n\n #print('\\ni_out the average percent error is: ',sumIOut/240)\n #print('\\ni_rec_prop the average percent error is: ',sumIRecProp/240)\n #print('\\ninfectiousness the average percent error is: ',sumInfectiousness/240)\nif __name__ == \"__main__\":\n main()\n","sub_path":"all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"65044008","text":"\"\"\"\nMIT License\n\nCopyright (c) 2021 isaa-ctaylor\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to 
whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport discord\nfrom discord.ext import commands\nimport statcord\n\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nclass Statcord(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.key = os.getenv(\"statcord\")\n self.api = statcord.Client(self.bot, self.key, mem=True, cpu=True, bandwidth=True)\n self.api.start_loop()\n\n @commands.Cog.listener(name=\"on_command\")\n async def log_command(self, ctx):\n self.api.command_run(ctx)\n\n\ndef setup(bot):\n bot.add_cog(Statcord(bot))\n","sub_path":"bot/cogs/Statcord.py","file_name":"Statcord.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"243209253","text":"import circularLoophole\nimport RockPaperScissors\nimport FindMyNum\nimport TicTacToe\n\nfrom tkinter import *\nimport tkinter as tk\nimport tkinter.font as font\nfrom tkinter import Menu\n\nclass MainMenu:\n def playttt(self): \n print(\"ttt\")\n self.menu.destroy()\n self.appThree= TicTacToe.tripleT()\n self.appThree.startPlaying() \n\n\n def playrps(self): \n print(\"rps\")\n self.menu.destroy()\n self.appTwo=RockPaperScissors.RockPS()\n self.appTwo.playTheGame()\n\n def playfmn(self): \n print(\"fmn\")\n self.menu.destroy()\n self.app=FindMyNum.findX()\n self.app.startGame() \n\n \"\"\"\n ███  ███  █████  ██ ███  ██  ███████ ██  ██ ███  ██  ██████ ████████ ██  ██████  ███  ██ \n ████  ████ ██   ██ ██ ████  ██  ██      ██  ██ ████  ██ ██         ██    ██ ██    ██ ████  ██ \n ██ ████ ██ ███████ ██ ██ ██  ██  █████  ██  ██ ██ ██  ██ ██  ██  ██ ██  ██ ██ ██  ██ \n ██  ██  ██ ██   ██ ██ ██  ██ ██  ██     ██  ██ ██  ██ ██ ██  ██  ██ ██  ██ ██  ██ ██ \n ██      ██ ██  ██ ██ ██   ████  ██   ██████  ██   ████  ██████  ██  ██  ██████  ██   ████                                                                                   \n\n \"\"\"\n def openMenu(self):\n self.menu=tk.Tk()\n self.menu.title(\"Main Menu\")\n self.menu.iconbitmap(\"images/menuDavid.ico\")\n self.menu.configure(bg=\"#a07ab1\")\n\n self.labelFont= font.Font(family=\"Times\",size=40,weight=\"bold\",slant=\"italic\")\n self.myFont=font.Font(family=\"Times\",size=20,weight=\"bold\",slant=\"italic\")\n self.buttonColor=\"#a07ab1\"\n self.textColor=\"#411a52\"\n\n self.welcome = Label(self.menu, text='Marhbé Sahbi\\nAkhtar Game', font=self.labelFont,padx=40,pady=20,bg=self.buttonColor,fg=\"#fff\")\n self.welcome.grid(row=1,column=1,pady=4)\n\n self.tttImage=PhotoImage(file=\"images/TicTacToe.png\")\n self.tttImage=self.tttImage.subsample(6,6)\n self.ttt= Button(self.menu,command= lambda :self.playttt(), text=\"Click to Play 
'TicTacToe'\",font=self.myFont,image=self.tttImage,width=420,height=150,compound=\"top\",bd=0,bg=\"#8feff9\",fg=self.textColor,activebackground=self.buttonColor,activeforeground=self.textColor).grid(row=2,column=1,pady=2)\n\n #self.tttt= Button(self.menu,command= lambda :self.playttt(), text=\"Click to Play 'TicTacToe'\",font=self.myFont,image=self.tttImage,width=418,height=148,compound=\"top\",bd=0,bg=\"#fff\",fg=self.textColor,activebackground=self.buttonColor,activeforeground=self.textColor).grid(row=2,column=1,pady=2)\n\n self.rpsImage=PhotoImage(file=\"images/RockPaperScissors.png\")\n self.rpsImage=self.rpsImage.subsample(6,6)\n self.rps= Button(self.menu,command= lambda :self.playrps(), text=\"Click to Play 'Rock Paper Scissors'\",font=self.myFont,image=self.rpsImage,width=420,height=150,compound=\"top\",bd=0,bg=\"#f9ed80\",fg=self.textColor,activebackground=self.buttonColor,activeforeground=self.textColor).grid(row=3,column=1,pady=2)\n\n self.fmnImage=PhotoImage(file=\"images/FindMyNum.png\")\n self.fmnImage=self.fmnImage.subsample(6,6)\n self.fmn=Button(self.menu,command= lambda :self.playfmn(), text=\"Click to Play 'Find My Number'\",font=self.myFont,image=self.fmnImage,width=420,height=150,compound=\"top\",bd=0,bg=\"#f39faf\",fg=self.textColor,activebackground=self.buttonColor,activeforeground=self.textColor).grid(row=4,column=1,pady=2)\n\n self.menu.mainloop() \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"144631810","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('cats/', include('cats.urls')),\n path('base-auth/', include('rest_framework.urls')),\n path('', include('oauth2_provider.urls'), name='oauth2_provider'),\n path('auth/', include('djoser.urls')),\n\n]","sub_path":"tecint-master/index/index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"572562887","text":"import numpy as np\nfrom dezero import Layer, Variable, Function, Layer\nimport dezero.functions as F\nimport dezero.layers as L\nfrom dezero import utils\n\n\nclass Model(Layer):\n\n def plot(self, *inputs, to_file='model.png'):\n ys = self.__call__(*inputs)\n return utils.plot_dot_graph(ys, verbose=True, to_file=to_file)\n\n\nclass Sequential(Model):\n\n def __init__(self, *layers):\n self.layers = []\n for i, layer in enumerate(layers):\n setattr(self, 'l' + str(i), layer)\n self.layers.append(layer)\n\n def __call__(self, x):\n for layer in self.layers:\n x = layer(x)\n return x\n\n\nclass MLP(Model):\n\n def __init__(self, fc_output_sizes, activation=F.sigmoid):\n super().__init__()\n self.activation = activation\n self.layers = []\n\n for i, out_size in enumerate(fc_output_sizes):\n layer = L.Linear(out_size)\n setattr(self, 'l' + str(i), layer)\n self.layers.append(layer)\n\n def __call__(self, x):\n for l in self.layers[:-1]:\n x = self.activation(l(x))\n return self.layers[-1](x)\n\n\nclass VGG16(Model):\n WEIGHTS_PATH = 'https://github.com/koki0702/dezero-models/releases/download/v0.1/vgg16.npz'\n\n def __init__(self, pretrained=False):\n super().__init__()\n self.conv1_1 = L.Conv2d(3, 64, 3, 1, 1)\n self.conv1_2 = L.Conv2d(64, 64, 3, 1, 1)\n self.conv2_1 = L.Conv2d(64, 128, 3, 1, 1)\n self.conv2_2 = L.Conv2d(128, 128, 3, 1, 1)\n self.conv3_1 = 
L.Conv2d(128, 256, 3, 1, 1)\n self.conv3_2 = L.Conv2d(256, 256, 3, 1, 1)\n self.conv3_3 = L.Conv2d(256, 256, 3, 1, 1)\n self.conv4_1 = L.Conv2d(256, 512, 3, 1, 1)\n self.conv4_2 = L.Conv2d(512, 512, 3, 1, 1)\n self.conv4_3 = L.Conv2d(512, 512, 3, 1, 1)\n self.conv5_1 = L.Conv2d(512, 512, 3, 1, 1)\n self.conv5_2 = L.Conv2d(512, 512, 3, 1, 1)\n self.conv5_3 = L.Conv2d(512, 512, 3, 1, 1)\n self.fc6 = L.Linear(4096)\n self.fc7 = L.Linear(4096)\n self.fc8 = L.Linear(1000)\n\n if pretrained:\n weights_path = utils.get_file(VGG16.WEIGHTS_PATH)\n self.load_weights(weights_path)\n\n def __call__(self, x):\n x = F.relu(self.conv1_1(x))\n x = F.relu(self.conv1_2(x))\n x = F.pooling(x, 2, 2)\n x = F.relu(self.conv2_1(x))\n x = F.relu(self.conv2_2(x))\n x = F.pooling(x, 2, 2)\n x = F.relu(self.conv3_1(x))\n x = F.relu(self.conv3_2(x))\n x = F.relu(self.conv3_3(x))\n x = F.pooling(x, 2, 2)\n x = F.relu(self.conv4_1(x))\n x = F.relu(self.conv4_2(x))\n x = F.relu(self.conv4_3(x))\n x = F.pooling(x, 2, 2)\n x = F.relu(self.conv5_1(x))\n x = F.relu(self.conv5_2(x))\n x = F.relu(self.conv5_3(x))\n x = F.pooling(x, 2, 2)\n x = F.reshape(x, (x.shape[0], -1))\n x = F.dropout(F.relu(self.fc6(x)))\n x = F.dropout(F.relu(self.fc7(x)))\n x = self.fc8(x)\n return x","sub_path":"dezero/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38644365","text":"# monitor.py 27/09/2015 D.J.Whale\n#\n# Monitor settings of Energenie MiHome plugs\n\nimport time\n\nfrom energenie import OpenHEMS, Devices\nfrom energenie import radio\nfrom Timer import Timer\nimport os\n\nLOG_FILENAME = \"energenie.csv\"\n\ndef warning(msg):\n print(\"warning:%s\" % str(msg))\ndef trace(msg):\n print(\"monitor:%s\" % str(msg))\n\nlog_file = None\n\ndef logMessage (msg):\n HEADINGS = 'timestamp,mfrid,productid,sensorid,flags,switch,voltage,freq,reactive,real,apparent,current'\n\n global log_file\n if log_file is None:\n if not os.path.isfile(LOG_FILENAME):\n log_file = open(LOG_FILENAME, 'w')\n log_file.write(HEADINGS + '\\n')\n else:\n log_file = open(LOG_FILENAME, 'a') # append\n\n # get the header\n header = msg['header']\n timestamp = time.time()\n mfrid = header['mfrid']\n productid = header['productid']\n sensorid = header['sensorid']\n\n # set defaults for any data that doesn't appear in this message\n # but build flags so we know which ones this contains\n flags = [0 for i in range(7)]\n switch = None\n voltage = None\n freq = None\n reactive = None\n real = None\n apparent = None\n current = None\n\n # capture any data that we want\n #print(msg)\n for rec in msg['recs']:\n paramid = rec['paramid']\n try:\n value = rec['value']\n except:\n value = None\n \n if paramid == OpenHEMS.PARAM_SWITCH_STATE:\n switch = value\n flags[0] = 1\n elif paramid == OpenHEMS.PARAM_VOLTAGE:\n flags[1] = 1\n voltage = value\n elif paramid == OpenHEMS.PARAM_FREQUENCY:\n flags[2] = 1\n freq = value\n elif paramid == OpenHEMS.PARAM_REACTIVE_POWER:\n flags[3] = 1\n reactive = value\n elif paramid == OpenHEMS.PARAM_REAL_POWER:\n flags[4] = 1\n real = value\n elif paramid == OpenHEMS.PARAM_APPARENT_POWER:\n flags[5] = 1\n apparent = value\n elif paramid == OpenHEMS.PARAM_CURRENT:\n flags[6] = 1\n current = value\n\n # generate a line of CSV\n flags = \"\".join([str(a) for a in flags])\n csv = \"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\" % (timestamp, mfrid, productid, sensorid, flags, switch, voltage, freq, reactive, real, apparent, current)\n log_file.write(csv + 
'\\n')\n log_file.flush()\n trace(csv) # testing\n\n\n#----- TEST APPLICATION -------------------------------------------------------\n\ndirectory = {}\n\ndef allkeys(d):\n result = \"\"\n for k in d:\n if len(result) != 0:\n result += ','\n result += str(k)\n return result\n\n \ndef updateDirectory(message):\n \"\"\"Update the local directory with information about this device\"\"\"\n now = time.time()\n header = message[\"header\"]\n sensorId = header[\"sensorid\"]\n\n if sensorId not in directory: # dict.has_key() does not exist in Python 3\n # new device discovered\n desc = Devices.getDescription(header[\"mfrid\"], header[\"productid\"])\n print(\"ADD device:%s %s\" % (hex(sensorId), desc))\n directory[sensorId] = {\"header\": message[\"header\"]}\n #trace(allkeys(directory))\n\n directory[sensorId][\"time\"] = now\n #TODO would be good to keep recs, but need to iterate through all and key by paramid,\n #not as a list index, else merging will be hard.\n\n\nSWITCH_MESSAGE = {\n \"header\": {\n \"mfrid\": Devices.MFRID,\n \"productid\": Devices.PRODUCTID_R1_MONITOR_AND_CONTROL,\n \"encryptPIP\": Devices.CRYPT_PIP,\n \"sensorid\": 0 # FILL IN\n },\n \"recs\": [\n {\n \"wr\": True,\n \"paramid\": OpenHEMS.PARAM_SWITCH_STATE,\n \"typeid\": OpenHEMS.Value.UINT,\n \"length\": 1,\n \"value\": 0 # FILL IN\n }\n ]\n}\n\n\nJOIN_ACK_MESSAGE = {\n \"header\": {\n \"mfrid\": 0, # FILL IN\n \"productid\": 0, # FILL IN\n \"encryptPIP\": Devices.CRYPT_PIP,\n \"sensorid\": 0 # FILL IN\n },\n \"recs\": [\n {\n \"wr\": False,\n \"paramid\": OpenHEMS.PARAM_JOIN,\n \"typeid\": OpenHEMS.Value.UINT,\n \"length\": 0\n }\n ]\n}\n\n\n\ndef monitor():\n \"\"\"Send discovery and monitor messages, and capture any responses\"\"\"\n\n # Define the schedule of message polling\n sendSwitchTimer = Timer(5, 1) # every n seconds offset by initial 1\n switch_state = 0 # OFF\n radio.receiver()\n decoded = None\n\n while True:\n # See if there is a payload, and if there is, process it\n if radio.isReceiveWaiting():\n #trace(\"receiving payload\")\n payload = radio.receive()\n try:\n decoded = OpenHEMS.decode(payload)\n except OpenHEMS.OpenHEMSException as e:\n warning(\"Can't decode payload:\" + str(e))\n continue\n \n OpenHEMS.showMessage(decoded)\n updateDirectory(decoded)\n logMessage(decoded)\n \n #TODO: Should remember report time of each device,\n #and reschedule command messages to avoid their transmit slot\n #making it less likely to miss an incoming message due to\n #the radio being in transmit mode\n\n # handle messages with zero recs in them silently\n #trace(decoded)\n if len(decoded[\"recs\"]) == 0:\n print(\"Empty record:%s\" % decoded)\n else:\n # assume only 1 rec in a join, for now\n if decoded[\"recs\"][0][\"paramid\"] == OpenHEMS.PARAM_JOIN:\n #TODO: write OpenHEMS.getFromMessage(\"header_mfrid\")\n # send back a JOIN ACK, so that join light stops flashing\n response = OpenHEMS.alterMessage(JOIN_ACK_MESSAGE,\n header_mfrid=decoded[\"header\"][\"mfrid\"],\n header_productid=decoded[\"header\"][\"productid\"],\n header_sensorid=decoded[\"header\"][\"sensorid\"])\n p = OpenHEMS.encode(response)\n radio.transmitter()\n radio.transmit(p)\n radio.receiver()\n\n if sendSwitchTimer.check() and decoded is not None:\n request = OpenHEMS.alterMessage(SWITCH_MESSAGE,\n header_sensorid=decoded[\"header\"][\"sensorid\"],\n recs_0_value=switch_state)\n p = OpenHEMS.encode(request)\n radio.transmitter()\n radio.transmit(p)\n radio.receiver()\n switch_state = (switch_state+1) % 2 # toggle\n \n\nif __name__ == \"__main__\":\n \n trace(\"starting monitor\")\n 
radio.init()\n OpenHEMS.init(Devices.CRYPT_PID)\n\n try:\n monitor()\n\n finally:\n radio.finished()\n\n# END\n","sub_path":"src/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293571792","text":"import unittest\n\nfrom guess import Guess\n\nclass TestGuess(unittest.TestCase):\n\n def setUp(self):\n self.g1 = Guess('default')\n\n def tearDown(self):\n pass\n\n def testDisplayCurrent(self):\n currentList = ['d', 'e', 'f', 'a', 'u', 'l', 't']\n currentString = \"\"\n currentStringList = [\"_\", \"_\", \"_\", \"_\", \"_\", \"_\", \"_\"]\n\n for i in range(len(currentList)):\n self.g1.guess(currentList[i])\n currentStringList[i] = currentList[i]\n for j in range(len(currentStringList)):\n currentString = currentString + currentStringList[j] + \" \"\n self.assertEqual(self.g1.displayCurrent(), currentString)\n\n\n def testDisplayGuessed(self):\n currentList = ['d', 'e', 'f', 'a', 'u', 'l', 't']\n currentString = \" \"\n currentStringList = ['n']\n\n for i in range(len(currentList)):\n self.g1.guess(currentList[i])\n currentStringList.append(currentList[i])\n currentStringList = sorted(currentStringList)\n for j in range(len(currentStringList)):\n currentString = currentString + currentStringList[j] + \" \"\n self.assertEqual(self.g1.displayGuessed(), currentString)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"hangman_unittest/20171678hangman/testGuess.py","file_name":"testGuess.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"250157039","text":"import pandas as pd\r\nimport mysql.connector\r\n\r\ncities_pop = {}\r\n\r\ncities_lookup = {}\r\ncities_full = set()\r\n\r\ndef parse_cities_sql():\r\n\tdata = pd.read_csv(\"cities15000.txt\", sep=\"\\t\")\r\n\tfor row in data.itertuples():\r\n\t\tif row[3] in cities_pop:\r\n\t\t\tif row[15] > cities_pop[row[3]][\"pop\"]:\r\n\t\t\t\tcities_pop[row[3]][\"pop\"] = int(row[15])\r\n\t\t\t\tcities_pop[row[3]][\"lat\"] = float(row[5])\r\n\t\t\t\tcities_pop[row[3]][\"long\"] = float(row[6])\r\n\t\t\t\tcontinue\r\n\t\tcities_pop[row[3]] = {}\r\n\t\tcities_pop[row[3]][\"pop\"] = int(row[15])\r\n\t\tcities_pop[row[3]][\"lat\"] = float(row[5])\r\n\t\tcities_pop[row[3]][\"long\"] = float(row[6])\r\n\r\ndef parse_cities_python():\r\n\tdata = pd.read_csv(\"cities15000.txt\", sep=\"\\t\")\r\n\tfor row in data.itertuples():\r\n\t\tif \" \" in row[3]:\r\n\t\t\tcities_lookup[row[3].split()[0]] = 1\r\n\t\t\tcities_full.add(row[3])\r\n\t\t\tcontinue\r\n\t\tif row[3] in cities_lookup:\r\n\t\t\tcities_lookup[row[3]] = 1\r\n\t\t\tcontinue\r\n\t\tcities_lookup[row[3]] = 0\r\n\t\tcities_full.add(row[3])\r\n\t#manually print these to stdout and redirect to file\r\n\t#print(cities_full)\r\n\t#print(cities_lookup)\r\n\r\ndef import_cities_sql():\r\n\tcnx = mysql.connector.connect(user=\"root\", password=\"root\", database=\"gutenberg\")\r\n\tcursor = cnx.cursor()\r\n\r\n\tquery = \"INSERT INTO city_t (name, latitude, longtitude) VALUES (%s, %s, %s)\"\r\n\tfor city in cities_pop:\r\n\t\tlatitude = cities_pop[city][\"lat\"]\r\n\t\tlongtitude = cities_pop[city][\"long\"]\r\n\t\ttry:\r\n\t\t\tcursor.execute(query, (city, str(latitude), str(longtitude)))\r\n\t\texcept mysql.connector.Error as err:\r\n\t\t\tprint(\"Whoops: {}\".format(err))\r\n\tcnx.commit()\r\n\tcnx.close()\r\n\r\nif __name__ == 
\"__main__\":\r\n\tparse_cities_python()\r\n\r\n","sub_path":"Gutenberg-book-project/samplepython/city_parser.py","file_name":"city_parser.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"178654528","text":"__author__ = 'max'\nimport random\nfrom pip._vendor.distlib.compat import raw_input\nfrom vocab import *\nfrom hiragana import *\nfrom katakana import *\n# add double consonant sign \"っ\"\n\n\n\n\n\n\ndef printHira(dictionary):\n\ttimes = int(input('Times: '))\n\tfor i in range(times):\n\t\tprint(random.choice(list(dictionary)))\n\n\ndef printHiraCSV(dictionary):\n\trows = int(input('Rows: '))\n\tcolumns = int(input('Columns: '))\n\tfor i in range(rows):\n\t\trow = ''\n\t\tfor i in range(columns):\n\t\t\trow += '%s| |' % random.choice(list(dictionary))\n\t\tprint(row)\n\n\ndef testHira(hiragana_dict, hiragana_list):\n\tprint('こんにちわ!\\n')\n\tif 'は' in hiragana_list:\n\t\tprint('Hiragan is used for:'\n\t\t '\\n\t— Difficult kanji'\n\t\t '\\n\t— Words with no kanji'\n\t\t '\\n\t— Grammatical particles'\n\t\t '\\n\t— Suffixes'\n\t\t '\\n\t— Verb & adjective Inflections')\n\telse:\n\t\tprint('')\n\tcorrect = 0\n\tfalse = 0\n\tx = int(input('Times: '))\n\tfalse_kana = []\n\tfor i in range(x):\n\t\thiraKey = random.choice(hiragana_list)\n\t\tchoice = 'Specify kana --> ' + hiraKey + ' --> '\n\t\tvar = raw_input(choice)\n\t\tif (var == hiragana_dict[hiraKey]):\n\t\t\tprint('はい!すごい しごと!')\n\t\t\tcorrect += 1\n\t\telse:\n\t\t\tprint('いいえ!それは せい かい ありません!')\n\t\t\tprint('Correct is %s = %s' % (hiraKey, hiragana_dict[hiraKey]))\n\t\t\tfalse += 1\n\t\t\tfalse_kana.append(hiraKey)\n\tprint('You had %i correct and %i false.' % (correct, false))\n\tif (len(list(set(false_kana))) > 0):\n\t\tprint('False kana were: ')\n\t\tfor each in list(set(false_kana)):\n\t\t\tprint(each + ' : ' + hiragana_dict[each])\n\texit()\n\n\ndef testVocab(vocab):\n\tprint(\n\t\t'こんにちわ!\\n\\nThe vocabulary currently contains %i items.\\nType \"exit\" to exit program.\\nType \"help\" to view this text again.\\n' % (\n\t\tlen(vocab)))\n\tcorrect = 0\n\tfalse = 0\n\tx = int(input('Times: '))\n\tfalse_kana = []\n\tfor i in range(x):\n\t\tKey = random.choice(list(vocab))\n\t\tchoice = 'Specify word --> ' + Key + ' --> '\n\t\tvar = raw_input(choice)\n\t\tif (var == vocab[Key]):\n\t\t\tprint('はい!すごい しごと!')\n\t\t\tcorrect += 1\n\t\telif (var == 'exit'):\n\t\t\tbreak\n\t\telif (var == 'help'):\n\t\t\tprint('\\nThe vocabulary currently contains %i items.' % (len(vocab)))\n\t\t\tprint('You had %i correct and %i false.\\n' % (correct, false))\n\t\t\tprint(\"Type 'exit' to exit program.\\nType 'help' to view this text again.\\n\")\n\t\telse:\n\t\t\tprint('いいえ!それは せい かい ありません!')\n\t\t\tprint('Correct is %s = %s' % (Key, vocab[Key]))\n\t\t\tfalse += 1\n\t\t\tfalse_kana.append(Key)\n\tprint('\\nYou had %i correct and %i false.' 
% (correct, false))\n\tif (len(list(set(false_kana))) > 0):\n\t\tprint('\\nFalse words were: ')\n\t\tfor each in list(set(false_kana)):\n\t\t\tprint(each + ' : ' + vocab[each])\n\texit()\n\n# testHira(hiraDict, hiraList)\n# testHira(kanaDict, kanaList)\n# testHira(kanaDict, learnListKana)\ntestHira(hiraDict, learnListHira)\n# testVocab(learnVocab)\n# printHira(hiraList)\n# printHira(kanaList)\n# printHiraCSV(learnListHira)\n# printHiraCSV(learnListKana)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166045523","text":"#!/usr/bin/python3\n\nimport os\nimport re\nimport sys\nimport threading\nimport subprocess\nimport ahocorasick\n\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom PyQt5.QtCore import Qt, QProcess\nfrom PyQt5.QtGui import QDesktopServices, QFont\n\nfrom GText import GTextEdit\nfrom GSyntax import GSyntaxHighlighter\nfrom GFile import GDocument, GTranslation, GVideo, GEGLFile\nfrom GImage import GImageGrid, GCustomImageDialog, GCustomScreenShotDialog\n\nfrom GScreenUtils import GLayeredDocumentCanvas\n\nfrom time import sleep\nfrom GServer import GServer\n\nfrom GSettings import GDefaultValues, GSettingsMenu\n\nclass Main(QtWidgets.QMainWindow):\n\tcwd\t\t= GDefaultValues.cwd\n\thome\t\t= GDefaultValues.home\n\tdefault_pngDir = GDefaultValues.pngDir\n\tdefault_videoId = GDefaultValues.videoId\n\tdefault_imgDir = GDefaultValues.imgDir\n\tdefault_pngDirSuffix = GDefaultValues.pngDirSuffix\n\t\n\n\tdef __init__(self, parent = None):\n\t\tQtWidgets.QMainWindow.__init__(self, parent)\n\t\tself.openTextFileName = \"\"\n\t\tself.openDocumentFileName = \"\"\n\t\tself.isRecording = False\n\t\tself.translationFileName = \"\"\n\t\tself.hasOpenTranslation = False\n\t\tself.server = GServer()\n\t\tself.server.sender.finishedRecording.connect(self.createVideo)\n\t\tself.initUI()\n\n\t#####################################\n\t#\n\t# Menubar\n\t#\n\t#####################################\n\tdef initMenubar(self):\n\t\tmenubar = self.menuBar()\n\t\tfile = menubar.addMenu(\"Arquivos\")\n\t\tavatar = menubar.addMenu(\"Avatar\")\n\t\ttraducao = menubar.addMenu(\"Tradução\")\n\t\timagens = menubar.addMenu(\"Imagens\")\n\n\t\tfileNovo = QtWidgets.QAction(\"Novo\", self)\n\t\tfileNovo.setShortcut(\"Ctrl+N\")\n\t\tfileNovo.setStatusTip(\"Criar nova tradução\")\n\t\tfileNovo.triggered.connect(self.newTextFile)\n\n\t\tfileAbrir = QtWidgets.QAction(\"Abrir documento\", self)\n\t\tfileAbrir.setShortcut(\"Ctrl+O\")\n\t\tfileAbrir.setStatusTip(\"Abre novo documento\")\n\t\tfileAbrir.triggered.connect(self.openDocument)\n\n\t\tfileImportar = QtWidgets.QAction(\"Importar tradução\", self)\n\t\tfileImportar.setShortcut(\"Ctrl+I\")\n\t\tfileImportar.setStatusTip(\"Importa tradução\")\n\t\tfileImportar.triggered.connect(self.importTextFile)\n\n\t\tfileSalvar = QtWidgets.QAction(\"Salvar\", self)\n\t\tfileSalvar.setShortcut(\"Ctrl+S\")\n\t\tfileSalvar.setStatusTip(\"Salva arquivo da tradução\")\n\t\tfileSalvar.triggered.connect(self.saveTextFile)\n\n\t\tfileSalvarComo = QtWidgets.QAction(\"Salvar como...\", self)\n\t\tfileSalvarComo.setShortcut(\"Ctrl+Shift+S\")\n\t\tfileSalvarComo.setStatusTip(\"Salvar arquivo da tradução como...\")\n\t\tfileSalvarComo.triggered.connect(self.saveTextFileAs)\n\n\t\tfileExportar = QtWidgets.QMenu(\"Exportar\", self)\n\n\t\tself.exportarTXT = QtWidgets.QAction(\"TXT\")\n\t\tself.exportarPDF = 
QtWidgets.QAction(\"PDF\")\n\t\tself.exportarDOCX= QtWidgets.QAction(\"DOCX (Microsoft Word)\")\n\t\tself.exportarODT = QtWidgets.QAction(\"ODT (Libre Office)\")\n\t\t\n\t\tself.exportarTXT.triggered.connect(lambda : self.exportTextFile(\"txt\"))\n\t\tself.exportarPDF.triggered.connect(lambda : self.exportTextFile(\"pdf\"))\n\t\tself.exportarDOCX.triggered.connect(lambda : self.exportTextFile(\"docx\"))\n\t\tself.exportarODT.triggered.connect(lambda : self.exportTextFile(\"odt\"))\n\t\t\n\t\tfileExportar.setStatusTip(\"Exportar tradução para...\")\n\t\t\n\t\tfileExportar.addAction(self.exportarTXT)\n\t\tfileExportar.addAction(self.exportarPDF)\n\t\tfileExportar.addAction(self.exportarDOCX)\n\t\tfileExportar.addAction(self.exportarODT)\n\n\n\t\t#fileExportar.triggered.connect(self.exportTextFile)\t\n\n\t\tfileQuit = QtWidgets.QAction(\"Sair\", self)\n\t\tfileQuit.setShortcut(\"Ctrl+Q\")\n\t\tfileQuit.setStatusTip(\"Sair da aplicação\")\n\t\tfileQuit.triggered.connect(self.__del__)\t\n\n\t\tfile.addAction(fileNovo)\n\t\tfile.addSeparator()\n\t\tfile.addAction(fileAbrir)\n\t\tfile.addAction(fileImportar)\n\t\tfile.addSeparator()\n\t\tfile.addAction(fileSalvar)\n\t\tfile.addAction(fileSalvarComo)\n\t\tfile.addMenu(fileExportar)\n\t\tfile.addSeparator()\n\t\tfile.addAction(fileQuit)\n\n\n\t\tavatarEnviar = QtWidgets.QAction(\"Enviar texto\", self)\n\t\tavatarEnviar.setShortcut(\"Ctrl+Shift+Return\")\n\t\tavatarEnviar.setStatusTip(\"Envia o texto selecionado para o avatar sinalizar\")\n\t\tavatarEnviar.triggered.connect(self.sendText)\n\n\n\t\tavatarGravar = QtWidgets.QAction(\"Gravar vídeo\", self)\n\t\tavatarGravar.setStatusTip(\"Grava o vídeo para o texto selecionado\")\n\t\tavatarGravar.triggered.connect(self.recordVideo)\n\t\t\n\t\tavatarMostrar = QtWidgets.QAction(\"Mostrar avatar\", self)\n\t\tavatarMostrar.setShortcut(\"Ctrl+Shift+T\")\n\t\tavatarMostrar.setStatusTip(\"Habilita/Desabilita tela do avatar\")\n\t\tavatarMostrar.triggered.connect(self.toggleAvatarVisible)\n\n\t\tavatar.addAction(avatarEnviar)\n\t\tavatar.addAction(avatarGravar)\n\t\tavatar.addSeparator()\n\t\tavatar.addAction(avatarMostrar)\n\n\t\ttraducaoShowAll = QtWidgets.QAction(\"Mostrar tudo\", self)\n\t\ttraducaoShowAll.setStatusTip(\"Exibir toda a tradução do arquivo\")\n\t\ttraducaoShowAll.triggered.connect(self.showAllTranslation)\n\t\t\n\t\ttraducaoNext\t= QtWidgets.QAction(\"Próxima linha\", self)\n\t\ttraducaoNext.setStatusTip(\"Próxima linha da tradução do arquivo\")\n\t\ttraducaoNext.triggered.connect(self.addNextTranslationParagraph)\n\t\t\n\t\ttraducaoReset\t= QtWidgets.QAction(\"Resetar tradução\", self)\n\t\ttraducaoReset.setStatusTip(\"Apaga todo o conteúdo do editor e reinicia a tradução para a primeira linha\")\n\t\ttraducaoReset.triggered.connect(self.resetTranslation)\n\t\t\n\t\ttraducaoCreate\t= QtWidgets.QAction(\"Gerar tradução\", self)\n\t\ttraducaoCreate.setStatusTip(\"Traduz o arquivo selecionado\")\n\t\ttraducaoCreate.triggered.connect(self.getTranslationFromFile)\n\t\t\n\t\ttraducao.addAction(traducaoNext)\n\t\ttraducao.addSeparator()\n\t\ttraducao.addAction(traducaoShowAll)\n\t\ttraducao.addAction(traducaoReset)\n\t\ttraducao.addSeparator()\n\t\ttraducao.addAction(traducaoCreate)\n\n\t\timagensNewFromFile = QtWidgets.QAction(\"Adicionar imagem do computador\", self)\n\t\timagensNewFromFile.setStatusTip(\"Adiciona uma imagem do computador à lista de imagens disponíveis para o vídeo\")\n\t\timagensNewFromFile.triggered.connect(self.addImagesFromFile)\n\n\t\timagensNewFromUrl = 
QtWidgets.QAction(\"Adicionar imagem da internet\", self)\n\t\timagensNewFromUrl.setStatusTip(\"Adiciona uma imagem da internet à lista de imagens disponíveis para o vídeo\")\n\t\timagensNewFromUrl.triggered.connect(self.addImageFromUrl)\n\t\t\n\t\tself.imagensDelete = QtWidgets.QAction(\"Remover imagens\")\n\t\tself.imagensDelete.setStatusTip(\"Remover imagens da área de seleção\")\n\t\tself.imagensDelete.triggered.connect(self.setRemoveImagesState)\n\t\t\n\t\timagens.addAction(imagensNewFromFile)\n\t\timagens.addAction(imagensNewFromUrl)\n\t\timagens.addSeparator()\n\t\timagens.addAction(self.imagensDelete)\n\t\t\n\t\t# Preferências\n\t\tedit = QtWidgets.QAction(\"Preferências\", self)\n\t\tedit.setStatusTip(\"Opções de customização\")\n\t\tedit.triggered.connect(self.openSettingsMenu)\n\t\tmenubar.addAction(edit)\n\n\t\tbar = QtWidgets.QMenuBar(menubar)\n\t\tmenubar.setCornerWidget(bar, QtCore.Qt.TopRightCorner)\n\n\t\tself.voltar = QtWidgets.QAction(self.style().standardIcon(QtWidgets.QStyle.SP_ArrowBack), \"\", self)\n\t\tself.voltar.setStatusTip(\"Voltar para a página inicial\")\n\t\tself.voltar.triggered.connect(lambda: self.homePage())\n\t\tbar.addAction(self.voltar)\n\t\tself.voltar.setVisible(False)\n\n\t\thelp = QtWidgets.QAction(\"Ajuda\", self)\n\t\thelp.setStatusTip(\"Manual do sistema\")\n\t\thelp.triggered.connect(lambda: self.openPage(\"textos_padrao/ajuda.html\"))\n\t\tbar.addAction(help)\n\t\t\n\t\tsobre = QtWidgets.QAction(\"Sobre o projeto\", self)\n\t\tsobre.setStatusTip(\"Conheça mais sobre o projeto\")\n\t\tsobre.triggered.connect(lambda: self.openPage(\"textos_padrao/sobre\"))\n\t\tbar.addAction(sobre)\n\n\t\t#btn_nxt.setText(\"Próxima linha\")\n\n\n\t###########################################\n\t#\n\t# Componentes da UI\n\t#\n\t# Janela do servidor, editor de texto,\n\t# visualizador de PDF e das imagens\n\t#\n\t###########################################\n\tdef initUI(self):\n\t\t\n\t\t# Preferências\n\t\tself.settingsMenu = GSettingsMenu()\n\t\tself.settingsMenu.newColorScheme.connect(self.onNewColorScheme)\n\t\tself.settingsMenu.newFont.connect(self.onNewFont)\n\t\n\t\t# Dimensões iniciais da janela\n\t\tself.screen_rect = QtWidgets.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(self.screen_rect)\n\t\tself.setWindowTitle(\"Inclua\")\n\t\t\n\t\t# Componentes principais do editor\n\t\tself.splitter\t= QtWidgets.QSplitter(self)\n\t\tself.text\t= GTextEdit(self.settingsMenu.getColorScheme())\n\t\t\n\t\tself.translation = GTranslation()\n\t\tself.translation.sender.translationReady.connect(self.onTranslationReady)\n\t\t\n\t\t# Arquivo egl\n\t\tself.eglFile = GEGLFile()\n\n\t\t# Visualizador de pdf pode ser uma página web dentro de um webView\n\t\tself.pdf_widget = GDocument()\n\t\tself.pdf_widget.sender.formattedReady.connect(self.onPDFTextReady)\n\t\t\n\t\tself.screenshotLayer = GLayeredDocumentCanvas(self.pdf_widget)\n\t\tself.screenshotLayer.screenShot.connect(self.onScreenShot)\n\t\t#self.screenshotLayer.hide()\n\t\t\n\t\t# Widget que contém a janela do avatar e o grid com as imagens\n\t\tself.filler\t= QtWidgets.QSplitter(Qt.Vertical)\n\t\t\n\t\t# Setup do widget com o display virtual\n\t\tself.server_widget = self.server.getServerWidget()\n\t\tself.server_widget.setMinimumSize(QtCore.QSize(640, 480))\n\t\tself.server_widget.setMaximumSize(QtCore.QSize(640, 480))\n\n\t\tself.timer = QtCore.QTimer()\n\t\tself.timer.timeout.connect(self.refreshServerWidget)\n\t\tself.timer.start(30)\n\t\t\n\t\t# Widget das imagens\n\t\tself.images_widget = 
GImageGrid(self.default_imgDir)\n\t\tself.images_widget.onClick.connect(self.onImageClick)\n\n\t\t#Sobre e Ajuda\n\t\tself.view_padrao = QtWidgets.QTextEdit()\n\t\tself.view_padrao.setReadOnly(True)\n\t\tself.view_padrao.hide()\n\n\t\t#####################################\n\t\t#\n\t\t# Toolbar para gerenciar imagens\n\t\t#\n\t\t#####################################\n\t\tself.images_toolbar = QtWidgets.QWidget()\n\t\tself.images_toolbar.setMaximumHeight(20)\n\t\t\n\t\tself.it_layout = QtWidgets.QHBoxLayout()\n\t\tself.it_layout.setContentsMargins(5, 0, 5, 0)\n\t\t\n\t\tself.confirmar_selecao = QtWidgets.QPushButton(self.style().standardIcon(QtWidgets.QStyle.SP_DialogApplyButton), \"REMOVER\", self)\n\t\tself.confirmar_selecao.setStatusTip(\"Remover as imagens selecionadas\")\n\t\tself.confirmar_selecao.clicked.connect(self.removeSelected)\n\t\t\n\t\tself.confirmar_selecao.setFixedSize(QtCore.QSize(150, 20))\n\t\tself.confirmar_selecao.hide()\n\n\t\tself.deletar_imagens = QtWidgets.QPushButton(self.style().standardIcon(QtWidgets.QStyle.SP_TrashIcon), \"\", self)\n\t\tself.deletar_imagens.setStatusTip(\"Remover imagens da lista\")\n\t\tself.deletar_imagens.setCheckable(True)\n\t\tself.deletar_imagens.toggled.connect(self.changeImageViewerState)\n\n\t\tself.printar_imagens = QtWidgets.QPushButton(self.style().standardIcon(QtWidgets.QStyle.SP_FileDialogContentsView), \"\", self)\n\t\tself.printar_imagens.setStatusTip(\"Capturar parte do documento\")\n\t\tself.printar_imagens.setCheckable(True)\n\t\tself.printar_imagens.toggled.connect(self.takeScreenShot)\n\n\t\tself.it2_layout = QtWidgets.QHBoxLayout()\n\t\tself.it2_layout.setContentsMargins(5, 0, 5, 0)\n\t\tself.it2_layout.addWidget(self.printar_imagens, alignment = Qt.AlignRight | Qt.AlignBottom)\n\t\tself.it2_layout.addWidget(self.deletar_imagens, alignment = Qt.AlignRight | Qt.AlignBottom)\n\n\t\tself.botoes_imagens_direita = QtWidgets.QWidget()\n\t\tself.botoes_imagens_direita.setLayout(self.it2_layout)\n\n\t\tself.it_layout.addWidget(self.confirmar_selecao, alignment = Qt.AlignLeft | Qt.AlignBottom)\n\t\tself.it_layout.addWidget(self.botoes_imagens_direita, alignment = Qt.AlignRight | Qt.AlignBottom)\t\t\n\t\t\n\t\tself.images_toolbar.setLayout(self.it_layout)\n\n\t\tself.filler.addWidget(self.server_widget)\n\t\tself.filler.addWidget(self.images_toolbar)\n\t\tself.filler.addWidget(self.images_widget)\n\n#\t\tself.filler.setAttribute(Qt.WA_ShowWithoutActivating)\n#\t\tself.server_widget.setAttribute(Qt.WA_ShowWithoutActivating)\n\t\t\n\t\t#####################################\n\t\t#\n\t\t# Toolbar para as screenshots\n\t\t#\n\t\t#####################################\n\t\tself.screenshotsToolbar = QtWidgets.QWidget()\n\t\tself.screenshotsToolbar.setMaximumHeight(40)\n\t\t\n\t\tself.captureButton = QtWidgets.QPushButton(self.style().standardIcon(QtWidgets.QStyle.SP_FileDialogContentsView), \"\", self)\n\t\tself.exitCaptureModeButton = QtWidgets.QPushButton(self.style().standardIcon(QtWidgets.QStyle.SP_BrowserStop), \"\", self)\n\t\t\n\t\tself.captureButton.clicked.connect(self.screenshotLayer.takeScreenShot)\n\t\tself.exitCaptureModeButton.clicked.connect(self.printar_imagens.toggle)\n\t\t\n\t\tself.screenshotsToolbarLayout = QtWidgets.QHBoxLayout()\n\t\tself.screenshotsToolbarLayout.addWidget(self.captureButton)\n\t\tself.screenshotsToolbarLayout.addWidget(self.exitCaptureModeButton)\n\t\t\n\t\tself.screenshotsToolbar.setLayout(self.screenshotsToolbarLayout)\n\t\t\n\t\tself.screenshotMenuWidget = 
QtWidgets.QWidget()\n\t\tself.screenshotMenuWidgetLayout = QtWidgets.QVBoxLayout()\n\t\tself.screenshotMenuWidgetLayout.setContentsMargins(0, 0, 0, 0)\n\t\tself.screenshotMenuWidgetLayout.addWidget(self.screenshotsToolbar)\n\t\tself.screenshotMenuWidgetLayout.addWidget(self.screenshotLayer)\n\t\t\n\t\tself.screenshotMenuWidget.setLayout(self.screenshotMenuWidgetLayout)\n\t\t\n\t\tself.screenshotsToolbar.hide()\n\t\tself.screenshotMenuWidget.hide()\n\t\t\n\t\t# Widget que aparece na janela é um splitter\n\t\t# os outros são adicionados a ele\n\t\tself.setCentralWidget(self.splitter)\n\t\tself.splitter.addWidget(self.text)\n\t\tself.splitter.addWidget(self.filler)\n\t\tself.splitter.addWidget(self.screenshotMenuWidget)\n\t\tself.splitter.addWidget(self.view_padrao)\n\t\t\n\t\t# Init\n\t\tself.initMenubar()\n\n\t\tself.statusbar = self.statusBar()\n\t\t\n\t\t# Força o widget a atualizar\n\t\tself.toggleVisible(self.server_widget)\n\t\tthreading.Thread(target=self.tryCommunication).start()\n\n\tdef print_cursor(self):\n\t\tcursor = self.text.textCursor()\n\t\tprint(\"position:%2d\\nachor:%5d\\n\" % (cursor.position(), cursor.anchor()))\n\n\t##################################\n\t#\n\t# ARQUIVOS\n\t#\n\t##################################\t\n\t\n\t##################################\n\t#\n\t# Documentos\n\t#\n\t##################################\n\tdef openDocument(self):\n\t\tfilename = QtWidgets.QFileDialog().getOpenFileName(caption=\"Abrir documento\", filter=\"Documentos (*.pdf *.odt *.doc *.docx *.ppt *.pptx *.rtf *.pps *.ppsx *.odp);; Todos os arquivos (*.*)\")\n\t\tif filename[0] == \"\":\n\t\t\treturn 1\n\t\t\n\t\tif not self.text.document().isEmpty():\n\t\t\tbox = QtWidgets.QMessageBox()\t\n\t\t\tbox.setIcon(QtWidgets.QMessageBox.Question)\n\t\t\tbox.setWindowTitle('Abrir documento')\n\t\t\tbox.setText(\"Apagar texto do editor?\")\n\t\t\tbox.setStandardButtons(QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No)\n\t\t\tbuttonY = box.button(QtWidgets.QMessageBox.Yes)\n\t\t\tbuttonY.setText('Sim')\n\t\t\tbuttonN = box.button(QtWidgets.QMessageBox.No)\n\t\t\tbuttonN.setText('Não')\n\t\t\treply = box.exec_()\n\n#\t\t\treply = QtWidgets.QMessageBox.question(self, \"Abrir documento\", \"Apagar texto do editor?\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n\t\t\tif reply == QtWidgets.QMessageBox.Yes:\n\t\t\t\tif not self.closeTextFile():\n\t\t\t\t\treturn\n\n\t\tself.pdf_widget.load(filename[0])\n\t\tself.text.setModified(False)\n\t\t\n\t\t# Força o widget a atualizar\n#\t\tself.screenshotLayer.setGeometry(0, 0, self.screen_rect.width() / 10, self.screen_rect.height())\n\n\t\t\n#\t\tself.screenshotMenuWidget.setSizePolicy(QtWidgets.QSizePolicy.Minimum, self.screenshotMenuWidget.sizePolicy().verticalPolicy())\n#\t\tself.screenshotMenuWidget.setGeometry(0, 0, self.geometry().width() // 10000, self.geometry().height())\n\t\t\n\t\tself.screenshotMenuWidget.setFixedWidth(self.geometry().width() // 3)\n\t\t\n\t\tself.screenshotMenuWidget.hide()\n\t\tself.screenshotMenuWidget.show()\n\n\t\tself.screenshotMenuWidget.setMinimumWidth(0)\n\t\tself.screenshotMenuWidget.setMaximumWidth(5000)\n\n\t\tself.hasOpenDocument = True\n\t\t\n\t\treturn 0\n\n\t\n\tdef onPDFTextReady(self):\n\t\tself.images_widget.scanForImages(GDefaultValues.imgDir)\n\t\tself.images_widget.loadImages()\n\t\tbox = QtWidgets.QMessageBox()\n\t\tbox.setIcon(QtWidgets.QMessageBox.Question)\n\t\tbox.setWindowTitle('Abrir documento')\n\t\tbox.setText(\"Traduzir 
documento?\")\n\t\tbox.setStandardButtons(QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No)\n\t\tbuttonY = box.button(QtWidgets.QMessageBox.Yes)\n\t\tbuttonY.setText('Sim')\n\t\tbuttonN = box.button(QtWidgets.QMessageBox.No)\n\t\tbuttonN.setText('Não')\n\t\treply = box.exec_()\n#\t\treply = QtWidgets.QMessageBox.question(self, \"Abrir documento\", \"Traduzir documento?\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n\t\tif reply == QtWidgets.QMessageBox.Yes:\n\t\t\tself.getTranslationFromFile()\n\n\t#################################\n\t#\n\t# Arquivos de traduçao\n\t#\n\t#################################\n\tdef newTextFile(self):\n\t\t\n\t\tif not self.closeTextFile():\n\t\t\treturn\n\t\t\n\t\tself.translationFileName = \"\"\n\t\tself.hasOpenTranslationFile = False\n\t\tself.text.setModified(False)\t\n\t\n\tdef closeTextFile(self):\n\t\tif self.text.isModified():\n\t\t\tbox = QtWidgets.QMessageBox()\n\t\t\tbox.setIcon(QtWidgets.QMessageBox.Question)\n\t\t\tbox.setWindowTitle('Salvar documento')\n\t\t\t\n\t\t\tif self.translationFileName != \"\":\n\t\t\t\tbox.setText(\"Salvar mudanças no arquivo %s?\" % (self.translationFileName))\n\t\t\telse:\n\t\t\t\tbox.setText(\"Salvar mudanças no novo arquivo?\")\n\t\t\tbox.setStandardButtons(QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)\n\t\t\tbuttonY = box.button(QtWidgets.QMessageBox.Yes)\n\t\t\tbuttonY.setText('Sim')\n\t\t\tbuttonN = box.button(QtWidgets.QMessageBox.No)\n\t\t\tbuttonN.setText('Não')\n\t\t\tbuttonC = box.button(QtWidgets.QMessageBox.Cancel)\n\t\t\tbuttonC.setText('Cancelar')\n\t\t\treply = box.exec_()\n\n\n\t\t\tif reply == QtWidgets.QMessageBox.Cancel:\n\t\t\t\treturn False\n\t\t\t\t\n\t\t\tif reply == QtWidgets.QMessageBox.Yes:\n\t\t\t\tif not self.saveTextFile():\n\t\t\t\t\treturn False\n\t\t\n\t\tself.text.clear()\n\t\tself.text.setModified(False)\t\n\t\treturn True\n\t\n\tdef getTranslationFromFile(self):\n\t\tif not self.pdf_widget.hasFile() and self.openDocument() == 1:\n\t\t\treturn\n\t\t\t\n\t\tif self.hasOpenTranslation:\n\t\t\tbox = QtWidgets.QMessageBox()\n\t\t\tbox.setIcon(QtWidgets.QMessageBox.Question)\n\t\t\tbox.setWindowTitle('Gerar tradução')\n\t\t\tbox.setText(\"Já existe uma tradução aberta. 
Substituir?\")\n\t\t\tbox.setStandardButtons(QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No)\n\t\t\tbuttonY = box.button(QtWidgets.QMessageBox.Yes)\n\t\t\tbuttonY.setText('Sim')\n\t\t\tbuttonN = box.button(QtWidgets.QMessageBox.No)\n\t\t\tbuttonN.setText('Não')\n\t\t\treply = box.exec_()\n\t\t\tif reply == QtWidgets.QMessageBox.No:\n\t\t\t\treturn \n\t\t\n\t\ttxt = self.pdf_widget.getFormattedText()\n\t\tself.translation.update(txt)\n\n\t\n\tdef importTextFile(self):\n\t\tfilename = QtWidgets.QFileDialog().getOpenFileName(caption=\"Abrir arquivo de tradução\", filter=\"EGL (*.egl)\")\n\t\tif filename[0] == \"\":\n\t\t\treturn\n\t\t\t\n\t\tif not self.closeTextFile():\n\t\t\treturn\n\t\t\t\n\t\t#self.translation.load(filename[0])\n\t\tself.eglFile.load(filename[0])\n\t\tself.text.setText(self.eglFile.plainText())\n\t\tself.translation = self.eglFile.translation()\n\t\tself.translationFileName = filename[0]\n\n\tdef saveTextFile(self):\n\t\tif self.translationFileName == \"\":\n\t\t\treturn self.saveTextFileAs()\n\t\telse:\n\t\t\t#self.translation.save(self.translationFileName)\n\t\t\tself.eglFile.setPlainText(self.text.toPlainText())\n\t\t\tself.eglFile.setTranslation(self.translation)\n\t\t\tself.eglFile.save(self.translationFileName)\n\t\t\tself.text.setModified(False)\n\t\t\treturn True\n\t\t\n\tdef saveTextFileAs(self):\n\t\tfilename = QtWidgets.QFileDialog().getSaveFileName(caption=\"Salvar arquivo de tradução\")\n\t\t\n\t\tfname = filename[0]\n\t\tif fname == \"\":\n\t\t\treturn False\n\t\t\t\n\t\tif not fname.endswith(\".egl\"):\n\t\t\tfname += \".egl\"\n\t\t\t\n\t\tself.translationFileName = fname\n\t\t#self.translation.save(self.translationFileName)\n\t\tself.eglFile.setPlainText(self.text.toPlainText())\n\t\tself.eglFile.setTranslation(self.translation)\n\t\tself.eglFile.save(self.translationFileName)\n\t\tself.text.setModified(False)\n\t\treturn True\n\t\t\n\tdef exportTextFile(self, fmt):\n\t\tfilename = QtWidgets.QFileDialog().getSaveFileName(caption=\"Exportar arquivo de tradução\")\n\t\tfname = filename[0]\n\t\tif fname == \"\":\n\t\t\treturn False\n\t\t\t\n\t\ttmpFileName = \".tmpFileName\"\n\t\t\n\t\twith open(tmpFileName, \"w\") as doc:\n\t\t\tdoc.write(self.text.toPlainText())\n\n\t\tos.system(\"ubuntu1804 -c unoconv -f %s \\\"%s\\\"\" % (fmt, tmpFileName))\n\n\t\tos.system(\"move \\\"%s.%s\\\" \\\"%s.%s\\\"\" % (tmpFileName, fmt, fname, fmt))\n\n\t\treturn True\n\n\tdef addNextTranslationParagraph(self):\n\t\tcursor = self.text.textCursor()\n\t\tcursor.movePosition(cursor.End, cursor.MoveAnchor)\n\t\t\n\t\tspaces = \"\"\n\t\ttext = self.translation.next()\n\t\t\n\t\twhile text.isspace():\n\t\t\tspaces += text\n\t\t\ttext = self.translation.next()\n\t\t\n\t\tend = \"\\n\"\n\t\tif text == \"\":\n\t\t\tend = \"\"\n\t\ttext = spaces + text + end\n\t\tcursor.insertText(text)\n\n\tdef showAllTranslation(self):\n\t\tcursor = self.text.textCursor()\n\t\tfor line in self.translation.getParagraphsTillEnd():\n\t\t\tself.text.textCursor().insertText(line + \"\\n\")\t\n\n\tdef clearTranslation(self):\n\t\t#self.text.clear()\n\t\tself.translation = GTranslation()\n\t\tself.hasOpenTranslation = False\n\t\tself.text.setModified(True)\n\t\t\n\tdef resetTranslation(self):\n\t\t#self.text.clear()\n\t\tself.translation.resetIndex()\n\t\tself.text.setModified(True)\n\t\t\n\tdef onTranslationReady(self):\n\t\tself.hasOpenTranslation = True\n\n\n\t##################################\n\t#\n\t# IMAGENS\n\t#\n\t##################################\n\n\tdef addImagesFromFile(self):\n\t\tfilename 
= QtWidgets.QFileDialog().getOpenFileNames(caption=\"Adicionar imagem do computador\", filter=\"Imagens (*.jpg *.JPG *.jpeg *.JPEG *.png *.PNG);; JPG (*.jpg *.JPG *.jpeg *JPEG);; PNG (*.png *.PNG);; Todos os arquivos (*.*)\")\n\t\tprint(filename[0])\n\t\tif len(filename[0]) == 0:\n\t\t\treturn\n\t\tself.images_widget.addImagesFromFile(filename[0])\n\t\n\tdef addImageFromUrl(self):\n\t\tdlg = QtWidgets.QInputDialog(self)\n\t\tdlg.setOkButtonText(\"Enviar\")\n\t\tdlg.setCancelButtonText(\"Cancelar\")\n\t\tdlg.setInputMode(QtWidgets.QInputDialog.TextInput) \n\t\tdlg.setWindowTitle(\"Adicionar imagem por url\")\n\t\tdlg.setLabelText(\"Url da imagem:\")\n\t\tdlg.resize(500,100) \n\t\tok = dlg.exec_() \n\t\tlineEdit = dlg.textValue()\n\t\tprint (\"LINEEDIT \" + lineEdit)\n\t\tif lineEdit == '':\n\t\t\treturn\n\t\tself.images_widget.addImageFromUrl(lineEdit)\n\n\tdef setRemoveImagesState(self):\n\t\tself.confirmar_selecao.show()\n\t\tself.deletar_imagens.setChecked(True)\n\t\tself.images_widget.setMode(GImageGrid.selectable)\n\n\tdef setClickableImagesState(self):\n\t\tself.confirmar_selecao.hide()\n\t\tself.deletar_imagens.setChecked(False)\n\t\tself.images_widget.setMode(GImageGrid.clickable)\n\t\t\n\tdef changeImageViewerState(self, checked):\n\t\tif checked:\n\t\t\tself.setRemoveImagesState()\n\t\telse:\n\t\t\tself.setClickableImagesState()\n\t\n\tdef removeSelected(self):\n\t\tbox = QtWidgets.QMessageBox()\n\t\tbox.setIcon(QtWidgets.QMessageBox.Question)\n\t\tbox.setWindowTitle('Remover imagens')\n\t\tbox.setText(\"Remover todas as imagens selecionadas?\")\n\t\tbox.setStandardButtons(QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No)\n\t\tbuttonY = box.button(QtWidgets.QMessageBox.Yes)\n\t\tbuttonY.setText('Confirmar')\n\t\tbuttonN = box.button(QtWidgets.QMessageBox.No)\n\t\tbuttonN.setText('Cancelar')\n\t\treply = box.exec_()\n#\t\treply = QtWidgets.QMessageBox.question(self, \"Remover imagens\", \"Remover todas as imagens selecionadas?\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n\t\tif reply == QtWidgets.QMessageBox.Yes:\n\t\t\tself.images_widget.removeSelected()\n\t\t\tself.setClickableImagesState()\n\t\n\tdef onImageClick(self, index):\n\t\tlc = self.text.textCursor()\n\t\trange_content = lc.selectedText()\n\t\tpos = GCustomImageDialog().question()\n\t\tif pos == GCustomImageDialog.NoImage:\n\t\t\treturn \n\t\timg = self.images_widget.getImageButtonFromIndex(index)\n\t\tlc.insertText(\"__img%d_%d %s _img%d__\" % (pos, index, range_content, pos))\n\t\n\t##################################\n\t#\n\t# SCREENSHOTS\n\t#\n\t##################################\n\tdef setScreenCaptureState(self, state):\n\t\tprint(\"YE\")\n\t\tprint(state)\n\t\t\n\t\tif self.screenshotMenuWidget.isVisible():\n\t\t\tself.screenshotLayer.setCaptureMode(state)\n\t\t\tself.screenshotsToolbar.setVisible(state)\n\t\t\t\t\t\n\t\n\tdef takeScreenShot(self):\n\t\tself.setScreenCaptureState(not self.screenshotLayer.getCaptureMode())\n\t\n\tdef onScreenShot(self, pixmap):\n\t\tself.targetPixmap = pixmap\n\t\treply = GCustomScreenShotDialog(self.targetPixmap).question()\n\t\tif reply == GCustomScreenShotDialog.No:\n\t\t\treturn\n\t\tself.images_widget.addImageFromPixmap(self.targetPixmap)\n\t\n\t##################################\n\t#\n\t# AVATAR\n\t#\n\t##################################\n\tdef refreshServerWidget(self):\n\t\tself.server_widget.setMinimumSize(QtCore.QSize(1, 1))\n\t\tself.server_widget.setMaximumSize(QtCore.QSize(1, 1))\n\t\tself.server_widget.setMinimumSize(QtCore.QSize(640, 
480))\n\t\tself.server_widget.setMaximumSize(QtCore.QSize(640, 480))\n\t\tself.timer.start(33)\n\t\n\tdef sendText(self):\n\t\tcursor = self.text.textCursor()\n\t\tif cursor.hasSelection():\n\t\t\ttext = cursor.selection().toPlainText()\n\n\t\t\tmatches = re.findall(r\"(__img)([0-9])_([0-9]+)\", text)\t\t\t\n\t\t\tmentions = {\"__img0_\" : [], \"__img1_\" : [], \"__img2_\" : [], \"__img3_\" : []}\n\t\t\tfor match in matches:\n\t\t\t\tmentions[match[0]+match[1] + \"_\"].append(match[2])\n\t\t\t\n\t\t\tfor key in mentions:\n\t\t\t\ti = int(re.search(r'\\d+', key).group())\n\t\t\t\tfor index in mentions[key]:\n\t\t\t\t\tpth = self.images_widget.getImageButtonFromIndex(int(index)).getImagePath().replace(\"\\\\\", \"/\")\n\t\t\t\t\tpth = \"/mnt/\" + pth[0].lower() + pth[2:]\n\t\t\t\t\t# FIXME: the original replacement string (built from pth and the\n\t\t\t\t\t# layer number i) is missing here; substituting the empty string\n\t\t\t\t\t# keeps the call syntactically valid.\n\t\t\t\t\ttext = text.replace(key + index, \"\")\n\t\t\t\t\n\t\t\tself.server.send(text)\n\t\telse:\n\t\t\ttext = self.text.toPlainText()\n\t\t\tQtWidgets.QMessageBox.information(self, \"Gerar vídeo\", \"Para gerar o vídeo, selecione o texto desejado\", (QtWidgets.QMessageBox.Ok))\n\n\t\tprint(text)\n\t\t\n\tdef toggleAvatarVisible(self):\n\t\tself.toggleVisible(self.server_widget)\n\t\tself.toggleVisible(self.filler)\n\t\n\tdef toggleVisible(self, widget):\n\t\tif widget.isVisible():\n\t\t\twidget.hide()\n\t\telse:\n\t\t\twidget.show()\n\n\tdef createVideo(self, vName, vId = default_videoId, pngDir = \"${HOME}\" + GDefaultValues.pngDirSuffix):\n\t\tvid = GVideo()\n\t\tvid.sender.videoReady.connect(self.onVideoReady)\n\t\tvid.createVideo(vId, vName, pngDir)\n\t\t\n\tdef recordVideo(self):\n\t\tfName = QtWidgets.QFileDialog().getSaveFileName(caption=\"Gerar vídeo\", filter=\"MP4 (*.mp4)\")\n\t\tvName = fName[0]\n\t\tif vName == \"\":\n\t\t\treturn\n\n\t\tprint(\"AQUI!\")\n\t\tcmd = \"ubuntu1804 -c rm ${HOME}%s/%s/*\" % (GDefaultValues.pngDirSuffix, self.default_videoId)\n\t\tsubprocess.run(cmd, shell=True)\n\t\tcursor = self.text.textCursor()\n\t\tprint(\"SAÍ\")\n\t\tif cursor.hasSelection():\n\t\t\ttxt = cursor.selection().toPlainText()\n\t\t\tif not txt.isspace():\n\t\t\t\ttxt = \"__rec \" + txt + \" __stop\"\n\t\t\t\tself.server.sendToRecord(txt, vName)\n\n\tdef tryCommunication(self, n = 10):\n\t\ttries = 0\n\t\twhile self.server.startCommunication() != 0 and tries < n:\t\n\t\t\tprint(\"Tentativa %d\" % (tries))\n\t\t\ttries += 1\n\t\t\tsleep(3)\n\t\n\tdef onVideoReady(self, title):\n\t\tQtWidgets.QMessageBox.question(self, \"Gerar vídeo\", \"Vídeo %s criado com sucesso!\" % title, (QtWidgets.QMessageBox.Ok))\n\n\t####################################\n\t#\n\t# PREFERÊNCIAS\n\t#\n\t####################################\n\tdef openSettingsMenu(self):\n\t\tself.settingsMenu.show()\n\n\tdef onNewColorScheme(self, colorScheme):\n\t\tself.text.setColorScheme(colorScheme)\n\t\t\n\tdef onNewFont(self, font):\n\t\tself.text.setFont(font)\n\t\t\n\t####################################\n\t#\n\t# TELAS ESTÁTICAS\n\t#\n\t###################################\n\n\tdef showOne(self, widget):\n\t\tfor i in range(self.splitter.count()):\n\t\t\tself.splitter.widget(i).hide()\n\t\twidget.show()\n\n\tdef openPage(self, page):\n\t\tself.showOne(self.view_padrao)\n\t\tself.initPage(page)\n\t\tself.voltar.setVisible(True)\n\n\tdef initPage(self, page):\n\t\tself.textGrid = QtWidgets.QGridLayout()\n\t\tself.view_padrao.clear()\n\n\t\tcursor = self.view_padrao.textCursor()\n\n\t\tfs = cursor.charFormat()\n\t\tprop_id = 0x100000 + 1\n\t\tfs.setProperty(prop_id, 100)\n\t\tfs.setFont(QFont(\"arial\", 12, weight=QtGui.QFont.Bold ))\n\n\t\twith 
open(page,'r',encoding = 'utf-8') as f:\n\t\t\tfor line in f:\n\t\t\t\tcursor.insertHtml(line + \"\\n\")\n\t\t\n\t\n\tdef homePage(self):\n\t\tfor i in range(self.splitter.count()):\n\t\t\tself.splitter.widget(i).show()\n\t\tself.view_padrao.hide()\n\t\tself.voltar.setVisible(False)\n\n\n\t##################################\n\t#\n\t# DESTRUTOR\n\t#\n\t##################################\n\tdef __del__(self):\n\t\tprint(\"Destrutor\")\n\t\tself.server.kill()\n\t\tself.images_widget.clearImages()\n#\t\texit()\n\t\n\tdef closeEvent(self, event):\n\t\tif not self.closeTextFile():\n\t\t\tevent.ignore()\n\t\t\treturn\n\n\t\tsuper().closeEvent(event)\t\t\t\t\n#\t\tself.__del__()\n\n\t\t\n########################################################\n\ndef main():\n\tglobal app\n\tapp = QtWidgets.QApplication(sys.argv)\n\t\n\tGDefaultValues()\n\t\n\tmain = Main()\n\tmain.show()\n\tsys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Windows/MDLib.py","file_name":"MDLib.py","file_ext":"py","file_size_in_byte":28298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409448238","text":"# from multiprocessing import Pool\n#\n#\n# def f():\n#     print(111)\n#\n# if __name__ == '__main__':\n#     with Pool(processes=5) as pool:\n#         result = pool.apply_async(f)\n#         print(result.get(timeout=3))\n#         # for i in range(10):\n#         #\n\nfrom socket import *\nimport struct\nimport json\nimport os\nimport sys\nimport time\n\nmy_module = sys.modules[__name__]\nip_host = ('192.168.20.22', 8081)\n\ntcp_client = socket(AF_INET,SOCK_STREAM)\ntcp_client.connect(ip_host)\n\nif not os.path.exists('cdir') and not os.path.isdir('cdir'):\n os.mkdir('cdir')\n\ndef progress(percent):\n width = 100\n show_str = ('[%%-%ds]' % width) % (int(width * percent / 100) * \"*\") # nested use of %-string formatting\n print(\"\\r%s %d%%\" % (show_str, percent), end='', file=sys.stdout, flush=True)\n\ndef get(filename):\n '''Download a file'''\n header_len = struct.unpack('i', tcp_client.recv(4))[0]\n header_json = tcp_client.recv(header_len).decode('utf-8')\n header_dic = json.loads(header_json)\n recv_size = 0\n with open('cdir/'+filename, 'wb') as f:\n while recv_size < header_dic['total_size']:\n recv_data = tcp_client.recv(1024)\n recv_size += len(recv_data)\n f.write(recv_data)\n # print the progress bar\n time.sleep(0.05)\n percent = int(recv_size / header_dic['total_size'] * 100)\n progress(percent)\n print('\\n'+'Download complete')\n\ndef put(filename):\n '''Upload a file'''\n if os.path.exists('cdir/'+filename) and os.path.isfile('cdir/'+filename):\n header_dic = {\n 'total_size': os.path.getsize('cdir/'+filename),\n 'filename': filename,\n }\n print(header_dic)\n header_bytes = json.dumps(header_dic).encode('utf-8')\n tcp_client.send(struct.pack('i', len(header_bytes)))\n tcp_client.send(header_bytes)\n with open('cdir/'+filename, 'rb') as f:\n send_size = 0\n for line in f:\n tcp_client.send(line)\n # print the progress bar\n # time.sleep(0.5)\n send_size += len(line)\n percent = int(send_size / header_dic['total_size'] * 100)\n progress(percent)\n print('\\n'+'Upload complete')\n\n\nwhile True:\n cmd = input('>>').strip()\n if not cmd:continue\n tcp_client.send(cmd.encode('utf-8')) # send the command\n method,filename = cmd.split()\n if hasattr(my_module,method):\n getattr(my_module, method)(filename)\n\ntcp_client.close()","sub_path":"Basis_of_Python/201707week/0825/进程池.py","file_name":"进程池.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"152149890","text":"# The user 
вводит данные о количестве предприятий, их наименования и прибыль за четыре квартала для каждого\n# предприятия. Программа должна определить среднюю прибыль (за год для всех предприятий) и отдельно вывести\n# наименования предприятий, чья прибыль выше среднего и ниже среднего.\n\n# Дабы не париться с вводом через консоль, сделаем инфо в файле, формат данных - только такой как в примере!!\n# первая строка - число компаний, далее данные через запятую с пробелом после нее.\n# Данные каждого предприятия будем хранить как namedtuple, и объединим их в defaultdict (излишество, конечно,\n# но пусть будет тренировки ради)\n\nfrom collections import namedtuple, defaultdict\n\nenterprise = namedtuple('enterprise', 'name, value1, value2, value3, value4, year_value')\nmydict = defaultdict()\nmid_value = 0\nwith open('data.txt', 'r', encoding='utf-8') as f:\n num = int(f.readline().strip()[1:])\n\n for i in range(num):\n s = f.readline().strip().split(', ')\n # Добавим вычисляемое поле (годовая прибыль), хотя можно было бы обойтись и без него\n s.append(float(s[2]) + float(s[3]) + float(s[4]) + float(s[5]))\n enterpise_new = enterprise(*list(s[1:]))\n mydict[i] = enterpise_new\n # Считаем среднюю годовую прибыль\n mid_value = round((mid_value * i + (float(mydict.get(i).year_value))) / (i + 1), 3)\n\nprint(f'Среднегодовая прибыль составила: {mid_value}')\nprint('*' * 100)\nprint('Предприятия, чья прибыль выше средней:')\nfor i in mydict:\n if mydict.get(i).year_value >= mid_value:\n print(f'{mydict.get(i).name} - годовая прибыль {mydict.get(i).year_value}')\nprint('*' * 100)\nprint('Предприятия, чья прибыль ниже средней:')\nfor i in mydict:\n if mydict.get(i).year_value < mid_value:\n print(f'{mydict.get(i).name} - годовая прибыль {mydict.get(i).year_value}')\n# В теории можно сделать упорядоченный дикт - упорядочить с помощью лямбды по прибыли. 
но особой разницы не вижу\n","sub_path":"Lesson 5/Les_5_task_1.py","file_name":"Les_5_task_1.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1253209","text":"from __future__ import print_function\nimport logging\nfrom pygeocoder import Geocoder\nfrom google.appengine.api import taskqueue\nfrom google.appengine.api import memcache\nfrom google.appengine.ext import ndb\nfrom .handlers import RealChangeHandler\nfrom .rcfmdb import RealChangeFileMakerDatabase\nfrom .models import Vendor\n\n\nclass CronError(Exception):\n pass\n\n\nclass RealChangeCronHandler(RealChangeHandler):\n \"\"\"\n Base class for all cron jobs, like filemaker syncing.\n \"\"\"\n SAFE_CRON_HEADER = \"X-Appengine-Cron\"\n\n def ensure_cron(self):\n is_development = self.is_development\n has_cron_header = RealChangeCronHandler.SAFE_CRON_HEADER in self.request.headers\n if (not is_development) and (not has_cron_header):\n raise CronError(\"Illegal call to app engine cron.\")\n\n def is_safe_to_sync(self):\n safe_to_sync = memcache.add(key=\"_safe_to_sync\", value=\"ignore\", time=300)\n return safe_to_sync\n\n def kick_off_sync(self):\n taskqueue.add(url='/task/sync/fm/', queue_name='sync', target=self.service_backend_name)\n\n\nclass SyncHandler(RealChangeCronHandler):\n \"\"\"This is the manually-triggered entrance to the sync tasks.\"\"\"\n def get(self):\n safe = self.is_safe_to_sync()\n if safe:\n self.kick_off_sync()\n return self.respond_with_template('sync.dhtml', {\"safe\": safe})\n\n\nclass SyncFileMakerCronHandler(RealChangeCronHandler):\n \"\"\"This is the cron-triggered entrance to the sync tasks.\"\"\"\n def get(self):\n self.ensure_cron()\n if self.is_safe_to_sync():\n self.kick_off_sync()\n self.respond_ok()\n\n\nclass SyncFileMakerTaskHandler(RealChangeHandler):\n def post(self):\n logging.info(\"SyncFileMakerTask :: START\")\n\n # Download data\n logging.info(\"SyncFileMakerTask :: Download Data\")\n db = RealChangeFileMakerDatabase()\n db.download_latest_data() # THIS IS SLOW\n\n # Build the new vendors\n logging.info(\"SyncFileMakerTask :: Build Vendor Entities\")\n new_vendors = []\n for row in db.rows():\n if row.has_club_status and row.has_current_turf:\n vendor = Vendor(\n vendor_id=row.vendor_id,\n private_name=row.private_name,\n public_name=row.public_name,\n is_public=row.is_public,\n profile_url=row.profile_url,\n club_status=row.club_status,\n assignment_status=row.current_assignment.assignment_status,\n turf_address=row.current_turf.turf_address,\n turf_location=row.current_turf.turf_location,\n turf_city=row.current_turf.turf_city,\n photo_url=row.photo_url,\n )\n new_vendors.append(vendor)\n\n # Blow away the current database\n logging.info(\"SyncFileMakerTask :: Delete Old Entities\")\n Vendor.delete_all()\n\n # Save the new database\n logging.info(\"SyncFileMakerTask :: Save New Entities\")\n new_vendor_keys = Vendor.save_all(new_vendors)\n try:\n logging.info(\"SyncFileMakerTask :: there are {0} new_vendor_keys\".format(len(new_vendor_keys)))\n except Exception:\n logging.info(\"SyncFileMakerTask :: new_vendor_keys is {0}\".format(repr(new_vendor_keys)))\n\n # Queue up tasks to geocode our new thingies.\n for new_vendor_key in new_vendor_keys:\n taskqueue.add(url='/task/vendor/geocode/', queue_name='geocode', target=self.service_backend_name, params={'vendor_key': new_vendor_key.urlsafe()})\n\n logging.info(\"SyncFileMakerTask :: DONE\")\n self.respond_ok()\n\n\nclass 
GeocodeTaskHandler(RealChangeHandler):\n def post(self):\n logging.info(\"GeocodeTaskHandler :: Geocoding {0}\".format(self.request.get('vendor_key')))\n\n vendor_key = ndb.Key(urlsafe=self.request.get('vendor_key'))\n logging.info(\"GeocodeTaskHandler :: vendor_key = {0}\".format(repr(vendor_key)))\n\n vendor = vendor_key.get()\n logging.info(\"GeocodeTaskHandler :: vendor = {0}\".format(repr(vendor)))\n results = Geocoder.geocode(vendor.address_for_geocoding)\n (lat, lng) = results[0].coordinates\n vendor.new_geo_point = ndb.GeoPt(lat, lng)\n vendor.put()\n\n self.respond_ok()\n\n\n\n\n\n\n\n\n\n","sub_path":"realchange/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"284212540","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param x, an integer\n # @return a ListNode\n def partition(self, head, x):\n helper = ListNode(-1)\n l = list()\n cur = head\n while cur != None:\n l.append(cur.val)\n cur = cur.next\n new = [i for i in l if i < x] + [i for i in l if i >= x]\n cur = helper\n for i in new:\n cur.next = ListNode(i)\n cur = cur.next\n return helper.next\n","sub_path":"partitionList/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"369650871","text":"# -*- coding: utf-8 -*-\n\nfrom .base import *\n\n\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\nAWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\n\nAWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\n\nAWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\n","sub_path":"src/app/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"344125287","text":"\"\"\" testing the function in wordcount\n\nremember the entry point of spark_context\n\"\"\"\n\nimport pytest\nimport wordcount\n\n@pytest.mark.usefixtures(\"spark_context\")\ndef test_do_word_counts(spark_context):\n \"\"\" test the do_word_counts function, using a given spark_context\n \n Decorators:\n pytest.mark.usefixtures -- make the local spark_context fixture available to this test function by including this decorator.\n\n Arguments:\n spark_context {@fixture} -- obejct to holds interface to spark context\n \"\"\"\n test_inputs = [\n 'hello world',\n 'hello beautiful world',\n 'hello spark spark again'\n ]\n\n input_rdd = spark_context.parallelize(test_inputs, numSlices = 1)\n results = wordcount.do_word_counts(input_rdd)\n\n expected_results = {\n 'hello': 3, \n 'world': 2, \n 'spark': 2,\n 'beautiful': 1,\n 'again': 1\n }\n\n assert expected_results == results\n\n print (\"test_do_word_counts finished with SUCCESS\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"3_spark/test_wordcount.py","file_name":"test_wordcount.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"506464592","text":"from __future__ import print_function\nfrom menu import Menu\nfrom .configuration import format_command\nfrom .utils import list_to_comma_newline_separated_string\nfrom .executor import execute\n\n\nclass 
MainMenu:\n commands_to_execute = None\n\n def __init__(self, configuration):\n self.configuration = configuration\n\n self.main_menu_message = \"Choose a command to execute.\"\n self.main_menu_choices = self.generate_main_menu_choices()\n self.confirmation_choices = [\n (\"Execute\", self.execute_commands),\n (\"Cancel\", self.cancel)\n ]\n\n self.main_menu = Menu(\n title=\"Comrade\",\n message=self.main_menu_message,\n options=self.main_menu_choices,\n refresh=self.refresh_menu\n )\n\n def show(self):\n self.main_menu.open()\n\n def execute_commands(self):\n self.main_menu.close()\n execute(self.commands_to_execute)\n\n def cancel(self):\n self.commands_to_execute = None\n\n def generate_main_menu_choices(self):\n menu_choices = []\n for i, choice in enumerate(self.configuration.choices):\n menu_choices.append(\n (choice.name, lambda i=i: self.handle_main_menu_choice(i)))\n menu_choices.append((\"Exit\", lambda: exit(0)))\n return menu_choices\n\n def handle_main_menu_choice(self, i):\n self.commands_to_execute = list()\n for command in self.configuration.choices[i].commands:\n self.commands_to_execute.append(format_command(command))\n\n def refresh_menu(self):\n if self.commands_to_execute:\n self.main_menu.set_options(self.confirmation_choices)\n self.main_menu.set_message(\"Commands to execute:\\n\\n{0}\".format(\n list_to_comma_newline_separated_string(self.commands_to_execute)))\n else:\n self.main_menu.set_options(self.main_menu_choices)\n self.main_menu.set_message(self.main_menu_message)\n","sub_path":"comrade/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398819125","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom rest_framework import viewsets\nfrom .serializers import CatsSerializer\nfrom .models import Cat\nfrom .forms import CatCreationForm, PostForm\n\n#Начальная страница, смотрит метод GET или POST\ndef index(request):\n if request.method == 'GET':\n form = PostForm()\n return render(request, 'index.html', {'form': form})\n else:\n form = PostForm(request.POST)\n message = request.POST.get('message')\n form.message = message\n if form.is_valid():\n form.save()\n return redirect('/')\n return redirect('/')\n\n#вьюха регистрации\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/login')\n else:\n return render(request, 'error.html')\n else:\n form = UserCreationForm()\n context = {'form': form}\n return render(request, 'reg.html', context)\n\n#Страница с котами.\ndef cats(request):\n if request.method == 'GET':\n current_user = request.user\n cats = Cat.objects.all();\n form = CatCreationForm()\n return render(request, 'cats.html', {'cats':cats, 'form': form, 'current_user':current_user})\n else:\n form = CatCreationForm(request.POST)\n if form.is_valid():\n form = form.save(commit=False)\n form.user = request.user #добавляем хозяина к коту\n form.save()\n return redirect('/cats')\n else:\n return redirect('/cats')\n#Страница с моим резюме\ndef resume(request):\n return render(request, 'resume.html')\n#Калькулятор на питоне\ndef culc(request):\n return render(request, 'culc.html')\n#Телеграм-бот. 
Надо бы найти ему хостинг\ndef bot(request):\n return render(request, 'bot.html')\n\ndef chat(request):\n return render(request, 'chat.html')\n#Ещё не начал делать операции в фоне без перезагрузки\n#Перекидываю на сраницу и передаю id кота\ndef delete(request, cat_id):\n try:\n current_user = request.user\n current_cat = Cat.objects.get(id=cat_id)\n if current_user == current_cat.user:\n current_cat.delete()\n return redirect('/cats')\n return redirect('/cats')\n except:\n redirect('/cats')\n\ndef edit(request, cat_id=''):\n current_user = request.user\n current_cat = Cat.objects.get(id=cat_id)\n if not request.method == 'POST':\n form = CatCreationForm()\n if current_user == current_cat.user:\n return render(request, 'edit.html', {'cat': current_cat, 'form': form})\n else:\n return redirect('/cats')\n else:\n form = CatCreationForm(request.POST)\n if form.is_valid():\n cat = form.save(commit=False)\n current_cat.name = cat.name\n current_cat.years = cat.years\n current_cat.breed = cat.breed\n current_cat.img = cat.img\n current_cat.user = request.user\n current_cat.save()\n return redirect('/cats')\n\ndef add(request):\n if request.method == 'GET':\n form = CatCreationForm()\n return render(request, 'add.html', {'form': form})\n else:\n form = CatCreationForm(request.POST)\n if form.is_valid():\n cat = form.save(commit=False)\n cat.user = request.user\n cat.save()\n return redirect('/cats')\n#Начало API\nclass CatsViewSet(viewsets.ModelViewSet):\n queryset = Cat.objects.all()\n serializer_class = CatsSerializer #сериализатор для обработки информации\n\n\n#декоратор, который не пускает неавторизованных пользователей\n@login_required\ndef signUp_error(request):\n return render(request, 'error.html')\n\n@login_required\ndef login_error(request):\n return\n","sub_path":"blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289901197","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/rat/testsensitive.py\n# Compiled at: 2006-01-30 20:06:37\n__license__ = 'MIT '\n__author__ = 'Tiago Cogumbreiro '\n__copyright__ = 'Copyright 2005, Tiago Cogumbreiro'\nimport gtk, unittest, sensitive\n\nclass TestCounter(unittest.TestCase):\n __module__ = __name__\n\n def setUp(self):\n self.amount = 0\n\n def cb(self, amount):\n self.amount = amount\n\n def test_counter(self):\n counter = sensitive.Counter(self.cb)\n counter.inc()\n self.assertEqual(self.amount, 1)\n counter.inc()\n self.assertEqual(self.amount, 2)\n counter.dec()\n self.assertEqual(self.amount, 1)\n counter.dec()\n self.assertEqual(self.amount, 0)\n counter.dec()\n self.assertEqual(self.amount, -1)\n\n\nclass TestClient(unittest.TestCase):\n __module__ = __name__\n\n def setUp(self):\n self.amount = 0\n self.counter = sensitive.Counter(self.cb)\n\n def cb(self, amount):\n self.amount = amount\n\n def test_client(self):\n self.assertEqual(self.amount, 0)\n client = sensitive.SensitiveClient(self.counter)\n self.assertEqual(self.amount, 0)\n client.set_sensitive(True)\n self.assertEqual(self.amount, 0)\n client.set_sensitive(True)\n self.assertEqual(self.amount, 0)\n client.set_sensitive(False)\n self.assertEqual(self.amount, 1)\n client.set_sensitive(False)\n self.assertEqual(self.amount, 1)\n client.set_sensitive(True)\n self.assertEqual(self.amount, 0)\n 
client.set_sensitive(False)\n self.assertEqual(self.amount, 1)\n client = None\n self.assertEqual(self.amount, 0)\n return\n\n\nclass TestController(unittest.TestCase):\n __module__ = __name__\n\n def setUp(self):\n self.lbl = gtk.Label()\n self.cnt = sensitive.SensitiveController(self.lbl)\n\n def is_sensitive(self):\n return self.lbl.get_property('sensitive')\n\n def test_0_controller_ref(self):\n self.lbl.set_sensitive(False)\n self.cnt = None\n self.assertTrue(self.is_sensitive())\n self.lbl.set_sensitive(False)\n self.cnt = sensitive.SensitiveController(self.lbl)\n self.assertTrue(self.is_sensitive())\n client = self.cnt.create_client()\n client.set_sensitive(False)\n self.failIf(self.is_sensitive())\n self.cnt = None\n self.assertTrue(self.is_sensitive())\n return\n\n def test_1_client(self):\n self.assertTrue(self.is_sensitive())\n client = self.cnt.create_client()\n self.assertTrue(self.is_sensitive())\n client.set_sensitive(False)\n self.failIf(self.is_sensitive())\n client.set_sensitive(True)\n self.assertTrue(self.is_sensitive())\n client.set_sensitive(False)\n self.failIf(self.is_sensitive())\n client = None\n self.assertTrue(self.is_sensitive())\n return\n\n def test_destroy_object(self):\n client = self.cnt.create_client()\n self.lbl.destroy()\n\n def test_2_signal_bind(self):\n entry = gtk.Entry()\n bind = sensitive.SignalBind(self.cnt)\n bind.bind(entry, 'text', 'changed', lambda text: text != '')\n self.failIf(self.is_sensitive())\n entry.set_text('Foo')\n self.assertTrue(self.is_sensitive())\n entry.set_text('')\n self.failIf(self.is_sensitive())\n bind = None\n self.assertTrue(self.is_sensitive())\n return\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/Rat-0.1-py2.4/testsensitive.py","file_name":"testsensitive.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407572636","text":"import datetime\nfrom itertools import permutations\n\nnumCases = input()\n\nfor case in range(int(numCases)):\n numbers = input()\n part1, part2, part3 = numbers.split()\n numbers = str(part1) + str(part2) + str(part3)\n total = 0\n datelist = []\n bestdate = datetime.date(9999, 12, 31)\n\n perms = [''.join(p) for p in permutations(numbers)]\n\n for perm in perms:\n day = perm[:2]\n month = perm[2:4]\n year = perm[4:]\n\n if int(year) < 2000:\n continue\n\n try:\n date = datetime.date(int(year), int(month), int(day))\n\n if date not in datelist:\n datelist.append(date) \n total = total + 1\n if date < bestdate:\n bestdate = date\n except ValueError:\n continue\n\n finaldate = \"\"\n if not total == 0:\n finaldate = bestdate.strftime(\"%d %m %Y\")\n print(str(total) + \" \" + finaldate)\n \n","sub_path":"Dreamer/dreamer.py","file_name":"dreamer.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26077382","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 15 13:08:20 2019\n\n@author: micha\n\"\"\"\nimport networkx as nx\nimport numpy.random as rnd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport imageio\nimport math\n\n\nangles = np.linspace(0, 2*math.pi, 1000)\na=1\nN = 1000\nMCS = 100\ntime = [i for i in range(N)]\n\ndef trajectory(N, a, time):\n start = (0,0)\n path = []\n nodes = [start]\n for t in time:\n angle = rnd.choice(angles)\n x = a * math.cos(angle)\n y = a * math.sin(angle)\n target = (start[0] + 
x, start[1] + y)\n path.append((start, target))\n nodes.append(target)\n start = target\n unzipped_x, unzipped_y = zip(*nodes)\n unzipped_x = list( unzipped_x)\n unzipped_y = list(unzipped_y)\n x_p = [i for i in unzipped_x if i > 0]\n xy_p = [i for i in unzipped_x if i > 0 and unzipped_y[unzipped_x.index(i)] > 0]\n return x_p, xy_p\n\nx_plus = []\nxy_plus = []\nfor i in range(MCS):\n x_p, xy_p = trajectory(N, a, time)\n x_plus.append(len(x_p))\n xy_plus.append(len(xy_p))\n\nsns.distplot(x_plus, hist = True, kde = True)\nsns.distplot(xy_plus, hist = True, kde = True)\n \n\n\n#for j in range(len(path)):\n# plt.plot([path[j][0][0],path[j][1][0]], [path[j][0][1],path[j][1][1]], 'b')","sub_path":"list4/task2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"109444170","text":"import virtool.errors\nimport virtool.otus\n\n\ndef format_fasta_entry(otu_name, isolate_name, sequence_id, sequence):\n \"\"\"\n Create a FASTA header for a sequence in a otu DNA FASTA file downloadable from Virtool.\n\n :param otu_name: the otu name to include in the header\n :type otu_name: str\n\n :param isolate_name: the isolate name to include in the header\n :type isolate_name: str\n\n :param sequence_id: the sequence id to include in the header\n :type sequence_id: str\n\n :param sequence: the sequence for the FASTA entry\n :type sequence: str\n\n :return: a FASTA entry\n :rtype: str\n\n \"\"\"\n return f\">{otu_name}|{isolate_name}|{sequence_id}|{len(sequence)}\\n{sequence}\"\n\n\ndef format_fasta_filename(*args):\n \"\"\"\n Format a FASTA filename of the form \"otu.isolate.sequence_id.fa\".\n\n :param args: the filename parts\n\n :return: a compound FASTA filename\n :rtype: str\n\n \"\"\"\n if len(args) > 3:\n raise ValueError(\"Unexpected number of filename parts\")\n\n if len(args) == 0:\n raise ValueError(\"At least one filename part required\")\n\n filename = \".\".join(args).replace(\" \", \"_\") + \".fa\"\n\n return filename.lower()\n\n\nasync def generate_isolate_fasta(db, otu_id, isolate_id):\n \"\"\"\n Generate a FASTA filename and body for the sequences associated with the isolate identified by the passed\n ``otu_id`` and ``isolate_id``.\n\n :param db: the application database client\n :type db: :class:`~motor.motor_asyncio.AsyncIOMotorClient`\n\n :param otu_id: the id of the isolates' parent otu\n :type otu_id: str\n\n :param isolate_id: the id of the isolate to FASTAfy\n :type isolate_id: str\n\n :return: as FASTA filename and body\n :rtype: Tuple[str, str]\n\n \"\"\"\n _, isolate_name = await get_otu_and_isolate_names(db, otu_id, isolate_id)\n\n otu = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"name\", \"isolates\"])\n\n fasta = list()\n\n async for sequence in db.sequences.find({\"otu_id\": otu_id, \"isolate_id\": isolate_id}, [\"sequence\"]):\n fasta.append(format_fasta_entry(\n otu[\"name\"],\n isolate_name,\n sequence[\"_id\"],\n sequence[\"sequence\"]\n ))\n\n return format_fasta_filename(otu[\"name\"], isolate_name), \"\\n\".join(fasta)\n\n\nasync def generate_sequence_fasta(db, sequence_id):\n \"\"\"\n Generate a FASTA filename and body for the sequence associated with the passed ``sequence_id``.\n\n :param db: the application database client\n :type db: :class:`~motor.motor_asyncio.AsyncIOMotorClient`\n\n :param sequence_id: the id sequence of the sequence to FASTAfy\n :type sequence_id: str\n\n :return: as FASTA filename and body\n 
:rtype: Tuple[str, str]\n\n \"\"\"\n sequence = await db.sequences.find_one(sequence_id, [\"sequence\", \"otu_id\", \"isolate_id\"])\n\n if not sequence:\n raise virtool.errors.DatabaseError(\"Sequence does not exist\")\n\n otu_name, isolate_name = await get_otu_and_isolate_names(db, sequence[\"otu_id\"], sequence[\"isolate_id\"])\n\n fasta = format_fasta_entry(\n otu_name,\n isolate_name,\n sequence_id,\n sequence[\"sequence\"]\n )\n\n return format_fasta_filename(otu_name, isolate_name, sequence[\"_id\"]), fasta\n\n\nasync def generate_otu_fasta(db, otu_id):\n \"\"\"\n Generate a FASTA filename and body for the sequences associated with the otu identified by the passed\n ``otu_id``.\n\n :param db: the application database client\n :type db: :class:`~motor.motor_asyncio.AsyncIOMotorClient`\n\n :param otu_id: the id of the otu whose sequences should be FASTA-fied\n :type otu_id: str\n\n :return: as FASTA filename and body\n :rtype: Tuple[str, str]\n\n \"\"\"\n\n otu = await db.otus.find_one(otu_id, [\"name\", \"isolates\"])\n\n if not otu:\n raise virtool.errors.DatabaseError(\"OTU does not exist\")\n\n fasta = list()\n\n for isolate in otu[\"isolates\"]:\n async for sequence in db.sequences.find({\"otu_id\": otu_id, \"isolate_id\": isolate[\"id\"]}, [\"sequence\"]):\n fasta.append(format_fasta_entry(\n otu[\"name\"],\n virtool.otus.format_isolate_name(isolate),\n sequence[\"_id\"],\n sequence[\"sequence\"]\n ))\n\n fasta = \"\\n\".join(fasta)\n\n return format_fasta_filename(otu[\"name\"]), fasta\n\n\nasync def get_otu_and_isolate_names(db, otu_id, isolate_id):\n otu = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"name\", \"isolates\"])\n\n if not otu:\n raise virtool.errors.DatabaseError(\"OTU does not exist\")\n\n isolate = virtool.otus.find_isolate(otu[\"isolates\"], isolate_id)\n\n if not isolate:\n raise virtool.errors.DatabaseError(\"Isolate does not exist\")\n\n return otu[\"name\"], virtool.otus.format_isolate_name(isolate)\n","sub_path":"virtool/db/downloads.py","file_name":"downloads.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"301606403","text":"from logging import getLogger\n\nfrom django.db.models import Q\nfrom django.http.response import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom golem_messages.message import FileTransferToken\n\nfrom core.tasks import upload_finished\nfrom common.decorators import provides_concent_feature\nfrom common.logging import log_request_received\nfrom common.logging import log_string_message\nfrom .models import UploadReport\nfrom .models import VerificationRequest\n\nlogger = getLogger(__name__)\n\n\n@provides_concent_feature('conductor-urls')\n@require_POST\n@csrf_exempt\ndef report_upload(_request, file_path):\n\n log_request_received(logger, file_path, FileTransferToken.Operation.upload)\n # If there's a corresponding VerificationRequest, the load it and link it to UploadReport.\n try:\n verification_request = VerificationRequest.objects.get(\n Q(source_package_path=file_path) | Q(result_package_path=file_path)\n )\n except VerificationRequest.DoesNotExist:\n verification_request = None\n\n # The app creates a new instance of UploadReport in the database.\n upload_report_obj = UploadReport(\n path = file_path,\n verification_request = verification_request,\n )\n upload_report_obj.full_clean()\n upload_report_obj.save()\n\n # The 
app gets the VerificationRequest and checks if both source and result packages have reports.\n if (\n verification_request is not None and\n verification_request.blender_subtask_definition is not None and\n verification_request.upload_reports.filter(path=verification_request.source_package_path).exists() and\n verification_request.upload_reports.filter(path=verification_request.result_package_path).exists() and\n verification_request.upload_reports.filter(path=file_path).count() == 1\n ):\n assert file_path in [verification_request.source_package_path, verification_request.result_package_path]\n\n # If all expected files have been uploaded, the app sends upload_finished task to the work queue.\n upload_finished.delay(verification_request.subtask_id)\n\n verification_request.upload_finished = True\n verification_request.full_clean()\n verification_request.save()\n\n log_string_message(\n logger, 'All expected files have been uploaded',\n f'Subtask ID: {verification_request.subtask_id}.'\n f'Result package path: {verification_request.result_package_path}.'\n f'Source package path: {verification_request.source_package_path}.'\n )\n\n return HttpResponse()\n","sub_path":"concent_api/conductor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367765497","text":"import logging\nfrom pathlib import Path\nfrom typing import IO, Iterator, Optional, Union\n\nfrom datalad.support.annexrepo import AnnexRepo\nimport fsspec\nfrom fsspec.implementations.cached import CachingFileSystem\n\nlgr = logging.getLogger(\"datalad_fuse.fsspec\")\n\n\nclass FsspecAdapter:\n def __init__(self, path: Union[str, Path]) -> None:\n self.annex = AnnexRepo(str(path))\n self.cache_dir = Path(path, \".git\", \"datalad\", \"cache\", \"fsspec\")\n self.fs = CachingFileSystem(\n fs=fsspec.filesystem(\"http\"),\n # target_protocol='blockcache',\n cache_storage=str(self.cache_dir),\n # cache_check=600,\n # block_size=1024,\n # check_files=True,\n # expiry_times=True,\n # same_names=True\n )\n\n def get_urls(self, filepath: Union[str, Path]) -> Iterator[str]:\n whereis = self.annex.whereis(str(filepath), output=\"full\")\n remote_uuids = []\n for ru, v in whereis.items():\n remote_uuids.append(ru)\n for u in v[\"urls\"]:\n if is_http_url(u):\n yield u\n\n key = self.annex.get_file_key(filepath)\n path_mixed = self.annex.call_annex_oneline(\n [\n \"examinekey\",\n \"--format=annex/objects/${hashdirmixed}${key}/${key}\\\\n\",\n key,\n ]\n )\n path_lower = self.annex.call_annex_oneline(\n [\n \"examinekey\",\n \"--format=annex/objects/${hashdirlower}${key}/${key}\\\\n\",\n key,\n ]\n )\n\n uuid2remote_url = {}\n for r in self.annex.get_remotes():\n ru = self.annex.config.get(f\"remote.{r}.annex-uuid\")\n if ru is None:\n continue\n remote_url = self.annex.config.get(f\"remote.{r}.url\")\n if remote_url is None:\n continue\n remote_url = self.annex.config.rewrite_url(remote_url)\n uuid2remote_url[ru] = remote_url\n\n for ru in remote_uuids:\n try:\n base_url = uuid2remote_url[ru]\n except KeyError:\n continue\n if is_http_url(base_url):\n if base_url.lower().rstrip(\"/\").endswith(\"/.git\"):\n paths = [path_mixed, path_lower]\n else:\n paths = [\n path_lower,\n path_mixed,\n f\".git/{path_lower}\",\n f\".git/{path_mixed}\",\n ]\n for p in paths:\n yield base_url.rstrip(\"/\") + \"/\" + p\n\n def open(\n self,\n filepath: Union[str, Path],\n mode: str = \"rb\",\n encoding: str = \"utf-8\",\n errors: 
Optional[str] = None,\n ) -> IO:\n if mode not in (\"r\", \"rb\", \"rt\"):\n raise NotImplementedError(\"Only modes 'r', 'rb', and 'rt' are supported\")\n if mode == \"rb\":\n kwargs = {}\n else:\n kwargs = {\"encoding\": encoding, \"errors\": errors}\n if self.annex.is_under_annex(filepath) and not self.annex.file_has_content(\n filepath\n ):\n for url in self.get_urls(filepath):\n try:\n return self.fs.open(url, mode, **kwargs)\n except FileNotFoundError as e:\n lgr.debug(\n \"Failed to open file %s at URL %s: %s\", filepath, url, str(e)\n )\n raise IOError(f\"Could not find a usable URL for {filepath}\")\n else:\n return open(filepath, mode, **kwargs)\n\n def clear(self) -> None:\n self.fs.clear_cache()\n\n\ndef is_http_url(s):\n return s.lower().startswith((\"http://\", \"https://\"))\n","sub_path":"datalad_fuse/fsspec.py","file_name":"fsspec.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443279138","text":"# SPDX-FileCopyrightText: 2021 CmdKit Developers\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Agent class implementation.\"\"\"\n\n\n# standard libs\nimport time\nfrom datetime import datetime, timedelta\nfrom abc import abstractmethod\n\n# internal libs\nfrom .service import Service\n\n\nclass Agent(Service):\n \"\"\"An agent runs 'task' jobs with on a specified interval.\"\"\"\n\n name: str = None\n interval: float = None\n pid_dir: str = '/var/run'\n\n def __init__(self, daemon: bool = False) -> None:\n \"\"\"\n Initialize an Agent.\n\n Arguments\n ---------\n daemon: bool = False\n Alter behavior to act in daemon mode.\n \"\"\"\n super().__init__(f'{self.pid_dir}/{self.name}.pid', daemon=daemon)\n\n @abstractmethod\n def task(self) -> None:\n \"\"\"A task must be defined for all Agents.\"\"\"\n raise NotImplementedError()\n\n def run(self) -> None:\n \"\"\"An Agent spawns 'task' jobs with a specified sleep period.\"\"\"\n while True:\n start_time = datetime.now()\n start_next = start_time + timedelta(seconds=self.interval)\n self.task()\n if datetime.now() < start_next:\n time.sleep((start_next - datetime.now()).total_seconds())\n","sub_path":"cmdkit/service/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"294271805","text":"#coding:utf-8\nimport xmlTreeClass\nimport requests\nfrom Config import *\n\n\ndef getXmlContent(xmlName):\n xml_url = API_URL + \"/viewFile.do?appName=%s&fileName=%s\" % (APPNAME, xmlName)\n s = requests.Session()\n s.auth = (USER, PASSWORD)\n r = s.get(xml_url)\n return r.content\n\n\n# 获取所有元素的某个参数的值,保存列表返回,可以是由另一个参数的值来决定是否有效;应用举例 getValListFromXml(\"tsum_xml\", \"id\", \"visible=1\")\ndef getValListFromXml(xml_name, xml_parameter, paraChecker=None):\n other_xml_tree = xmlTreeClass.XmlTree(xml_name, getXmlContent(xml_name))\n root = other_xml_tree.root\n\n para_vals = []\n if not paraChecker:\n for child in root:\n if \"type\" == child.tag:\n continue\n if child.get(xml_parameter): # 有的元素没有某些参数,比如stage.xml中的hiddenStages,此时取到是None,不计入列表\n para_vals.append(child.get(xml_parameter))\n else:\n for child in root:\n if \"type\" == child.tag:\n continue\n attrCheckerName = paraChecker.split(\"=\")[0]\n attrCheckerValue = paraChecker.split(\"=\")[1]\n v = child.get(attrCheckerName)\n if child.get(xml_parameter) and attrCheckerValue == v:\n para_vals.append(child.get(xml_parameter))\n\n # 去重,但不打乱顺序\n # para_vals = 
list(set(para_vals))\n para_vals2 = []\n for p in para_vals:\n if p not in para_vals2:\n para_vals2.append(p)\n return para_vals2\n\n\n# 获取所有元素中某三个参数,组成字典结构返回,可以由另一个参数决定是什么形式:如{x:{y:z}}或者{x:{y:[z]}}\ndef getValDicFromXml(xml_name, xml_parameter1, xml_parameter2, xml_parameter3=None, dicValueFormat='str'):\n other_xml_tree = xmlTreeClass.XmlTree(xml_name, getXmlContent(xml_name))\n root = other_xml_tree.root\n\n dicVals = {}\n if not xml_parameter3:\n # 获取两个参数的字典,value的参数类型为str\n if 'str' == dicValueFormat:\n for child in root:\n if \"type\" == child.tag:\n continue\n val1 = child.get(xml_parameter1).encode('utf-8')\n val2 = child.get(xml_parameter2)\n\n if not val1 or not val2:\n continue\n if val1 not in dicVals.keys():\n dicVals[val1] = val2\n # 获取两个参数的字典,value的参数类型为数组\n if 'dic' == dicValueFormat:\n for child in root:\n if \"type\" == child.tag:\n continue\n val1 = child.get(xml_parameter1)\n val2 = child.get(xml_parameter2)\n\n if not val1 or not val2:\n continue\n if val1 not in dicVals.keys():\n dicVals[val1] = [val2]\n else:\n dicVals[val1].append(val2)\n else:\n # 获取三个参数的字典,value的参数类型为str\n if 'str' == dicValueFormat:\n for child in root:\n if \"type\" == child.tag:\n continue\n val1 = child.get(xml_parameter1)\n val2 = child.get(xml_parameter2)\n val3 = child.get(xml_parameter3)\n\n if not val1 or not val2 or not val3:\n continue\n if val1 not in dicVals.keys():\n dicVals[val1] = {val2: val3}\n else:\n if val2 not in dicVals[val1].keys():\n dicVals[val1][val2] = val3\n # 获取三个参数的字典,value的参数类型为数组\n if 'dic' == dicValueFormat:\n for child in root:\n if \"type\" == child.tag:\n continue\n val1 = child.get(xml_parameter1)\n val2 = child.get(xml_parameter2)\n val3 = child.get(xml_parameter3)\n\n if not val1 or not val2 or not val3:\n continue\n if val1 not in dicVals.keys():\n dicVals[val1] = {val2: [val3]}\n else:\n if val2 not in dicVals[val1].keys():\n dicVals[val1][val2] = [val3]\n else:\n dicVals[val1][val2].append(val3)\n return dicVals\n\n\ndef getGlobalData(key):\n global_xml_tree = xmlTreeClass.XmlTree('global.xml', getXmlContent('global.xml'))\n for child in global_xml_tree.root:\n if key == child.get('key'):\n return child.text\n else:\n continue\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n ","sub_path":"Basics_test/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"540483997","text":"\nimport nltk\nfrom nltk.corpus import stopwords\t# permet d'enlever les mots inutiles\nfrom nltk import word_tokenize\t# permet de tokeniser par mot\nfrom nltk import WordNetLemmatizer\nimport sys\n##interface entree : STRING##\n\n\n\n# recuperer l'entree utilisateur depuis un fichier texte\n# Ouvrir et lire le fichier \n# text_file = open(\"test_00.txt\")\t#mettre nom_fichier.txt entre les guillemets\n# text = text_file.read()\n\ntext = \"\"\nfor i in range(len(sys.argv) - 1):\n text += sys.argv[i + 1] + \" \"\n\n\n# text = input(\"Entrez votre action : \")\n\n\n# print('type of text : ')\n# print(type(text))\n# print(\"\\n\")\n\n\n\n# premiere fonction : word tokenization\nwords = word_tokenize(text)\n\n# print('words :')\n# print(words)\n# print(\"\\n\")\n\n# deuxieme fonction : enlever la ponctuation\n# rmq : on le fait avant de tagger les mots car la fonction isalpha ne prend pas de tuples en entree\nword_no_punc = []\nfor w in words:\n if w.isalpha():\n word_no_punc.append(w.lower())\n\n# 
print(\"word_no_punc :\")\n# print(word_no_punc)\n# print(\"\\n\")\n\n# troisieme fonction : tagger les mots\ntagged_words = []\n\nfor w in word_no_punc:\n tagged_words = nltk.pos_tag(word_no_punc)\n\n# print('tagged words :')\n# print(tagged_words)\n# print(\"\\n\")\n\n# quatrieme fonction : clean la liste\nclean_words = []\n\nstopwords = stopwords.words(\"english\")\n\nfor word in tagged_words:\n if word[0] not in stopwords:\n clean_words.append(word)\n\n\n# print(clean_words)\n\nfinal_words = []\nlemma = WordNetLemmatizer()\nfor word in clean_words:\n if 'V' in word[1]:\n final_words.append((lemma.lemmatize(word[0], pos=\"v\"), word[1]))\n elif 'N' in word[1]:\n final_words.append((lemma.lemmatize(word[0], pos=\"n\"), word[1]))\n else:\n final_words.append(word)\n# print(final_words)","sub_path":"game/src/NLP/cleaning_and_parsing.py","file_name":"cleaning_and_parsing.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"514353018","text":"from datetime import datetime\nfrom django import forms\nfrom django.test import TestCase, RequestFactory\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\nfrom multifilefield.fields import MultiFileField, NoFileFieldNameException\nfrom multifilefield.mixins import MultiFileFieldMixin\nfrom multifilefield.models import UploadedFile\nfrom multifilefield.tests import *\n\n\n\nclass MultiFileFieldQuerySetTestCase(TestCase):\n \"\"\" Let's test that the queryset is working properly in\n populating the uploaded files.\"\"\"\n\n\n def setUp(self):\n make_files()\n self.queryset = UploadedFile.objects.all()\n self.storage = TestStorage()\n\n\n def test_init(self):\n \"\"\"Test that initializing the field doesn't break.\"\"\"\n\n MultiFileField(\n add_label='Attach files',\n clear_label='Clear files',\n storage = self.storage,\n queryset = self.queryset,\n filefield_name='upload',\n max_file_size = 1024*1024*5,\n max_num_files = 5,\n min_num_files = 0)\n\n self.assertTrue(True)\n\n\n def test_made_files(self):\n self.assertEqual(UploadedFile.objects.count(), 6)\n\n\n def test_no_filefield_name(self):\n \"\"\"Test that queryset requires filefield_name\"\"\"\n\n self.assertRaises(NoFileFieldNameException, MultiFileField, queryset = self.queryset)\n\n\n def test_with_filefield_name(self):\n \"\"\"Test that queryset requires filefield_name\"\"\"\n\n MultiFileField(\n queryset = self.queryset,\n filefield_name='upload')\n\n self.assertTrue(True)\n\n\n def tearDown(self):\n remove_files()\n\n\n\nclass FormWithMultiFileFieldQuerySetTestCase(TestCase):\n \"\"\" This TestCase is for testing the form mixin. \"\"\"\n\n\n def setUp(self):\n \"\"\" setup the class we're gonna use for testing. I did it\n this way to avoid the class being initialized before\n django test runner has setup the tests database with the appropriate\n tables for querying. 
The uploadedfiles table is queried\n during initialization of the form.\"\"\"\n\n make_files()\n self.queryset = UploadedFile.objects.all()\n self.storage = TestStorage()\n\n class TestFormWithQueryset(MultiFileFieldMixin, forms.Form):\n uploads = MultiFileField(\n storage = self.storage,\n queryset = self.queryset,\n filefield_name='upload')\n\n self.TestFormWithQueryset = TestFormWithQueryset\n self.factory = RequestFactory()\n\n\n def test_with_queryset(self):\n \"\"\"Test form with a request.\"\"\"\n\n data = {}\n request = self.factory.post('/fake/', data=data)\n form = self.TestFormWithQueryset(request.POST, request.FILES)\n\n if form.is_valid():\n form.process_files_for('uploads')\n cleaned_data = form.cleaned_data\n self.assertEqual(len(cleaned_data.get('uploads')), 6)\n else:\n self.fail(form.errors)\n\n\n def test_with_queryset_clear(self):\n \"\"\"Test form with a request. Clear four files.\"\"\"\n\n data = {'uploads_1': ('1', '2', '3', '4',)}\n request = self.factory.post('/fake/', data=data)\n form = self.TestFormWithQueryset(request.POST, request.FILES)\n\n if form.is_valid():\n form.process_files_for('uploads')\n cleaned_data = form.cleaned_data\n self.assertEqual(len(cleaned_data.get('uploads')), 2)\n else:\n self.fail(form.errors)\n\n\n def test_with_queryset_upload_new_file(self):\n \"\"\"Test form with a request. Add new file.\"\"\"\n\n upload = SimpleUploadedFile('uploaded_file.jpeg',\n 'file_content', content_type='image/jpeg')\n\n data = {'uploads_0': upload}\n request = self.factory.post('/fake/', data=data)\n form = self.TestFormWithQueryset(request.POST, request.FILES)\n\n if form.is_valid():\n form.process_files_for('uploads')\n cleaned_data = form.cleaned_data\n self.assertEqual(len(cleaned_data.get('uploads')), 7)\n else:\n self.fail(form.errors)\n\n\n def test_with_queryset_upload_new_file_clear_four(self):\n \"\"\"Test form with a request. Add new file and clear four files.\"\"\"\n\n upload = SimpleUploadedFile('uploaded_file.jpeg',\n 'file_content', content_type='image/jpeg')\n\n data = {\n 'uploads_0': upload,\n 'uploads_1': ('1', '2', '3', '4',)\n }\n\n request = self.factory.post('/fake/', data=data)\n form = self.TestFormWithQueryset(request.POST, request.FILES)\n\n if form.is_valid():\n form.process_files_for('uploads')\n cleaned_data = form.cleaned_data\n self.assertEqual(len(cleaned_data.get('uploads')), 3)\n else:\n self.fail(form.errors)\n\n\n def tearDown(self):\n remove_files()\n","sub_path":"multifilefield/tests/test_with_queryset.py","file_name":"test_with_queryset.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"156527571","text":"import time\n\nfrom tlib import rand\nfrom tlib.web import dao\nfrom tlib.web import sqlhelper\nfrom .. 
import suite, models\n\n\nclass TradeDao(dao.Dao):\n    def get(self, **conds):\n        \"\"\"\n        get trade record by conditions\n        :return:\n            None, or first matched trade object\n        \"\"\"\n        # select query\n        q = sqlhelper.select().columns(*models.UserTrade.fields).table('tb_user_trade').where(**conds)\n\n        # execute query\n        results = self.select(q.sql(), q.args())\n        if len(results) > 0:\n            return models.UserTrade(**results[0])\n\n        return None\n\n    def list(self, **conds):\n        \"\"\"\n        get trade record by conditions\n        :return:\n            None, or first matched trade object\n        \"\"\"\n        # select query\n        q = sqlhelper.select().columns(*models.UserTrade.fields).table('tb_user_trade').where(**conds)\n\n        # execute query\n        results = self.select(q.sql(), q.args())\n\n        return results\n\n    def use_counpon(self, couponid):\n        \"\"\"\n        use coupon\n        :param couponid:\n        :return:\n        \"\"\"\n        # update query\n        sql = '''\n            update tb_user_coupon\n            set status=%s, utime=%s\n            where id=%s\n        '''\n\n        # execute update\n        self.execute(sql, (suite.enum.coupon.used.code, int(time.time()), couponid))\n\n    def update_money(self, userid, money):\n        \"\"\"\n        use money of user\n        :param userid:\n        :param money:\n        :return:\n        \"\"\"\n        # update query\n        sql = '''\n            update tb_user\n            set money = %s\n            where id=%s\n        '''\n\n        # execute update\n        self.execute(sql, (money, userid))\n\n    def add_bill(self, userid, bmoney, lmoney, money, item, detail):\n        \"\"\"\n        add bill record\n        :param money:\n        :param item:\n        :param detail:\n        :return:\n        \"\"\"\n        # insert query\n        sql = '''\n            insert into tb_user_bill(user_id, code, item, detail, money, bmoney, lmoney, ctime)\n            values(%s, %s, %s, %s, %s, %s, %s, %s)\n        '''\n\n        # generate code\n        code = rand.uuid()\n\n        # execute insert\n        self.execute(sql, (userid, code, item, detail, money, bmoney, lmoney, int(time.time())))\n\n    def add_margin(self, tradeid, money, item, detail):\n        \"\"\"\n        add margin\n        :return:\n        \"\"\"\n        # insert query\n        sql = '''\n            insert into tb_trade_margin(trade_id, `money`, `item`, `detail`, ctime)\n            values(%s, %s, %s, %s, %s)\n        '''\n\n        # execute insert\n        self.execute(sql, (tradeid, money, item, detail, int(time.time())))\n\n    def add_fee(self, tradeid, item, nmoney, amoney, detail):\n        \"\"\"\n        add fee\n        :return:\n        \"\"\"\n        # insert query\n        sql = '''\n            insert into tb_trade_fee(trade_id, item, nmoney, amoney, detail, ctime)\n            values(%s, %s, %s, %s, %s, %s)\n        '''\n\n        # execute insert\n        self.execute(sql, (tradeid, item, nmoney, amoney, detail, int(time.time())))\n\n    def add_trade(self, userid, stockid, couponid, code, ptype, price, count, margin, slog):\n        \"\"\"\n        add user trade order\n        :param userid:\n        :param stockid:\n        :param couponid:\n        :param code:\n        :param ptype:\n        :param price:\n        :param count:\n        :return:\n        \"\"\"\n        # insert query\n        sql = '''\n            insert into tb_user_trade(user_id, stock_id, coupon_id, code, optype, oprice, ocount, margin, status, slog, ctime, utime)\n            values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n        '''\n\n        timenow = int(time.time())\n        # execute insert\n        self.execute(sql, (userid, stockid, couponid, code, ptype, price, count, margin, suite.enum.trade.tobuy.code, slog, timenow, timenow))\n\n    def add_lever(self, tradeid, lever, wline, sline, ofmin, ofrate, dfrate, psrate, mmin, mmax):\n        \"\"\"\n        add lever of trade\n        :return:\n        \"\"\"\n        # insert query\n        sql = '''\n            insert into tb_trade_lever(trade_id, lever, wline, sline, ofmin, ofrate, dfrate, psrate, mmin, mmax)\n            values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n        '''\n\n        # execute insert\n        self.execute(sql, (tradeid, lever, wline, sline, ofmin, 
ofrate, dfrate, psrate, mmin, mmax))\n\n def get_lever(self, tradeid):\n \"\"\"\n get lever of trade\n :param tradeid:\n :return:\n \"\"\"\n # select query\n q = sqlhelper.select().columns(*models.TradeLever.fields).table('tb_trade_lever').where(trade_id=tradeid)\n\n # execute query\n results = self.select(q.sql(), q.args())\n if len(results) > 0:\n return models.TradeLever(**results[0])\n\n return None\n\n def update_trade(self, tradeid, **cvals):\n \"\"\"\n update trade\n :param tradeid:\n :param cvals:\n :return:\n \"\"\"\n # update query\n q = sqlhelper.update().table('tb_user_trade').set(**cvals).where(id=tradeid)\n\n # execute sql\n self.execute(q.sql(), q.args())\n","sub_path":"app/svc/app/aam/daos/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92899501","text":"#\n# [229] Majority Element II\n#\n# https://leetcode.com/problems/majority-element-ii/description/\n#\n# algorithms\n# Medium (29.49%)\n# Total Accepted: 73.6K\n# Total Submissions: 249.3K\n# Testcase Example: '[3,2,3]'\n#\n# Given an integer array of size n, find all elements that appear more than ⌊\n# n/3 ⌋ times.\n#\n# Note: The algorithm should run in linear time and in O(1) space.\n#\n# Example 1:\n#\n#\n# Input: [3,2,3]\n# Output: [3]\n#\n# Example 2:\n#\n#\n# Input: [1,1,1,3,3,2,2,2]\n# Output: [1,2]\n#\n\n\nclass Solution:\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n # Boyer-Moore Majority Vote Algorithm\n if len(nums) <= 1:\n return nums\n num1, num2, cnt1, cnt2 = None, None, 0, 0\n for n in nums:\n if num1 == n:\n cnt1 += 1\n elif num2 == n:\n cnt2 += 1\n elif cnt1 == 0:\n num1, cnt1 = n, 1\n elif cnt2 == 0:\n num2, cnt2 = n, 1\n else:\n cnt1, cnt2 = cnt1-1, cnt2-1\n # Check result\n cnt1, cnt2 = 0, 0\n for n in nums:\n if n == num1:\n cnt1 += 1\n elif n == num2:\n cnt2 += 1\n res = []\n if cnt1 > len(nums) // 3:\n res.append(num1)\n if cnt2 > len(nums) // 3:\n res.append(num2)\n return res\n","sub_path":"229.majority-element-ii.python3.py","file_name":"229.majority-element-ii.python3.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381828800","text":"a,b = map(int,input().split())\nc = input().split()\nlist = []\nfor i in c:\n list.append(int(i))\nfor j in range(0,len(c)):\n if(list[j] == b):\n print(\"yes\")\n break\nelse:\n print(\"no\")\n","sub_path":"check whether it is number exists.py","file_name":"check whether it is number exists.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319154763","text":"from django.forms import Textarea\nfrom django import forms\nimport simplejson as json\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom django.utils import six, translation\nfrom multilingualfield import settings as ml_settings\n\nfrom .language import LanguageText\n\nDEFAULT_CKCONFIG = dict()\n\n\nclass MLTextWidget(Textarea):\n \"\"\"\n Widget used to display a multi-language field\n \"\"\"\n HTML = False\n\n def __init__(self, html=False, textarea=False, *args, **kwargs):\n self.HTML = html\n self.textarea = textarea\n\n super(MLTextWidget, self).__init__(*args, **kwargs)\n\n @property\n def media(self):\n \"\"\"\n Define all media needed by 
the widget to be operational\n :return: a forms.Media instance with all CSS and JS script needed\n \"\"\"\n # Javascript\n js = [\n 'multiligualfield/js/jquery-1.10.2.min.js',\n 'multiligualfield/js/jquery-ui-1.10.3.custom.min.js',\n 'multiligualfield/js/json.js'\n ]\n if self.HTML:\n js += ['multiligualfield/ckeditor/ckeditor.js']\n\n # Cascading Style Sheets\n css = ['multiligualfield/css/ui-darkness/jquery-ui-1.10.3.custom.min.css']\n\n return forms.Media(js=js, css={'all': css})\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Render the template widget\n :param name: The name of the field we want to display\n :param value: The actual value of the field we want to display\n :param attrs:\n :return: A template of widget initialized\n \"\"\"\n\n is_valid = False\n if value is None or value == '':\n # New create or edit none\n ml_json = '{}'\n ml_language = '[]'\n is_valid = True\n if isinstance(value, six.string_types):\n try:\n valuejson = json.loads(value)\n Lang = LanguageText()\n Lang.values = valuejson\n value = Lang\n except ValueError:\n try:\n Lang = LanguageText(value, language=None)\n value = Lang\n except Exception:\n pass\n if isinstance(value, LanguageText):\n ml_json = json.dumps(value.values)\n ml_language = json.dumps(value.get_available_language())\n is_valid = True\n if is_valid:\n Langs = json.dumps(dict(settings.LANGUAGES))\n if self.HTML:\n widget_template = \"multilingualfield/MLHTMLWidget.html\"\n if self.textarea:\n widget_template = \"multilingualfield/MLTextareaWidget.html\"\n else:\n widget_template = \"multilingualfield/MLTextWidget.html\"\n return mark_safe(render_to_string(\n widget_template,\n {\n \"id\": id(self),\n \"name\": name,\n \"raw\": value,\n \"ml_json\": ml_json, # Content JSON\n \"ml_language\": ml_language, # Available languages\n \"langs\": Langs,\n \"langsobj\": settings.LANGUAGES,\n 'current_language': translation.get_language(),\n 'CKEDITOR_FILER': ml_settings.CKEDITOR_FILER,\n 'CKEDITOR_BROWSER_URL': ml_settings.CKEDITOR_BROWSER_URL\n }\n ))\n\n return \"Invalid data '%s'\" % value\n","sub_path":"multilingualfield/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47487064","text":"import boto3\n\ndef boto3connection(access_key,secret_key,bucketname,w):\n print ('----------------------------------------------')\n host='https://aos.tcsecp.com'\n s3=boto3.resource('s3',aws_access_key_id=access_key,\naws_secret_access_key=secret_key, endpoint_url=host,)\n\n bucket=s3.Bucket(bucketname)\n print (' \\n Contents of ',bucketname)\n for obj in bucket.objects.filter():\n print('{0}:{1}'.format(bucket.name, obj.key))\n s3.create_bucket(Bucket=w)\n s3.Object(w,'apache1.conf').upload_file(Filename='apache.conf')\n\n print (' \\n\\n\\n The new bucket created :', w)\n#For Python3\nx = input('Enter your access key:')\ny = input('Enter your secret key:')\nz = input('Enter your existing bucket name to list contents:')\nw = input ('Enter a new bucket name to get created:')\nboto3connection(x,y,z,w)\n\n","sub_path":"env/bin/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71594626","text":"import pygame\nfrom pygame.sprite import Sprite\n\n\nclass Barrier(Sprite):\n def __init__(self, screen, settings):\n super(Barrier, self).__init__()\n self.hit = 0\n self.image = 
pygame.image.load('images/barrier1.png')\n self.rect = self.image.get_rect()\n self.rect.x, self.rect.y = 50, 50\n\n self.screen = screen\n self.settings = settings\n\n def update(self, barriers):\n if self.hit == 0:\n self.image = pygame.image.load(\"images/barrier2.png\")\n self.hit += 1\n elif self.hit == 1:\n self.image = pygame.image.load(\"images/barrier3.png\")\n self.hit += 1\n elif self.hit == 2:\n barriers.remove(self)\n\n def drawBarrier(self):\n self.screen.blit(self.image, self.rect)\n","sub_path":"barrier.py","file_name":"barrier.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216671576","text":"from web.db.entities import Entity\nfrom web.db.models import MessageDbModel\n\n\nclass Message(Entity):\n model_class = MessageDbModel\n\n def to_dict(self):\n dictionary = super().__getattr__('to_dict')()\n return {\"message\": dictionary}\n\n @classmethod\n def get_not_sent_serialized(cls):\n messages = cls._entities_from_query(cls.model_class.query.filter(\n cls.model_class.sent.is_(False)))\n\n serialized = []\n for message in messages:\n serialized.append(\n {\n 'id': message.id,\n 'name': message.name,\n 'email': message.email,\n 'telephone': message.telephone,\n 'content': message.content\n }\n )\n return serialized\n","sub_path":"web/db/entities/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"546336122","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\"\"\"\nABOUT: \n- Reads price values from text files\n- Plots the values\n- Saves figures as .png\n\"\"\"\n# Gets folder above relative to this file\ndir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n\n# Reads title from file and creates the filename\ntitle = open(dir_path + '\\\\data\\\\title.txt').read()\nfilename = (\"figures\\\\\" + title[0:title.find('simulations')]+'price.png').replace(' ', '_')\n\n# Reads values from files\nS, G = np.loadtxt(dir_path + '\\\\data\\\\option_price.txt', delimiter=',', unpack=True)\nS2, G2 = np.loadtxt(dir_path + '\\\\data\\\\MC_prices.txt', delimiter=',', unpack=True)\nS3, G3 = np.loadtxt(dir_path + '\\\\data\\\\MC_exact_prices.txt', delimiter=',', unpack=True)\n\n# Plots values and saves the figure as a .png file\nplt.plot(S, G, 'b+-', label='Fair option price')\nplt.plot(S2, G2, 'r+-', label='Monte Carlo Euler method price')\nplt.plot(S3, G3, 'g+-', label='Monte Carlo exact simulation price')\nplt.title(title, fontsize=18)\nplt.xlabel('Initial stock price', fontsize=14)\nplt.ylabel('Option price', fontsize=14)\nplt.xlim([min(S), max(S)])\nplt.grid()\nplt.legend(fontsize=14)\nfigure = plt.gcf()\nfigure.set_size_inches(16, 9)\nplt.savefig(filename, dpi=100, bbox_inches='tight')\nplt.show()\n\n","sub_path":"plot/plot_price.py","file_name":"plot_price.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137754020","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport unittest, os\n\nfrom images_filters.enhanche_filter import EnhancheFilter\n\n\nclass ResizeFilterTest(unittest.TestCase):\n\n # def setUp(self):\n # self.instance = BaseFilter()\n\n def test_bad_constructor_raises_exception(self):\n\n bad_filter = \"NOT_A_VALID_FILTER\"\n with self.assertRaises(ValueError) as context:\n x = 
EnhancheFilter(bad_filter)\n\n self.assertTrue(\"Enhanchement %s not allowed\" % (bad_filter) in str(context.exception))\n","sub_path":"test/images_filters/enhanche_filter_test.py","file_name":"enhanche_filter_test.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"598323644","text":"from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Reshape, Convolution2D, BatchNormalization, SpatialDropout2D, LeakyReLU\nfrom keras.layers.core import Activation\nfrom keras.layers.merge import concatenate\nfrom keras.models import Model\n\n\ndef double_conv_layer(x, size, dropout, batch_norm):\n conv = Conv2D(size, (3, 3), padding='same')(x)\n if batch_norm is True:\n conv = BatchNormalization()(conv)\n conv = Activation('relu')(conv)\n conv = Conv2D(size, (3, 3), padding='same')(conv)\n if batch_norm is True:\n conv = BatchNormalization()(conv)\n conv = Activation('relu')(conv)\n if dropout > 0:\n conv = SpatialDropout2D(dropout)(conv)\n return conv\n\n\ndef double_dilated_conv_layer(x, size, dilation_rate, dropout: float, batch_norm: bool, activation='relu'):\n conv = Convolution2D(size, (3, 3), dilation_rate=dilation_rate, padding='same')(x)\n if batch_norm is True:\n conv = BatchNormalization()(conv)\n\n if activation == 'leaky_relu':\n conv = LeakyReLU()(conv)\n else:\n conv = Activation(activation)(conv)\n\n conv = Convolution2D(size, (3, 3), dilation_rate=dilation_rate, padding='same')(conv)\n if batch_norm is True:\n conv = BatchNormalization()(conv)\n\n if activation == 'leaky_relu':\n conv = LeakyReLU()(conv)\n else:\n conv = Activation(activation)(conv)\n\n if dropout > 0:\n conv = SpatialDropout2D(dropout)(conv)\n\n return conv\n\n\ndef DilatedUnet(dropout_val=0.1,\n filters=32,\n batch_norm=True,\n patch_size=224,\n input_channels=3,\n output_classes=1):\n inputs = Input((patch_size, patch_size, input_channels))\n\n conv_224 = double_conv_layer(inputs, filters, dropout_val, batch_norm)\n pool_112 = MaxPooling2D(pool_size=(2, 2), name='pool_112')(conv_224)\n\n conv_112 = double_conv_layer(pool_112, 2 * filters, dropout_val, batch_norm)\n pool_56 = MaxPooling2D(pool_size=(2, 2), name='pool_56')(conv_112)\n\n conv_56 = double_conv_layer(pool_56, 4 * filters, dropout_val, batch_norm)\n pool_28 = MaxPooling2D(pool_size=(2, 2))(conv_56)\n\n conv_28 = double_conv_layer(pool_28, 8 * filters, dropout_val, batch_norm)\n pool_14 = MaxPooling2D(pool_size=(2, 2))(conv_28)\n\n conv_14 = double_conv_layer(pool_14, 16 * filters, dropout_val, batch_norm)\n pool_7 = MaxPooling2D(pool_size=(2, 2))(conv_14)\n\n conv_7 = double_conv_layer(pool_7, 32 * filters, dropout_val, batch_norm)\n\n up_14 = concatenate([UpSampling2D(size=(2, 2))(conv_7), conv_14])\n up_conv_14 = double_dilated_conv_layer(up_14, 16 * filters, 2, dropout_val, batch_norm)\n\n up_28 = concatenate([UpSampling2D(size=(2, 2))(up_conv_14), conv_28])\n up_conv_28 = double_dilated_conv_layer(up_28, 8 * filters, 2, dropout_val, batch_norm)\n\n up_56 = concatenate([UpSampling2D(size=(2, 2))(up_conv_28), conv_56])\n up_conv_56 = double_dilated_conv_layer(up_56, 4 * filters, 2, dropout_val, batch_norm)\n\n up_112 = concatenate([UpSampling2D(size=(2, 2))(up_conv_56), conv_112])\n up_conv_112 = double_dilated_conv_layer(up_112, 2 * filters, 3, dropout_val, batch_norm)\n\n up_224 = concatenate([UpSampling2D(size=(2, 2))(up_conv_112), conv_224])\n up_conv_224 = double_dilated_conv_layer(up_224, filters, 4, dropout_val, batch_norm)\n\n 
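# Note: the decoder above widens the receptive field step by step -- dilation rate 2 in the 14/28/56 up-blocks, then rate 3 at 112 and rate 4 at 224 -- before the final 1x1 projection below.\n 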
conv_final = Conv2D(output_classes, (1, 1))(up_conv_224)\n\n if output_classes == 1:\n conv_final = Activation('sigmoid')(conv_final)\n else:\n conv_final = Activation('softmax')(conv_final)\n\n model = Model(inputs, conv_final, name=\"DilatedUnet\")\n\n return model\n","sub_path":"lib/keras/dilated_unet.py","file_name":"dilated_unet.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309351839","text":"from litesata.common import *\n\nfrom litex.gen.genlib.cdc import MultiReg\nfrom litex.gen.genlib.resetsync import AsyncResetSynchronizer\nfrom litex.gen.genlib.misc import WaitTimer\n\n\nclass K7LiteSATAPHYCRG(Module):\n def __init__(self, clock_pads_or_refclk, pads, gtx, revision, clk_freq):\n self.tx_reset = Signal()\n self.rx_reset = Signal()\n self.ready = Signal()\n self.cplllock = Signal()\n\n self.clock_domains.cd_sata_tx = ClockDomain()\n self.clock_domains.cd_sata_rx = ClockDomain()\n\n # CPLL\n # (sata_gen3) 150MHz / VCO @ 3GHz / Line rate @ 6Gbps\n # (sata_gen2 & sata_gen1) VCO still @ 3 GHz, Line rate is\n # decreased with output dividers.\n if isinstance(clock_pads_or_refclk, Signal):\n self.refclk = clock_pads_or_refclk\n else:\n self.refclk = Signal()\n clock_pads = clock_pads_or_refclk\n self.specials += Instance(\"IBUFDS_GTE2\",\n i_CEB=0,\n i_I=clock_pads.refclk_p,\n i_IB=clock_pads.refclk_n,\n o_O=self.refclk\n )\n\n self.comb += gtx.gtrefclk0.eq(self.refclk)\n\n # TX clocking\n # (sata_gen3) 150MHz from CPLL TXOUTCLK, sata_tx clk @ 300MHz (16-bits) / 150MHz (32-bits)\n # (sata_gen2) 150MHz from CPLL TXOUTCLK, sata_tx clk @ 150MHz (16-bits) / 75MHz (32-bits)\n # (sata_gen1) 150MHz from CPLL TXOUTCLK, sata_tx clk @ 75MHz (16-bits) / 37.5MHz (32-bits)\n mmcm_mult = 8.0\n mmcm_div_config = {\n \"sata_gen1\": 16.0*gtx.dw/16,\n \"sata_gen2\": 8.0*gtx.dw/16,\n \"sata_gen3\": 4.0*gtx.dw/16\n }\n mmcm_div = mmcm_div_config[revision]\n use_mmcm = mmcm_mult/mmcm_div != 1.0\n\n if use_mmcm:\n mmcm_reset = Signal()\n mmcm_locked_async = Signal()\n mmcm_locked = Signal()\n mmcm_fb = Signal()\n mmcm_clk_i = Signal()\n mmcm_clk0_o = Signal()\n self.specials += [\n Instance(\"BUFG\", i_I=gtx.txoutclk, o_O=mmcm_clk_i),\n Instance(\"MMCME2_ADV\",\n p_BANDWIDTH=\"HIGH\", p_COMPENSATION=\"ZHOLD\", i_RST=mmcm_reset, o_LOCKED=mmcm_locked_async,\n\n # DRP\n i_DCLK=0, i_DEN=0, i_DWE=0, #o_DRDY=,\n i_DADDR=0, i_DI=0, #o_DO=,\n\n # VCO\n p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=6.66667,\n p_CLKFBOUT_MULT_F=mmcm_mult, p_CLKFBOUT_PHASE=0.000, p_DIVCLK_DIVIDE=1,\n i_CLKIN1=mmcm_clk_i, i_CLKFBIN=mmcm_fb, o_CLKFBOUT=mmcm_fb,\n\n # CLK0\n p_CLKOUT0_DIVIDE_F=mmcm_div, p_CLKOUT0_PHASE=0.000, o_CLKOUT0=mmcm_clk0_o,\n ),\n Instance(\"BUFG\", i_I=mmcm_clk0_o, o_O=self.cd_sata_tx.clk),\n MultiReg(mmcm_locked_async, mmcm_locked, \"sys\"),\n ]\n else:\n mmcm_locked = Signal(reset=1)\n mmcm_reset = Signal()\n self.specials += Instance(\"BUFG\", i_I=gtx.txoutclk, o_O=self.cd_sata_tx.clk)\n\n self.comb += [\n gtx.txusrclk.eq(self.cd_sata_tx.clk),\n gtx.txusrclk2.eq(self.cd_sata_tx.clk)\n ]\n\n # RX clocking\n # (sata_gen3) sata_rx recovered clk @ @ 300MHz (16-bits) / 150MHz (32-bits) from GTX RXOUTCLK\n # (sata_gen2) sata_rx recovered clk @ @ 150MHz (16-bits) / 75MHz (32-bits) from GTX RXOUTCLK\n # (sata_gen1) sata_rx recovered clk @ @ 75MHz (16-bits) / 37.5MHz (32-bits) from GTX RXOUTCLK\n self.specials += [\n Instance(\"BUFG\", i_I=gtx.rxoutclk, o_O=self.cd_sata_rx.clk),\n ]\n self.comb += [\n 
gtx.rxusrclk.eq(self.cd_sata_rx.clk),\n gtx.rxusrclk2.eq(self.cd_sata_rx.clk)\n ]\n\n # Configuration Reset\n # After configuration, GTX's resets have to stay low for at least 500ns\n # See AR43482\n startup_cycles = ceil(500*clk_freq/1000000000)\n startup_timer = WaitTimer(startup_cycles)\n self.submodules += startup_timer\n self.comb += startup_timer.wait.eq(~(self.tx_reset | self.rx_reset))\n\n # TX Startup FSM\n self.tx_ready = Signal() \n self.gttxreset = Signal()\n self.cpllreset = Signal()\n self.txuserrdy = Signal()\n self.tx_startup_fsm = tx_startup_fsm = ResetInserter()(FSM(reset_state=\"IDLE\"))\n self.submodules += tx_startup_fsm\n\n txphaligndone = Signal(reset=1)\n txphaligndone_rising = Signal()\n self.sync += txphaligndone.eq(gtx.txphaligndone)\n self.sync += gtx.gttxreset.eq(self.gttxreset)\n self.sync += gtx.cpllreset.eq(self.cpllreset)\n self.sync += gtx.txuserrdy.eq(self.txuserrdy)\n self.comb += txphaligndone_rising.eq(gtx.txphaligndone & ~txphaligndone)\n\n # Wait 500ns of AR43482\n tx_startup_fsm.act(\"IDLE\",\n If(startup_timer.done,\n NextState(\"RESET_ALL\")\n )\n )\n # Reset CPLL, MMCM, GTX\n tx_startup_fsm.act(\"RESET_ALL\",\n self.cpllreset.eq(1),\n mmcm_reset.eq(1),\n self.gttxreset.eq(1),\n If(~self.cplllock,\n NextState(\"RELEASE_CPLL\")\n )\n )\n # Release CPLL reset and wait for lock\n tx_startup_fsm.act(\"RELEASE_CPLL\",\n mmcm_reset.eq(1),\n self.gttxreset.eq(1),\n If(self.cplllock,\n NextState(\"RELEASE_MMCM\")\n )\n )\n # Release MMCM reset and wait for lock\n tx_startup_fsm.act(\"RELEASE_MMCM\",\n self.gttxreset.eq(1),\n If(mmcm_locked,\n NextState(\"RELEASE_GTX\")\n )\n )\n # Release GTX reset and wait for GTX resetdone\n # (from UG476, GTX is reseted on falling edge\n # of gttxreset)\n tx_startup_fsm.act(\"RELEASE_GTX\",\n self.txuserrdy.eq(1),\n If(gtx.txresetdone,\n NextState(\"ALIGN\")\n )\n )\n # Start Delay alignment (Pulse)\n tx_startup_fsm.act(\"ALIGN\",\n self.txuserrdy.eq(1),\n gtx.txdlyreset.eq(1),\n NextState(\"WAIT_ALIGN\")\n )\n # Wait Delay alignment\n tx_startup_fsm.act(\"WAIT_ALIGN\",\n self.txuserrdy.eq(1),\n If(gtx.txdlyresetdone,\n NextState(\"WAIT_FIRST_ALIGN_DONE\")\n )\n )\n # Wait 2 rising edges of txphaligndone\n # (from UG476 in buffer bypass config)\n tx_startup_fsm.act(\"WAIT_FIRST_ALIGN_DONE\",\n self.txuserrdy.eq(1),\n If(txphaligndone_rising,\n NextState(\"WAIT_SECOND_ALIGN_DONE\")\n )\n )\n tx_startup_fsm.act(\"WAIT_SECOND_ALIGN_DONE\",\n self.txuserrdy.eq(1),\n If(txphaligndone_rising,\n NextState(\"READY\")\n )\n )\n tx_startup_fsm.act(\"READY\",\n self.txuserrdy.eq(1),\n self.tx_ready.eq(1)\n )\n\n tx_ready_timer = WaitTimer(2*clk_freq//1000)\n self.submodules += tx_ready_timer\n self.comb += [\n tx_ready_timer.wait.eq(~self.tx_ready & ~tx_startup_fsm.reset),\n tx_startup_fsm.reset.eq(self.tx_reset | tx_ready_timer.done),\n ]\n\n\n # RX Startup FSM\n self.rx_ready = Signal() \n self.gtrxreset = Signal()\n self.rxuserrdy = Signal()\n self.rx_startup_fsm = rx_startup_fsm = ResetInserter()(FSM(reset_state=\"IDLE\"))\n self.submodules += rx_startup_fsm\n\n cdr_stable_timer = WaitTimer(1024)\n self.submodules += cdr_stable_timer\n\n rxphaligndone = Signal(reset=1)\n rxphaligndone_rising = Signal()\n self.sync += rxphaligndone.eq(gtx.rxphaligndone)\n self.sync += gtx.gtrxreset.eq(self.gtrxreset)\n self.sync += gtx.rxuserrdy.eq(self.rxuserrdy)\n self.comb += rxphaligndone_rising.eq(gtx.rxphaligndone & ~rxphaligndone)\n\n # Wait 500ns of AR43482\n rx_startup_fsm.act(\"IDLE\",\n If(startup_timer.done,\n 
NextState(\"RESET_GTX\")\n )\n )\n # Reset GTX\n rx_startup_fsm.act(\"RESET_GTX\",\n self.gtrxreset.eq(1),\n If(~self.gttxreset,\n NextState(\"WAIT_CPLL\")\n )\n )\n # Wait for CPLL lock\n rx_startup_fsm.act(\"WAIT_CPLL\",\n self.gtrxreset.eq(1),\n If(self.cplllock,\n NextState(\"RELEASE_GTX\")\n )\n )\n # Release GTX reset and wait for GTX resetdone\n # (from UG476, GTX is reseted on falling edge\n # of gttxreset)\n rx_startup_fsm.act(\"RELEASE_GTX\",\n self.rxuserrdy.eq(1),\n cdr_stable_timer.wait.eq(1),\n If(gtx.rxresetdone & cdr_stable_timer.done,\n NextState(\"ALIGN\")\n )\n )\n # Start Delay alignment (Pulse)\n rx_startup_fsm.act(\"ALIGN\",\n self.rxuserrdy.eq(1),\n gtx.rxdlyreset.eq(1),\n NextState(\"WAIT_ALIGN\")\n )\n # Wait Delay alignment\n rx_startup_fsm.act(\"WAIT_ALIGN\",\n self.rxuserrdy.eq(1),\n If(gtx.rxdlyresetdone,\n NextState(\"WAIT_FIRST_ALIGN_DONE\")\n )\n )\n # Wait 2 rising edges of rxphaligndone\n # (from UG476 in buffer bypass config)\n rx_startup_fsm.act(\"WAIT_FIRST_ALIGN_DONE\",\n self.rxuserrdy.eq(1),\n If(rxphaligndone_rising,\n NextState(\"WAIT_SECOND_ALIGN_DONE\")\n )\n )\n rx_startup_fsm.act(\"WAIT_SECOND_ALIGN_DONE\",\n self.rxuserrdy.eq(1),\n If(rxphaligndone_rising,\n NextState(\"READY\")\n )\n )\n rx_startup_fsm.act(\"READY\",\n self.rxuserrdy.eq(1),\n self.rx_ready.eq(1)\n )\n\n rx_ready_timer = WaitTimer(2*clk_freq//1000)\n self.submodules += rx_ready_timer\n self.comb += [\n rx_ready_timer.wait.eq(~self.rx_ready & ~rx_startup_fsm.reset),\n rx_startup_fsm.reset.eq(self.rx_reset | rx_ready_timer.done),\n ]\n\n # Ready\n self.comb += self.ready.eq(self.tx_ready & self.rx_ready)\n\n # Reset for SATA TX/RX clock domains\n self.specials += [\n AsyncResetSynchronizer(self.cd_sata_tx, ~(gtx.cplllock & mmcm_locked) | self.tx_reset),\n AsyncResetSynchronizer(self.cd_sata_rx, ~gtx.cplllock | self.rx_reset),\n MultiReg(gtx.cplllock, self.cplllock, \"sys\"),\n ]\n","sub_path":"litesata/phy/k7/crg.py","file_name":"crg.py","file_ext":"py","file_size_in_byte":10637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"133797580","text":"from typing import TypeVar\nfrom data_structures.list_node import ListNode\n\nT = TypeVar('T')\n\n\ndef get_list_values(head: T):\n node = head\n values = []\n while node:\n values.append(node.val)\n node = node.next\n return values\n\n\ndef get_cyclic_list_values(head: ListNode):\n if not head:\n return []\n\n values = [head.val]\n node = head.next\n\n while node is not head:\n values.append(node.val)\n node = node.next\n return values\n","sub_path":"test_helpers/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"82425070","text":"#\n# @lc app=leetcode.cn id=8 lang=python3\n#\n# [8] 字符串转换整数 (atoi)\n#\n# https://leetcode-cn.com/problems/string-to-integer-atoi/description/\n#\n# algorithms\n# Medium (16.50%)\n# Total Accepted: 32.1K\n# Total Submissions: 194.2K\n# Testcase Example: '\"42\"'\n#\n# 请你来实现一个 atoi 函数,使其能将字符串转换成整数。\n#\n# 首先,该函数会根据需要丢弃无用的开头空格字符,直到寻找到第一个非空格的字符为止。\n#\n#\n# 当我们寻找到的第一个非空字符为正或者负号时,则将该符号与之后面尽可能多的连续数字组合起来,作为该整数的正负号;假如第一个非空字符是数字,则直接将其与之后连续的数字字符组合起来,形成整数。\n#\n# 该字符串除了有效的整数部分之后也可能会存在多余的字符,这些字符可以被忽略,它们对于函数不应该造成影响。\n#\n# 注意:假如该字符串中的第一个非空格字符不是一个有效整数字符、字符串为空或字符串仅包含空白字符时,则你的函数不需要进行转换。\n#\n# 在任何情况下,若函数不能进行有效的转换时,请返回 0。\n#\n# 说明:\n#\n# 假设我们的环境只能存储 32 位大小的有符号整数,那么其数值范围为 [−2^31,  2^31 − 1]。如果数值超过这个范围,qing返回\n# INT_MAX (2^31 − 1) 或 
INT_MIN (−2^31).\n#\n# Example 1:\n#\n# Input: \"42\"\n# Output: 42\n#\n#\n# Example 2:\n#\n# Input: \" -42\"\n# Output: -42\n# Explanation: The first non-whitespace character is '-', which is a minus sign.\n# We combine the minus sign with all the consecutive digits that follow, and finally get -42.\n#\n#\n# Example 3:\n#\n# Input: \"4193 with words\"\n# Output: 4193\n# Explanation: Conversion stops at the digit '3', because the next character is not a digit.\n#\n#\n# Example 4:\n#\n# Input: \"words and 987\"\n# Output: 0\n# Explanation: The first non-whitespace character is 'w', but it is not a digit or a plus/minus sign.\n# Therefore no valid conversion can be performed.\n#\n# Example 5:\n#\n# Input: \"-91283472332\"\n# Output: -2147483648\n# Explanation: The number \"-91283472332\" exceeds the range of a 32-bit signed integer.\n# Therefore INT_MIN (−2^31) is returned.\n#\n#\n#\nclass Solution:\n\n def __init__(self):\n self.nums = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}\n\n def myAtoi(self, string: str):\n\n # Check whether the sequence contains any digit at all\n status_0 = False\n for char in self.nums:\n if char in string:\n status_0 = True\n break\n\n if not status_0:\n return 0\n\n # Strip whitespace and quote characters\n string = string.replace(' ', '')\n string = string.replace('\"', '')\n string = string.replace('\"\"', '')\n\n if string[0] not in {'-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}:\n return 0\n\n status_1 = False # Flag marking whether the number is negative; defaults to False\n if string[0] == '-':\n status_1 = True\n\n # Drop irrelevant characters\n # new_string = ''.join([s for s in string if s in self.nums])\n num_list = []\n for i, s in enumerate(string):\n if s in self.nums:\n num_list.append(s)\n break\n\n for s in string[i + 1:]:\n if s not in self.nums:\n break\n num_list.append(s)\n\n new_string = ''.join(num_list)\n Max = 1 << 31\n if not status_1:\n new_num = int(new_string)\n if new_num > Max - 1:\n return Max - 1\n\n else:\n new_num = -int(new_string)\n if new_num < -Max:\n return -Max\n\n return new_num\n\n\nS = Solution()\nres = S.myAtoi('+-2')\n# expected: 2\nprint(res)\n","sub_path":"leetcode/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"56892790","text":"from deepaffects.realtime.util import get_deepaffects_client, chunk_generator_from_file, chunk_generator_from_url\n\nTIMEOUT_SECONDS = 10000\napikey = \"YOUR_API_KEY\"\nfile_path = \"FILE_PATH\"\nis_youtube_url = False\nlanguageCode = \"en-Us\"\nsampleRate = \"16000\"\nencoding = \"wav\"\nspeakerIds = \"list of userids for speaker verification separated by ','\"\nverbose = \"True\"\n# DeepAffects realtime Api client\nclient = get_deepaffects_client()\n\nmetadata = [\n ('apikey', apikey),\n ('encoding', encoding),\n ('speakerids', speakerIds),\n ('samplerate', sampleRate),\n ('languagecode', languageCode),\n ('verbose', verbose)\n]\n\n# Implement chunk_generator(), a generator function which yields segment_chunk objects asynchronously\n# from deepaffects.realtime.types import segment_chunk\n# yield segment_chunk(Args)\n\"\"\"segment_chunk.\n\nArgs:\n encoding : Audio Encoding,\n languageCode: language code ,\n sampleRate: sample rate of audio ,\n content: base64 encoded audio,\n segmentOffset: offset of the segment in complete audio stream\n\"\"\"\n\n\"\"\"\nSample implementation which reads audio from a file and splits it into\nsegments more than 3 sec\nAudioSegment and yields base64 encoded audio segment objects asynchronously\n\"\"\"\n\n\"\"\"Stream audio from url or youtube.\n\nresponses = client.DiarizeEmotion(\n chunk_generator_from_url(file_path, is_youtube_url=is_youtube_url), TIMEOUT_SECONDS, metadata=metadata)\n\"\"\"\n\n\"\"\"Stream audio from local file.\n\"\"\"\nresponses = client.DiarizeEmotion(\n chunk_generator_from_file(file_path), TIMEOUT_SECONDS, metadata=metadata)\n\n\n# responses is the iterator for all the response values\nfor response in 
responses:\n print(\"Received message\")\n print(response)\n\n\"\"\"Response.\n response = {\n userId: userId of the speaker identified in the segment,\n emotion: Emotion identified in the segment,\n start: start of the segment,\n end: end of the segment\n }\n\"\"\"\n","sub_path":"examples/diarize_emotion_example.py","file_name":"diarize_emotion_example.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"182825490","text":"# -*- coding: utf-8 -*-\n\n\"\"\"This module contains functions for making namespace suggestions\"\"\"\n\nimport logging\n\nimport requests\nfrom fuzzywuzzy import process, fuzz\nfrom requests.compat import quote_plus\n\n__all__ = [\n 'get_user_ols_search_url',\n 'get_ols_suggestion',\n 'get_ols_search',\n 'help_suggest_name',\n]\n\nlog = logging.getLogger(__name__)\n\nOLS_USER_SEARCH_FMT = 'http://www.ebi.ac.uk/ols/search?q={}'\nOLS_MACHINE_SUGGESTION_FMT = 'http://www.ebi.ac.uk/ols/api/suggest?q={}'\nOLS_MACHINE_SEARCH_FMT = 'http://www.ebi.ac.uk/ols/api/search?q={}'\n\n\ndef get_user_ols_search_url(name):\n \"\"\"Gets the URL of the page a user should check when they're not sure about an entity's name\"\"\"\n return OLS_USER_SEARCH_FMT.format(quote_plus(name))\n\n\ndef get_ols_suggestion_url(name):\n return OLS_MACHINE_SUGGESTION_FMT.format(quote_plus(name))\n\n\ndef get_ols_suggestion(name):\n \"\"\"Gets suggestions from the Ontology Lookup Service for which name is best\"\"\"\n # the URL helper already quote-plus encodes the name, so pass it through unencoded\n res = requests.get(get_ols_suggestion_url(name))\n return res.json()\n\n\ndef get_ols_search_url(name):\n return OLS_MACHINE_SEARCH_FMT.format(quote_plus(name))\n\n\ndef get_ols_search(name):\n \"\"\"Performs a search with the Ontology Lookup Service\"\"\"\n res = requests.get(get_ols_search_url(name))\n return res.json()\n\n\ndef help_suggest_name(namespace, name, metadata_parser, suggestion_cache):\n \"\"\"Helps populate a suggestion cache for missing names\n\n :param namespace: The namespace to search\n :type namespace: str\n :param name: The putative name in the namespace\n :type name: str\n :param metadata_parser: A metadata parser, which contains the namespace dictionary\n :type metadata_parser: pybel.parser.parse_metadata.MetadataParser\n :param suggestion_cache: A defaultdict of lists\n :type suggestion_cache: dict or defaultdict\n :return: The list of suggested names cached for this namespace/name pair\n \"\"\"\n if (namespace, name) in suggestion_cache:\n return suggestion_cache[namespace, name]\n\n if namespace not in metadata_parser.namespace_dict:\n raise ValueError('Namespace not cached: {}'.format(namespace))\n\n terms = set(metadata_parser.namespace_dict[namespace])\n\n for putative, _ in process.extract(name, terms, scorer=fuzz.partial_token_sort_ratio, limit=5):\n suggestion_cache[namespace, name].append(putative)\n\n return suggestion_cache[namespace, name]\n\n\nif __name__ == '__main__':\n from pybel.utils import get_bel_resource\n import os\n import json\n ptsd_ns_path = os.path.join(os.environ['OWNCLOUD_BASE'], 'namespaces', 'ptsd.belns')\n ns = get_bel_resource(ptsd_ns_path)\n\n c = 0\n for name in ns['Values']:\n r = get_ols_search(name)\n\n print(name)\n if r['response']['numFound'] == 0:\n continue\n print(json.dumps(r['response']['docs'], indent=2))\n c += 1\n if c > 20:\n break\n\n","sub_path":"src/pybel_tools/recuration/suggestions.py","file_name":"suggestions.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
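A quick aside on the `help_suggest_name` helper above: its ranking step is just fuzzywuzzy's `process.extract` with the `partial_token_sort_ratio` scorer over the cached namespace terms. A minimal standalone sketch of that core, with a made-up term set for illustration:

```python
from fuzzywuzzy import fuzz, process

# Hypothetical namespace contents; in the module above these come from
# metadata_parser.namespace_dict[namespace].
terms = {"MAPK1", "MAPK3", "AKT1", "AKT2", "TP53"}

# Rank the five closest candidates for a name with no exact match.
for candidate, score in process.extract("MAPK-1", terms,
                                        scorer=fuzz.partial_token_sort_ratio,
                                        limit=5):
    print(candidate, score)
```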
+{"seq_id":"170956179","text":"#!/usr/bin/python3\n\"\"\" init flask files \"\"\"\n\n\nfrom os import getenv\nfrom models import storage\nfrom flask import Flask, jsonify\nfrom api.v1.views import app_views\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\nCORS(app, resources={r\"/api/*\": {\"origins\": \"0.0.0.0\"}})\napp.register_blueprint(app_views)\n\n\n@app.teardown_appcontext\ndef teardown_(exc):\n \"\"\"close session\"\"\"\n storage.close()\n\n\n@app.errorhandler(404)\ndef error_404(msj):\n \"\"\"Handler error 404\"\"\"\n msj = {\"error\": \"Not found\"}\n return(jsonify(msj), 404)\n\nif __name__ == \"__main__\":\n host = getenv('HBNB_API_HOST', '0.0.0.0')\n port = getenv('HBNB_API_PORT', '5000')\n app.run(host=host, port=port, threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203138562","text":"# Program for Battleship\n\nimport string\nimport random\n\n\ndef read_field(path):\n \"\"\"\n (data) -> (dict)\n :param path - txt file with Battleship field\n :return field_dict - Battleship field dictionary\n Reads from path field and converts it into dict of coordinates\n \"\"\"\n with open(path, 'r', errors='ignore', encoding='utf-8') as f:\n lines = f.readlines()\n letters = list(string.ascii_uppercase)\n field_dict = dict()\n field = list()\n part_field = list()\n for line in lines:\n for ship in line:\n if ship != \"*\":\n part_field.append(0)\n else:\n part_field.append(1)\n field.append(part_field)\n part_field = []\n longest_line = sorted(field, key=lambda x:\n len(x), reverse=True)[0]\n for number in range(0, len(field)):\n while len(field[number]) != len(longest_line):\n field[number].append(0)\n field_dict[letters[number]] = field[number][:-1]\n\n return field_dict\n\n\ndef has_ship(field_dict, coordinates):\n \"\"\"\n (data, tuple) -> bool\n :param field_dict - Battleship field\n :param coordinates - tuple of coordinates\n Checks whether is ship on certain coordinate or not\n \"\"\"\n try:\n if field_dict.get(coordinates[0])[coordinates[1] - 1] == 'X':\n return 1\n else:\n return 0\n except IndexError:\n return - 1\n\n\ndef ship_size(field_dict, coordinates):\n \"\"\"\n (data, tuple) -> (tuple)\n :param field_dict - Battleship field\n :param coordinates - tuple of coordinates\n :return tuple of x length and y length of the ship\n Counts accurate ship size on certain coordinate\n \"\"\"\n letters = list(string.ascii_uppercase)\n num = field_dict.get(coordinates[0])\n index = coordinates[1] - 1\n count = 0\n if has_ship(field_dict, coordinates):\n for n in reversed(num[:index]):\n if n == 0:\n break\n else:\n count += 1\n horyzon_field = [x[index] for x in field_dict.values()]\n ship_index = letters.index(coordinates[0])\n if all(item == 'X' for item in\n num[index: index + 4]) and \\\n len(num[index: index + 4]) == 4:\n res = 4 + count\n return [(4 + count, 1), res]\n elif all(item == 'X' for item in num[index:\n index + 3]) and \\\n len(num[index: index + 3]) == 3:\n res = 3 + count\n return [(3 + count, 1), res]\n elif all(item == 'X' for item in num[index:\n index + 2]) and \\\n len(num[index: index + 2]) == 2:\n res = 2 + count\n return [(2 + count, 1), res]\n elif all(item == 'X' for item in num[index: index + 1]):\n value = 0\n for s in horyzon_field[ship_index:]:\n if s == 0:\n break\n else:\n value += 1\n for number in reversed(horyzon_field[:ship_index]):\n if number == 0:\n break\n else:\n value += 1\n 
res = value + count\n return [(1, value + count), res]\n\n\ndef is_valid(field_dict):\n \"\"\"\n (data) -> (bool)\n :param field_dict - Battleship dictionary\n :return bool - True or False\n Checks if converted field is valid for battleship game\n \"\"\"\n letters = list(string.ascii_uppercase)\n four_ship = 0\n three_ship = 0\n two_ship = 0\n one_ship = 0\n new_list = [x for x in field_dict.values()]\n for k in range(0, len(new_list)):\n for y in range(0, len(new_list[k]) + 1):\n try:\n coordinates = (letters[k], y)\n res = ship_size(field_dict, coordinates)\n if res[1] == 1:\n one_ship += 1\n elif res[1] == 2:\n two_ship += 1\n elif res[1] == 3:\n three_ship += 1\n elif res[1] == 4:\n four_ship += 1\n except TypeError:\n continue\n return True if one_ship == 4 and \\\n int(two_ship / 2) == 3 and \\\n int(three_ship / 3) == 2 and \\\n int(four_ship / 4) == 1 else False\n\n\ndef is_horizon(field_generator, letters, letter, index, let):\n \"\"\"\n (dict, list, int, int, int) -> (bool)\n :param field_generator - Battleship field\n :param letters - list of ascii letters\n :param letter - index of letter in list of letters\n :param index - random chosen index in range len field\n :param let - ship size\n :return bool - True or False\n Checks whether random chosen coordinates are suitable for ship with\n len let\n \"\"\"\n a = index\n if letter != 0 and letter != 9:\n if all(item != 'X' for item in\n field_generator.get(letters[letter])[index:\n index + let + 1]):\n if all(item != 'X' for item in\n field_generator.get(letters[letter + 1])[index:\n a + let + 1]):\n if all(item != 'X' for item in\n field_generator.get(letters[letter - 1])[index:\n a + let + 1]):\n if field_generator.get(letters[letter])[index - 1] != 1:\n return True\n elif letter == 9:\n if all(item != 'X' for item in\n field_generator.get(letters[letter])[index:\n index + let + 1]) and \\\n all(item != 'X' for item in\n field_generator.get(letters[letter - 1])[index:\n a + let +\n 1]) and \\\n field_generator.get(letters[letter])[index - 1] != 1:\n return True\n else:\n if all(item != 'X' for item in\n field_generator.get(letters[letter])[index: index + let + 1]):\n if all(item != 'X' for item in\n field_generator.get(letters[letter + 1])[index:\n index + let + 1]):\n if field_generator.get(letters[letter])[index - 1] != 1:\n return True\n\n\ndef is_vertical(field_generator, letter, index, let):\n \"\"\"\n (dict, int, int, int) -> (bool)\n :param field_generator - Battleship field\n :param letter - index if letter in list if letters\n :param index - random chosen index in range len field\n :param let - ship size\n :return bool - True or False\n Checks whether random chosen coordinates are suitable for ship with len let\n \"\"\"\n try:\n if index != 0 and index != 9:\n checker_1 = [item[index - 1] for item in\n field_generator.values()]\n checker_2 = [item[index + 1] for item in\n field_generator.values()]\n l = [item[index] for item in\n field_generator.values()]\n if all(item != 'X' for item in\n checker_1[letter - 1: letter + let]) and \\\n all(item != 'X' for item in\n checker_2[letter - 1: letter + let]) and \\\n all(item != 'X' for item in\n l[letter - 1: letter + let]):\n return True\n\n elif index == 9:\n checker_1 = [item[index - 1] for item in field_generator.values()]\n l = [item[index] for item in field_generator.values()]\n if all(item != 'X' for item in checker_1[letter - 1:\n letter + let]) and \\\n all(item != 'X' for item in l[letter - 1: letter + let]):\n return True\n else:\n checker_2 = [item[index + 1] for 
item in field_generator.values()]\n l = [item[index] for item in field_generator.values()]\n if all(item != 'X' for item in checker_2[letter - 1:\n letter + let]) and \\\n all(item != 'X' for item in l[letter - 1: letter + let]):\n return True\n\n except:\n return False\n\n\ndef generate_field():\n \"\"\"\n (None) -> (dict)\n :return field_generator - randomly generated field for game\n :return list of ship information\n Generates a random field for the battleship game\n \"\"\"\n count = 0\n i = 0\n lst = []\n letters = list(string.ascii_uppercase)[:10]\n field_generator = dict()\n while i < 10:\n while count < 10:\n lst.append(0)\n count += 1\n field_generator[letters[i]] = lst\n count = 0\n lst = []\n i += 1\n bow = []\n ship_size = [1, 1, 1, 1, 2, 2, 2, 3, 3, 4]\n ship_type = ['vertical', 'horizon']\n while len(ship_size) != 0:\n try:\n # choose a random ship, position and orientation\n let = ship_size.pop()\n letter = random.randrange(0, len(letters))\n index = random.randrange(0, 9)\n t = random.choice(ship_type)\n if t == 'horizon':\n a = field_generator.get(letters[letter])[index + let - 1]\n if is_horizon(field_generator, letters, letter, index, let):\n i = 0\n ship = 'X'\n while i < let:\n field_generator.get(letters[letter])[index + i] = ship\n i += 1\n bow.append([(letters[letter], index + 1),\n (let, 1), True, []])\n else:\n ship_size.append(let)\n continue\n else:\n vertaical_list = [item[index] for item in\n field_generator.values()][letter + let - 1]\n if is_vertical(field_generator, letter, index, let):\n i = 0\n ship = 'X'\n while i < let:\n field_generator.get(letters[letter + i])[index] = ship\n i += 1\n bow.append([(letters[letter], index + 1),\n (1, let), False, []])\n else:\n ship_size.append(let)\n except IndexError:\n ship_size.append(let)\n continue\n return field_generator, bow\n\n\ndef field_checker():\n \"\"\"\n (None) -> (dict, list)\n Keeps generating random fields until a valid one is produced\n \"\"\"\n field, bow = generate_field()\n while not is_valid(field):\n field, bow = generate_field()\n return field, bow\n\n\ndef field_to_str(field):\n \"\"\"\n (data) -> (str)\n :param field - Battleship field\n :return field_str - printable representation of the field\n Converts a field in list-of-lists (matrix) format\n to string format\n \"\"\"\n data = [x for x in field.values()]\n line = \" *******************************************\\\\n\"\n nums_str = \" 1 2 3 4 5 6 7 8 9 10\\\\n\"\n field_str = \"\"+nums_str+line\n letters = list(string.ascii_uppercase)[:10]\n for i in range(len(data)):\n field_str += \" \" + letters[i] + \"|\"\n for j in range(len(data[i])):\n field_str += \" \" + str(data[i][j]) + \"|\"\n field_str += \"\\\\n\"\n field_str += line\n return field_str\n\n\ndef main():\n \"\"\"\n (None) -> (None)\n Main function to run the program\n \"\"\"\n field, bow = field_checker()\n field_to_str(field)\n\n\nmain()\n","sub_path":"part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":11826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"469621534","text":"#\n# [406] Queue Reconstruction by Height\n#\n# https://leetcode.com/problems/queue-reconstruction-by-height/description/\n#\n# algorithms\n# Medium (56.78%)\n# Total Accepted: 53.5K\n# Total Submissions: 94.2K\n# Testcase Example: '[[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]]'\n#\n# Suppose you have a random list of people standing in a queue. 
Each person is\n# described by a pair of integers (h, k), where h is the height of the person\n# and k is the number of people in front of this person who have a height\n# greater than or equal to h. Write an algorithm to reconstruct the queue.\n#\n#\n# Note:\n# The number of people is less than 1,100.\n#\n#\n#\n#\n# Example\n#\n# Input:\n# [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]\n#\n# Output:\n# [[5,0], [7,0], [5,2], [6,1], [4,4], [7,1]]\n#\n#\n#\n\n# Jarron:\n# - sort by (-height, k)\n# - insert in order; later insertions will not affect k\n# - O(N^2), each insert takes O(n)\n\n\n# REVIEW:\n# - because all elements already placed are at least as tall as the current one, just insert at index k\n\n\nclass Solution:\n def reconstructQueue(self, people):\n \"\"\"\n :type people: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n\n people.sort(key=lambda x: (-x[0], x[1]))\n\n ret = []\n for pair in people:\n height, k = pair\n ret.insert(k, pair)\n return ret\n","sub_path":"src/406.queue-reconstruction-by-height.python3.py","file_name":"406.queue-reconstruction-by-height.python3.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"211327902","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\nR, C, T = map(int, input().split())\narea = [list(map(int, input().split())) for _ in range(R)]\n\ndust = []\ncleaner = []\n\nfor i in range(R):\n for j in range(C):\n if area[i][j] == -1:\n cleaner.append([i, j])\n else:\n dust.append([i, j])\n\nd = [(0, 1), (0, -1), (1, 0), (-1, 0)]\nsec = 0\nwhile sec != T:\n # advance time\n sec += 1\n\n # 1. Dust diffusion - loop until the current dust list is empty\n diffusion_after_adding = {}\n while dust:\n i, j = dust.pop()\n diffusion_count = 0\n dust_amount = area[i][j]\n diffusion_amount = dust_amount // 5\n if diffusion_amount > 0:\n for dx, dy in d:\n ni = i + dx\n nj = j + dy\n\n if 0 <= ni < R and 0 <= nj < C:\n if area[ni][nj] != -1: # only if this is not an air-purifier cell.\n diffusion_count += 1 # increment the diffusion count\n\n # happens simultaneously -> accumulate and add after the loop\n if diffusion_after_adding.get((ni, nj)):\n diffusion_after_adding[(ni, nj)] += diffusion_amount\n else:\n diffusion_after_adding[(ni, nj)] = diffusion_amount\n\n # after diffusing in all directions, reduce this cell\n area[i][j] = area[i][j] - (diffusion_amount) * diffusion_count\n\n # 2. add the accumulated dust after diffusion\n for key in diffusion_after_adding.keys():\n i, j = key\n area[i][j] += diffusion_after_adding[key]\n\n # 3. 
Run the air purifier\n top_cleaner = cleaner[0]\n bottom_cleaner = cleaner[1]\n\n ti, tj = top_cleaner\n for i in range(ti - 1, 0, -1):\n area[i][0] = area[i - 1][0]\n\n for j in range(0, C - 1):\n area[0][j] = area[0][j + 1]\n\n for i in range(0, ti):\n area[i][C - 1] = area[i + 1][C - 1]\n\n for j in range(C - 1, 0, -1):\n area[ti][j] = area[ti][j - 1]\n area[ti][tj + 1] = 0\n\n bi, bj = bottom_cleaner\n for i in range(bi + 1, R - 1):\n area[i][0] = area[i + 1][0]\n\n for j in range(0, C - 1):\n area[R - 1][j] = area[R - 1][j + 1]\n\n for i in range(R - 1, bi, -1):\n area[i][C - 1] = area[i - 1][C - 1]\n\n for j in range(C - 1, 1, -1):\n area[bi][j] = area[bi][j - 1]\n area[bi][bj + 1] = 0\n\n # rebuild the dust list after the purifier circulation\n new_dust = []\n for i in range(R):\n for j in range(C):\n if area[i][j] > 0:\n new_dust.append([i, j])\n dust = new_dust\n\ndust_all = 0\nfor i in range(R):\n for j in range(C):\n if area[i][j] > 0:\n dust_all += area[i][j]\nprint(dust_all)\n","sub_path":"PYTHON/BAEKJOON/17144_미세먼지안녕/17144.py","file_name":"17144.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"29227025","text":"import asyncio\nfrom abc import ABCMeta\nfrom collections.abc import MutableMapping\n\nfrom aiohttp import web\nfrom aiohttp.web_request import Request\nfrom aiohttp_session import get_session\nfrom collections.abc import Sequence\n\nAIOLOGIN_KEY = '__aiologin__'\n\nON_LOGIN = 1\nON_LOGOUT = 2\nON_AUTHENTICATED = 3\nON_FORBIDDEN = 4\nON_UNAUTHORIZED = 5\n\n\nclass AbstractUser(MutableMapping, metaclass=ABCMeta):\n def __iter__(self):\n return self.__dict__.__iter__()\n\n def __len__(self):\n return len(self.__dict__)\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __setitem__(self, key, value):\n setattr(self, key, value)\n\n def __delitem__(self, key):\n delattr(self, key)\n\n @property\n def authenticated(self):\n raise NotImplementedError()\n\n @property\n def forbidden(self):\n raise NotImplementedError()\n\n\nclass AnonymousUser(AbstractUser):\n @property\n def authenticated(self):\n return False\n\n @property\n def forbidden(self):\n return False\n\n\n# noinspection PyUnusedLocal\n@asyncio.coroutine\ndef _unauthorized(*args, **kwargs):\n raise web.HTTPUnauthorized()\n\n\n# noinspection PyUnusedLocal\n@asyncio.coroutine\ndef _forbidden(*args, **kwargs):\n raise web.HTTPForbidden()\n\n\n# noinspection PyUnusedLocal\n@asyncio.coroutine\ndef _void(*args, **kwargs):\n raise NotImplementedError()\n\n\nclass AioLogin:\n def __init__(self, request, session_name=AIOLOGIN_KEY, disabled=False,\n auth_by_form=_void, auth_by_header=_void,\n auth_by_session=_void, forbidden=_forbidden,\n unauthorized=_unauthorized, anonymous_user=AnonymousUser,\n session=get_session, signals=None):\n\n self._request = request\n self._disabled = disabled\n self._session_name = session_name\n\n self._anonymous_user = anonymous_user\n self._session = session\n\n self._auth_by_form = auth_by_form\n self._auth_by_header = auth_by_header\n self._auth_by_session = auth_by_session\n self._unauthorized = unauthorized\n self._forbidden = forbidden\n\n self._on_login = []\n self._on_logout = []\n self._on_authenticated = []\n self._on_forbidden = []\n self._on_unauthorized = []\n\n assert isinstance(signals, (type(None), Sequence)), \\\n \"Expected {!r} but received {!r}\".format(Sequence, signals)\n\n signals = [] if signals is None else signals\n for sig in signals:\n assert isinstance(sig, Sequence), \\\n \"Expected {!r} but received 
{!r}\".format(Sequence, signals)\n is_coro = asyncio.iscoroutinefunction(sig[1])\n assert len(sig) == 2 and 1 <= sig[0] <= 7 and is_coro, \\\n \"Incorrectly formatted signal argument {}\".format(sig)\n\n if sig[0] == 1:\n self._on_login.append(sig[1])\n elif sig[0] == 2:\n self._on_logout.append(sig[1])\n elif sig[0] == 3:\n self._on_authenticated.append(sig[1])\n elif sig[0] == 4:\n self._on_forbidden.append(sig[1])\n elif sig[0] == 5:\n self._on_unauthorized.append(sig[1])\n\n @asyncio.coroutine\n def authenticate(self, *args, remember=False, **kwargs):\n assert isinstance(remember, bool), \\\n \"Expected {!r} but received {!r}\".format(type(bool), type(remember))\n user = yield from self._auth_by_form(self._request, *args, **kwargs)\n if user is None:\n for coro in self._on_unauthorized:\n yield from coro(self._request)\n raise web.HTTPUnauthorized\n for coro in self._on_authenticated:\n yield from coro(self._request)\n yield from self.login(user, remember=remember)\n\n @asyncio.coroutine\n def login(self, user, remember):\n assert isinstance(user, AbstractUser), \\\n \"Expected {} but received {}\".format(type(AbstractUser), type(user))\n assert isinstance(remember, bool), \\\n \"Expected {!r} but received {!r}\".format(type(bool), type(remember))\n session = yield from self._session(self._request)\n try:\n session.remember = remember\n except:\n session['_remember'] = remember\n session[self._session_name] = dict(user)\n for coro in self._on_login:\n yield from coro(self._request)\n\n @asyncio.coroutine\n def logout(self):\n session = yield from self._session(self._request)\n session.invalidate()\n for coro in self._on_logout:\n yield from coro(self._request)\n\n @asyncio.coroutine\n def auth_by_header(self):\n key = self._request.headers.get('AUTHORIZATION', None)\n if key is None:\n return None\n return (yield from self._auth_by_header(self._request, key))\n\n @asyncio.coroutine\n def auth_by_session(self):\n session = yield from self._session(self._request)\n profile = session.get(self._session_name, None)\n if profile is None:\n return None\n user = yield from self._auth_by_session(self._request, profile)\n if user is None:\n return None\n return user\n\n @property\n def on_login(self):\n return self._on_login\n\n @property\n def disabled(self):\n return self._disabled\n\n @property\n def unauthorized(self):\n return self._unauthorized\n\n @property\n def forbidden(self):\n return self._forbidden\n\n @property\n def anonymous_user(self):\n return self._anonymous_user\n\n\ndef setup(app, **kwargs):\n app.middlewares.append(middleware_factory(**kwargs))\n\n\ndef middleware_factory(**options):\n # noinspection PyUnusedLocal\n @asyncio.coroutine\n def aiologin_middleware(app, handler):\n @asyncio.coroutine\n def aiologin_handler(*args, **kwargs):\n request = kwargs['request'] if 'request' in kwargs else args[0]\n kwargs = {k: v for (k, v) in kwargs.items() if k != 'request'}\n\n # noinspection PyTypeChecker\n manager = options.get('manager', AioLogin)\n request.aiologin = manager(request=request, **options)\n return (yield from handler(request=request, **kwargs))\n\n return aiologin_handler\n\n return aiologin_middleware\n\n\ndef secured(func):\n @asyncio.coroutine\n def wrapper(*args, **kwargs):\n request = kwargs['request'] if 'request' in kwargs else args[0]\n kwargs = {k: v for (k, v) in kwargs.items() if k != 'request'}\n if not isinstance(request, Request):\n request = args[0].request\n elif request not in args:\n args = (request,) + args\n if request.aiologin.disabled:\n return 
(yield from func(*args, **kwargs))\n user = yield from request.aiologin.auth_by_header()\n if user is None:\n user = yield from request.aiologin.auth_by_session()\n if user is None:\n user = request.aiologin.anonymous_user()\n assert isinstance(user, AbstractUser), \\\n \"Expected 'user' of type AbstractUser by got {}\".format(type(user))\n\n if not user.authenticated:\n # noinspection PyProtectedMember\n for coro in request.aiologin._on_unauthorized:\n yield from coro(request)\n return (yield from request.aiologin.unauthorized(*args, **kwargs))\n if user.forbidden:\n # noinspection PyProtectedMember\n for coro in request.aiologin._on_forbidden:\n yield from coro(request)\n return (yield from request.aiologin.forbidden(*args, **kwargs))\n request.current_user = user\n # noinspection PyProtectedMember\n for coro in request.aiologin._on_authenticated:\n yield from coro(request)\n return (yield from func(*args, **kwargs))\n\n return wrapper\n","sub_path":"aiologin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641830713","text":"import pypyodbc\r\nimport Constants \r\nclass PoliticalPartyModel:\r\n def __init__(self, politicalPartyID=0, politicalPartyName=\"\"):\r\n self.politicalPartyID = politicalPartyID\r\n self.politicalPartyName = politicalPartyName\r\n \r\n @staticmethod\r\n def getAllPolicalPartiesIDName():\r\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\r\n cur3 = conn3.cursor()\r\n \r\n sqlcmd = \"SELECT politicalPartyID, politicalPartyName FROM PoliticalPartyMaster ORDER BY politicalPartyName\"\r\n cur3.execute(sqlcmd)\r\n politicalpartiesList = []\r\n while True:\r\n crow = cur3.fetchone()\r\n if not crow:\r\n break\r\n ppmodel = PoliticalPartyModel(crow[0], crow[1])\r\n politicalpartiesList.append(ppmodel)\r\n return politicalpartiesList \r\n \r\n @staticmethod\r\n def getPoliticalPartyByID(rid):\r\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\r\n cur3 = conn3.cursor()\r\n \r\n sqlcmd = \"SELECT politicalPartyID, politicalPartyName FROM PoliticalPartyMaster WHERE politicalPartyID = '\"+str(rid)+\"'\"\r\n cur3.execute(sqlcmd)\r\n row = cur3.fetchone()\r\n politicalPartymodel = None\r\n if row:\r\n politicalPartymodel = PoliticalPartyModel(row[0], row[1])\r\n return politicalPartymodel ","sub_path":"project/2021/Evoting2019/src/PoliticalPartyModel.py","file_name":"PoliticalPartyModel.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"133548369","text":"\"\"\"\nText filters and replacements.\n\nReplaces or otherwise filters strings of text. 
All commands can be used without\na parameter, where they will use the last line spoken in the channel.\n\"\"\"\n\nimport functools\nimport random\nimport re\nimport unicodedata\n\nfrom kochira.service import Service\n\nservice = Service(__name__, __doc__)\n\n\ndef benisify(s):\n return functools.reduce(lambda acc, f: f(acc), [\n lambda s: s.lower(),\n lambda s: unicodedata.normalize('NFKD', s),\n lambda s: s.replace('x', 'cks'),\n lambda s: re.sub(r'ing','in', s),\n lambda s: re.sub(r'you', 'u', s),\n lambda s: re.sub(r'oo', 'u', s),\n lambda s: re.sub(r'ck\\b', 'g', s),\n lambda s: re.sub(r'ck', 'gg', s),\n lambda s: re.sub(r'\\bthe\\b', 'da', s),\n lambda s: re.sub(r'(t+)', lambda x: 'd' * len(x.group(1)), s),\n lambda s: s.replace('p', 'b'),\n lambda s: re.sub(r'\\bc', 'g', s),\n lambda s: re.sub(r'\\bis\\b', 'are', s),\n lambda s: re.sub(r'c+(?![eiy])', 'g', s),\n lambda s: re.sub(r'know', 'no', s),\n lambda s: re.sub(r'kn', 'n', s),\n lambda s: re.sub(r'[qk]', 'g', s),\n lambda s: re.sub(r'([?!.]|$)+', lambda x: (x.group(0) * random.randint(2, 5)) + \" \" + \"\".join((\":\" * random.randint(1, 2)) + (\"D\" * random.randint(1, 4)) for _ in range(random.randint(2, 5))), s),\n ], s)\n\n\n\nFABULOUS_COLORS = [4, 5, 8, 9, 10, 12, 13, 6]\n\ndef fabulousify(s):\n buf = \"\"\n\n for i, x in enumerate(s):\n if x == \" \":\n buf += x\n else:\n buf += \"\\x03{:02}{}\".format(FABULOUS_COLORS[i % len(FABULOUS_COLORS)], x)\n\n return buf\n\n\nASCII_TO_WIDE = {i: chr(i + 0xfee0) for i in range(0x21, 0x7f)}\nASCII_TO_WIDE.update({0x20: \"\\u3000\", 0x2D: \"\\u2212\"})\n\ndef wide(s):\n return s.translate(ASCII_TO_WIDE)\n\n\nNORMAL = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nBOLDSCRIPT = \"𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩\"\nASCII_TO_BOLDSCRIPT = {ord(k): ord(v) for k, v in zip(NORMAL, BOLDSCRIPT)}\n\ndef boldscript(s):\n return s.translate(ASCII_TO_BOLDSCRIPT)\n\n\ndef run_filter(f, ctx, text=None):\n if text is None:\n if not ctx.client.backlogs.get(ctx.target, []):\n return\n\n if len(ctx.client.backlogs[ctx.target]) < 2:\n return\n\n _, text = ctx.client.backlogs[ctx.target][1]\n\n text = f(text)\n\n ctx.respond(text)\n\n\ndef bind_filter(name, f, doc):\n @service.command(r\"!{}(?: (?P.+))?$\".format(name))\n @service.command(r\"{}(?: (?P.+))?$\".format(name), mention=True)\n def benis(ctx, text=None):\n run_filter(f, ctx, text)\n benis.__doc__ = doc\n\n\nbind_filter(\"benis\", benisify,\n\"\"\"\nBenis.\n\nYou're going to have to figure this one out for yourself.\n\"\"\")\n\nbind_filter(\"fabulous\", fabulousify,\n\"\"\"\nFabulous.\n\nRainbow text!\n\"\"\")\n\nbind_filter(\"wide\", wide,\n\"\"\"\nWiden.\n\nConvert text to fullwidth.\n\"\"\")\n\nbind_filter(\"fancy\", boldscript,\n\"\"\"\nFancy.\n\nConvert text to boldscript.\n\"\"\")\n","sub_path":"kochira/services/textproc/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"44928840","text":"import torch\nimport torch.nn.functional as F\n\n\ndef moex(x, ex_index, norm_type, epsilon=1e-5, positive_only=False):\n '''MoEx operation\n inputs:\n x: feature map of shape (batch_size, channels, height, width)\n ex_index: the indices of the examples that provide the new moments\n norm_type: normalization to compute the moments\n epsilon: small constant to stablize the computation of standard deviations\n positive_only: only compute the statistics across the moments\n output: new feature map 
with exchanged moments of shape (batch_size, channels, height, width)\n '''\n dtype = x.dtype\n x = x.float()\n\n B, C, H, W = x.shape\n if norm_type == 'bn':\n norm_dims = [0, 2, 3]\n elif norm_type == 'in':\n norm_dims = [2, 3]\n elif norm_type == 'ln':\n norm_dims = [1, 2, 3]\n elif norm_type == 'pono':\n norm_dims = [1]\n elif norm_type.startswith('gn'):\n if norm_type.startswith('gn-d'):\n # gn-d4 means GN where each group has 4 dims\n G_dim = int(norm_type[4:])\n G = C // G_dim\n else:\n # gn4 means GN with 4 groups\n G = int(norm_type[2:])\n G_dim = C // G\n x = x.view(B, G, G_dim, H, W)\n norm_dims = [2, 3, 4]\n elif norm_type.startswith('gpono'):\n if norm_type.startswith('gpono-d'):\n # gpono-d4 means GPONO where each group has 4 dims\n G_dim = int(norm_type[len('gpono-d'):])\n G = C // G_dim\n else:\n # gpono4 means GPONO with 4 groups\n G = int(norm_type[len('gpono'):])\n G_dim = C // G\n x = x.view(B, G, G_dim, H, W)\n norm_dims = [2]\n else:\n raise NotImplementedError(f'norm_type={norm_type}')\n \n if positive_only:\n x_pos = F.relu(x)\n s1 = x_pos.sum(dim=norm_dims, keepdim=True)\n s2 = x_pos.pow(2).sum(dim=norm_dims, keepdim=True)\n count = x_pos.gt(0).sum(dim=norm_dims, keepdim=True)\n count[count == 0] = 1 # deal with 0/0\n mean = s1 / count\n var = s2 / count - mean.pow(2)\n std = var.add(epsilon).sqrt()\n else:\n mean = x.mean(dim=norm_dims, keepdim=True)\n std = x.var(dim=norm_dims, keepdim=True).add(epsilon).sqrt()\n swap_mean = mean[ex_index]\n swap_std = std[ex_index]\n # output = (x - mean) / std * swap_std + swap_mean\n # equivalent but more efficient\n scale = swap_std / std\n shift = swap_mean - mean * scale\n output = x * scale + shift\n return output.view(B, C, H, W).to(dtype)","sub_path":"ImageNet/moex.py","file_name":"moex.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"93030014","text":"\"\"\"\n1.31 - Variant 4\nGiven the formula for the term of a series with index k, write two programs:\na) a program that computes the sum of the first n terms of the series;\nb) a program that computes all terms of the series that are not less than a given number E.\n\nFormula: (2*k + 1)/((2*k**2 + 1) * k)\n\"\"\"\n\n# Program that computes the sum of the first n terms of the series\n\nrow_length = int(input('Enter the number of terms in the series: '))\nrow_summ = 0\n\nwhile row_length > 0:\n row_summ += (2*row_length + 1)/((2*row_length**2 + 1) * row_length)\n row_length -= 1\n\nprint(\"The sum of the series terms is\", row_summ)\n\n","sub_path":"KGU - Basic Python/Отправка/var_4--ex_1_31_1.py","file_name":"var_4--ex_1_31_1.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}+{"seq_id":"379301706","text":"from nltk import pos_tag\nfrom nltk.tokenize import WordPunctTokenizer\nfrom parsedatetime import Constants\n\nfrom ..nlp_utils.common import *\n\n\nclass TimeTagger:\n def __init__(self):\n regex_lib = Constants()\n self.all_regexes = []\n for key, r in regex_lib.cre_source.items():\n # if key in [\"CRE_MODIFIER\"]:\n # self.all_regexes.append((\"TIMEPREP\", r))\n if key in [\"CRE_TIMEHMS\", \"CRE_TIMEHMS2\",\n \"CRE_RTIMEHMS\", \"CRE_RTIMEHMS\"]:\n self.all_regexes.append((\"TIME\", r)) # TIME (proper time oclock)\n elif key in [\"CRE_DATE\", \"CRE_DATE3\", \"CRE_DATE4\", \"CRE_MONTH\", \"CRE_DAY\", \"\",\n \"CRE_RDATE\", \"CRE_RDATE2\"]:\n self.all_regexes.append((\"DATE\", r)) # DATE (day in a month)\n elif key in [\"CRE_TIMERNG1\", 
\"CRE_TIMERNG2\", \"CRE_TIMERNG3\", \"CRE_TIMERNG4\",\n \"CRE_DATERNG1\", \"CRE_DATERNG2\", \"CRE_DATERNG3\", \"CRE_NLP_PREFIX\"]:\n self.all_regexes.append((\"TIMERANGE\", r)) # TIMERANGE\n elif key in [\"CRE_UNITS\", \"CRE_QUNITS\"]:\n self.all_regexes.append((\"PERIOD\", r)) # PERIOD\n elif key in [\"CRE_UNITS_ONLY\"]:\n self.all_regexes.append((\"TIMEUNIT\", r)) # TIMEUNIT\n elif key in [\"CRE_WEEKDAY\"]:\n self.all_regexes.append((\"WEEKDAY\", r)) # WEEKDAY\n # Added by myself\n self.all_regexes.append((\"TIMEOFDAY\", r\"\\b(afternoon|noon|morning|evening|night|twilight)\\b\"))\n self.all_regexes.append((\"TIMEPREP\", r\"\\b(before|after|while|late|early)\\b\"))\n\n def merge_interval(self, intervals):\n if intervals:\n intervals.sort(key=lambda interval: interval[0])\n merged = [intervals[0]]\n for current in intervals:\n previous = merged[-1]\n if current[0] <= previous[1] and current[-1] == previous[-1]:\n if current[1] > previous[1]:\n previous[1] = current[1]\n previous[2] = current[2]\n else:\n merged.append(current)\n return merged\n return []\n\n def find_time(self, sent):\n results = []\n for kind, r in self.all_regexes:\n for t in find_regex(r, sent):\n results.append([*t, kind])\n return self.merge_interval(results)\n\n def tag(self, sent):\n times = self.find_time(sent)\n intervals = dict([(time[0], time[1]) for time in times])\n tag_dict = dict([(time[2], time[3]) for time in times])\n tokenizer = WordPunctTokenizer()\n # for a in [time[2] for time in times]:\n # tokenizer.add_mwe(a.split())\n\n # --- FIXED ---\n original_tokens = tokenizer.tokenize(sent)\n original_tags = pos_tag(original_tokens)\n # print(original_tags)\n # --- END FIXED ---\n\n tokens = []\n current = 0\n for span in tokenizer.span_tokenize(sent):\n if span[0] < current:\n continue\n if span[0] in intervals:\n tokens.append(f'__{sent[span[0]: intervals[span[0]]]}')\n current = intervals[span[0]]\n else:\n tokens.append(sent[span[0]:span[1]])\n current = span[1]\n\n tags = pos_tag(tokens)\n\n new_tags = []\n for word, tag in tags:\n if word[:2] == '__':\n new_tags.append((word[2:], tag_dict[word[2:]]))\n else:\n tag = [t[1] for t in original_tags if t[0] == word][0] # FIXED\n new_tags.append((word, tag))\n return new_tags\n","sub_path":"images/nlp_utils/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"378876484","text":"import datetime\nimport json\nimport os\nimport logging\nfrom lxml.html import parse\nfrom utils import TimeIt\n\nlogger = logging.getLogger(__name__)\n\n# This is the base URL used to parse rate data by the code in this source file\ndata_source_url = 'http://ratedata.gaincapital.com/'\n\n\nclass URLDownloader(object):\n \"\"\"\n This class parses the gaincapital web site for a list of URLs.\n\n Return value is a data structure of lists of URLs (by year then currency) or CSV files\n \"\"\"\n\n def __init__(self, base_path, years):\n \"\"\"\n :type base_path: str\n :type years: list[int]\n \"\"\"\n downloads_dir = os.path.join(base_path, 'data', 'downloads')\n if not os.path.exists(downloads_dir):\n os.makedirs(downloads_dir)\n\n self.urls_file = os.path.join(downloads_dir, 'urls.json')\n self.years = years\n\n @staticmethod\n def get_base_url():\n \"\"\"\n :rtype: str\n \"\"\"\n return data_source_url\n\n @TimeIt('Download URLs')\n def download_and_parse_urls(self):\n \"\"\"\n Go to the gaincapital.com and get the list of URLs available for each of the requested 
coin-pairs, organized by\n years.\n\n :rtype: dict[str,dict[str,list[str]]]\n \"\"\"\n available_year_links = self._parse_main_page_urls()\n\n urls = self._restore_progress()\n\n # Retain only years we are interested in (and available)\n year_links = {}\n for y in set(self.years).intersection(available_year_links.keys()):\n year_links[y] = available_year_links[y]\n\n for (year, year_link) in year_links.items():\n # Check if we already have this year loaded. The only exception is the current year (where we always want\n # to check if there is new data available).\n if str(year) in urls.keys() and year != datetime.datetime.now().year:\n logger.info(\"--- INCREMENTAL LOAD: Already loaded URLs for year %d\" % year)\n continue\n\n # If we are still in January in the current year, don't try to download data because (in this site) it is\n # not yet available\n if year == datetime.datetime.now().year and datetime.datetime.now().month == 1:\n logger.info(\"--- SKIPPING LOAD: January of year %d\" % year)\n continue\n\n self._parse_year_page_urls(year, year_link, urls)\n\n return urls\n\n def _parse_main_page_urls(self):\n \"\"\"\n :rtype: dict[int,str]\n \"\"\"\n years = {}\n\n # Pick up all links that end with a number...\n for link in self._download_and_get_links(data_source_url):\n # Ignore links that don't start with .\\\n if not link.startswith('.\\\\'):\n continue\n\n # Ignore links that don't end with a valid year\n try:\n sy = self._strip_url(link)\n y = int(sy)\n\n years[y] = data_source_url + sy + '/'\n\n except ValueError:\n # This is ok\n continue\n\n return years\n\n def _parse_year_page_urls(self, year, year_link, urls):\n \"\"\"\n :type year: int\n :type year_link: str\n :type urls: dict[str,dict[str,list[str]]]\n \"\"\"\n month_links = []\n\n logger.info('**** Parsing urls for year %s' % year_link)\n\n # Figure out the links in this page (one per month)\n for link in self._download_and_get_links(year_link):\n if 'Each months data will be prepared at the end of that month' in link or \\\n 'This is the home directory' in link:\n continue\n\n stripped = self._strip_url(link)\n\n # Verify that this is indeed a link to a month\n try:\n # First two chars need to be an integer\n int(stripped[:2])\n except ValueError:\n continue\n\n # Third char need to be a space\n if stripped[2] != ' ':\n continue\n\n month_link = (year_link + stripped + '/').replace(\" \", \"%20\")\n month_links.append(month_link)\n\n # Now go to each link and extract the URLs\n for month_link in month_links:\n self._parse_month_page_urls(year, month_link, urls)\n\n # Make sure there are 12 links...\n if len(month_links) != 12 and year != datetime.datetime.now().year:\n raise LookupError(\"Found %d instead of 12 months for year %d\" % (len(month_links), year))\n\n # Finished a year, save the urls file...\n self._save_progress(urls)\n\n def _parse_month_page_urls(self, year, month_link, urls):\n \"\"\"\n :type year: int\n :type month_link: str\n :type urls: dict[str,dict[str,list[str]]]\n \"\"\"\n logger.debug('Parsing urls for month link %s' % month_link)\n\n # Figure out the links in this page (one per month)\n for link in self._download_and_get_links(month_link):\n stripped = self._strip_url(link)\n\n # Make sure we want to download this currency\n currency_name = stripped[:7].upper()\n\n yr = urls.get(str(year))\n if yr is None:\n yr = {}\n urls[str(year)] = yr\n\n cp = yr.get(currency_name)\n if cp is None:\n cp = []\n yr[currency_name] = cp\n\n url = month_link + self._strip_url(stripped)\n cp.append(url)\n\n 
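# The listing pages on ratedata.gaincapital.com are plain HTML directory tables; the helper below simply collects every anchor href found in those tables.\n 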
@staticmethod\n def _download_and_get_links(url):\n \"\"\"\n :type url str\n :rtype: list\n \"\"\"\n try:\n html = parse(url).getroot()\n return html.xpath(\"/html/body//table//tr[*]/td[*]/a/@href\")\n except:\n logger.exception(\"Failed to parse url %s\" % url)\n raise\n\n @staticmethod\n def _strip_url(url):\n \"\"\"\n Remove the annoying .\\ prefix preceding each URL\n\n :type url: str\n :rtype: str\n \"\"\"\n return url[2:] if url.startswith('.\\\\') else url\n\n def _restore_progress(self):\n \"\"\"\n Load the progress - to enable incremental load. Note that because the way JSON is serializing, the year will\n be represented in this case as a string instead of a number.\n\n :rtype: dict[str,dict[str,list[str]]]\n \"\"\"\n try:\n with open(self.urls_file) as f:\n return json.load(f)\n except:\n return {}\n\n def _save_progress(self, urls):\n \"\"\"\n Replace the progress file with a fresh copy of our latest achievements\n \"\"\"\n with open(self.urls_file, mode=\"w\") as f:\n json.dump(urls, f)\n","sub_path":"downloader/urldownloader.py","file_name":"urldownloader.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"159052576","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cntapp', '0018_auto_20160426_1739'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='question',\n name='quiz',\n ),\n migrations.AddField(\n model_name='question',\n name='quizz',\n field=models.ForeignKey(verbose_name='Quiz', blank=True, default=False, to='cntapp.Quiz'),\n preserve_default=False,\n ),\n ]\n","sub_path":"cntapp/migrations/0019_auto_20160426_1745.py","file_name":"0019_auto_20160426_1745.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"84156601","text":"\"\"\"\nFile: convertgui.py\nProject 9.2\n\nA GUI-based temperature converter.\n\nConverts from degree Fahrenheit to degrees Celsius or\nfrom degree Celsius to degrees Fahrenheit.\n\"\"\"\n\nfrom tkinter import *\n\nclass ConvertGUI(Frame):\n\n def __init__(self):\n \"\"\"Set up the window and widgets.\"\"\"\n Frame.__init__(self)\n self.master.title(\"Temperature Converter\")\n self.grid()\n self._fahrVar = DoubleVar()\n self._celsiusVar = DoubleVar()\n self._fahrVar.set(32.0)\n self._celsiusVar.set(0.0)\n self._fahrLabel = Label(self, text = \"Fahrenheit\")\n self._fahrLabel.grid(row = 0, column = 0)\n self._fahrEntry = Entry(self, textvariable = self._fahrVar)\n self._fahrEntry.grid(row = 1, column = 0)\n self._celsiusLabel = Label(self, text = \"Celsius\")\n self._celsiusLabel.grid(row = 0, column = 1)\n self._celsiusEntry = Entry(self, textvariable = self._celsiusVar)\n self._celsiusEntry.grid(row = 1, column = 1)\n self._toCelsiusButton = Button(self, text = \">>>>\",\n command = self._toCelsius)\n self._toCelsiusButton.grid(row = 2, column = 0)\n self._toFahrButton = Button(self, text = \"<<<<\",\n command = self._toFahr)\n self._toFahrButton.grid(row = 2, column = 1)\n\n def _toCelsius(self):\n \"\"\"Event handler for the toCelsius button.\"\"\"\n fahr = self._fahrVar.get()\n celsius = (fahr - 32) * 5 / 9\n self._celsiusVar.set(celsius)\n\n def _toFahr(self):\n \"\"\"Event handler for the toFahr button.\"\"\"\n celsius = self._celsiusVar.get()\n fahr = celsius * 9 / 5 + 32\n 
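# Write the converted value back into the Fahrenheit entry field.\n        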
self._fahrVar.set(fahr)\n\ndef main():\n ConvertGUI().mainloop()\n\nmain()\n","sub_path":"WA170 - Programming with Python/WA170_exercise_solutions/9781111822705_Solutions_ch09/Ch_09_Projects/9.2/convertgui.py","file_name":"convertgui.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"237643142","text":"# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)\n# ReFrame Project Developers. See the top-level LICENSE file for details.\n#\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport copy\nimport fnmatch\nimport functools\nimport itertools\nimport json\nimport jsonschema\nimport os\nimport re\nimport socket\nimport tempfile\n\nimport reframe\nimport reframe.core.settings as settings\nimport reframe.utility as util\nfrom reframe.core.environments import normalize_module_list\nfrom reframe.core.exceptions import ConfigError, ReframeFatalError\nfrom reframe.core.logging import getlogger\nfrom reframe.utility import ScopedDict\n\n\ndef _match_option(opt, opt_map):\n if isinstance(opt, list):\n opt = '/'.join(opt)\n\n if opt in opt_map:\n return opt_map[opt]\n\n for k, v in opt_map.items():\n if fnmatch.fnmatchcase(opt, k):\n return v\n\n raise KeyError(opt)\n\n\ndef _normalize_syntax(conv):\n '''Normalize syntax for options accepting multiple syntaxes'''\n\n def _do_normalize(fn):\n\n @functools.wraps(fn)\n def _get(site_config, option, *args, **kwargs):\n ret = fn(site_config, option, *args, **kwargs)\n if option is None:\n return ret\n\n for opt_patt, norm_fn in conv.items():\n if re.match(opt_patt, option):\n ret = norm_fn(ret)\n break\n\n return ret\n\n return _get\n\n return _do_normalize\n\n\ndef _hostname(use_fqdn, use_xthostname):\n '''Return hostname'''\n if use_xthostname:\n try:\n xthostname_file = '/etc/xthostname'\n getlogger().debug(f'Trying {xthostname_file!r}...')\n with open(xthostname_file) as fp:\n return fp.read()\n except OSError as e:\n '''Log the error and continue to the next method'''\n getlogger().debug(f'Failed to read {xthostname_file!r}')\n\n if use_fqdn:\n getlogger().debug('Using FQDN...')\n return socket.getfqdn()\n\n getlogger().debug('Using standard hostname...')\n return socket.gethostname()\n\n\nclass _SiteConfig:\n def __init__(self, site_config, filename):\n self._site_config = copy.deepcopy(site_config)\n self._filename = filename\n self._subconfigs = {}\n self._local_system = None\n self._sticky_options = {}\n self._autodetect_meth = 'hostname'\n self._autodetect_opts = {\n 'hostname': {\n 'use_fqdn': False,\n 'use_xthostname': False,\n }\n }\n\n # Open and store the JSON schema for later validation\n schema_filename = os.path.join(reframe.INSTALL_PREFIX, 'reframe',\n 'schemas', 'config.json')\n with open(schema_filename) as fp:\n try:\n self._schema = json.loads(fp.read())\n except json.JSONDecodeError as e:\n raise ReframeFatalError(\n f'invalid configuration schema: {schema_filename!r}'\n ) from e\n\n def _pick_config(self):\n if self._local_system:\n return self._subconfigs[self._local_system]\n else:\n return self._site_config\n\n def __repr__(self):\n return (f'{type(self).__name__}(site_config={self._site_config!r}, '\n f'filename={self._filename!r})')\n\n def __str__(self):\n return json.dumps(self._pick_config(), indent=2)\n\n # Delegate everything to either the original config or to the reduced one\n # if a system is selected\n\n def __iter__(self):\n return iter(self._pick_config())\n\n def __getitem__(self, key):\n return 
self._pick_config()[key]\n\n def __getattr__(self, attr):\n return getattr(self._pick_config(), attr)\n\n def set_autodetect_meth(self, method, **opts):\n self._autodetect_meth = method\n try:\n self._autodetect_opts[method].update(opts)\n except KeyError:\n raise ConfigError(\n f'unknown auto-detection method: {method!r}'\n ) from None\n\n @property\n def schema(self):\n '''Configuration schema'''\n return self._schema\n\n def add_sticky_option(self, option, value):\n self._sticky_options[option] = value\n\n def remove_sticky_option(self, option):\n self._sticky_options.pop(option, None)\n\n def is_sticky_option(self, option):\n return option in self._sticky_options\n\n @_normalize_syntax({'.*/.*modules$': normalize_module_list})\n def get(self, option, default=None):\n '''Retrieve value of option.\n\n If the option cannot be retrieved, ``default`` will be returned.\n '''\n\n # Options may not start with a slash\n if not option or option[0] == '/':\n return default\n\n # Remove trailing /\n if option[-1] == '/':\n option = option[:-1]\n\n # Convert any indices to integers\n prepared_option = []\n for opt in option.split('/'):\n try:\n opt = int(opt)\n except ValueError:\n pass\n\n prepared_option.append(opt)\n\n # Walk through the option path constructing a default key at the same\n # time for looking it up in the defaults or the sticky options\n default_key = []\n value = self._pick_config()\n option_path_invalid = False\n for x in prepared_option:\n if option_path_invalid:\n # Just go through the rest of elements and construct the key\n # trivially\n if not isinstance(x, int) and x[0] != '@':\n default_key.append(x)\n\n continue\n\n if isinstance(x, int) or x[0] == '@':\n # We are in an addressable element; move forward in the path,\n # without adding the component to the default_key\n if isinstance(x, int):\n # Element addressable by index number\n try:\n value = value[x]\n except IndexError:\n option_path_invalid = True\n else:\n # Element addressable by name\n x, found = x[1:], False\n for obj in value:\n if obj['name'] == x:\n value, found = obj, True\n break\n\n if not found:\n option_path_invalid = True\n\n continue\n\n if 'type' in value:\n default_key.append(value['type'] + '_' + x)\n else:\n default_key.append(x)\n\n try:\n value = value[x]\n except (IndexError, KeyError, TypeError):\n option_path_invalid = True\n\n default_key = '/'.join(default_key)\n try:\n # If a sticky option exists, return that value\n return _match_option(default_key, self._sticky_options)\n except KeyError:\n pass\n\n if option_path_invalid:\n # Try the default and return\n try:\n return _match_option(default_key, self._schema['defaults'])\n except KeyError:\n return default\n\n return value\n\n @property\n def filename(self):\n return self._filename\n\n @property\n def subconfig_system(self):\n return self._local_system\n\n @classmethod\n def create(cls, filename):\n _, ext = os.path.splitext(filename)\n if ext == '.py':\n return cls._create_from_python(filename)\n elif ext == '.json':\n return cls._create_from_json(filename)\n else:\n raise ConfigError(f\"unknown configuration file type: '{filename}'\")\n\n @classmethod\n def _create_from_python(cls, filename):\n try:\n mod = util.import_module_from_file(filename)\n except ImportError as e:\n # import_module_from_file() may raise an ImportError if the\n # configuration file is under ReFrame's top-level directory\n raise ConfigError(\n f\"could not load Python configuration file: '{filename}'\"\n ) from e\n\n if hasattr(mod, 'settings'):\n # Looks like 
an old style config\n raise ConfigError(\n f\"the syntax of the configuration file {filename!r} \"\n f\"is no longer supported; please convert it using the \"\n f\"'--upgrade-config-file' option\"\n )\n\n mod = util.import_module_from_file(filename)\n if not hasattr(mod, 'site_configuration'):\n raise ConfigError(\n f\"not a valid Python configuration file: '{filename}'\"\n )\n\n return _SiteConfig(mod.site_configuration, filename)\n\n @classmethod\n def _create_from_json(cls, filename):\n with open(filename) as fp:\n try:\n config = json.loads(fp.read())\n except json.JSONDecodeError as e:\n raise ConfigError(\n f\"invalid JSON syntax in configuration file '{filename}'\"\n ) from e\n\n return _SiteConfig(config, filename)\n\n def _detect_system(self):\n getlogger().debug(\n f'Detecting system using method: {self._autodetect_meth!r}'\n )\n hostname = _hostname(\n self._autodetect_opts[self._autodetect_meth]['use_fqdn'],\n self._autodetect_opts[self._autodetect_meth]['use_xthostname'],\n )\n getlogger().debug(f'Retrieved hostname: {hostname!r}')\n getlogger().debug(f'Looking for a matching configuration entry')\n for system in self._site_config['systems']:\n for patt in system['hostnames']:\n if re.match(patt, hostname):\n sysname = system['name']\n getlogger().debug(\n f'Configuration found: picking system {sysname!r}'\n )\n return sysname\n\n raise ConfigError(f\"could not find a configuration entry \"\n f\"for the current system: '{hostname}'\")\n\n def validate(self):\n site_config = self._pick_config()\n try:\n jsonschema.validate(site_config, self._schema)\n except jsonschema.ValidationError as e:\n raise ConfigError(f\"could not validate configuration file: \"\n f\"'{self._filename}'\") from e\n\n # Make sure that system and partition names are unique\n system_names = set()\n for system in self._site_config['systems']:\n sysname = system['name']\n if sysname in system_names:\n raise ConfigError(f\"system '{sysname}' already defined\")\n\n system_names.add(sysname)\n partition_names = set()\n for part in system['partitions']:\n partname = part['name']\n if partname in partition_names:\n raise ConfigError(\n f\"partition '{partname}' already defined \"\n f\"for system '{sysname}'\"\n )\n\n partition_names.add(partname)\n\n def select_subconfig(self, system_fullname=None,\n ignore_resolve_errors=False):\n # First look for the current subconfig in the cache; if not found,\n # generate it and cache it\n system_fullname = system_fullname or self._detect_system()\n getlogger().debug(f'Selecting subconfig for {system_fullname!r}')\n\n self._local_system = system_fullname\n if system_fullname in self._subconfigs:\n return self._subconfigs[system_fullname]\n\n try:\n system_name, part_name = system_fullname.split(':', maxsplit=1)\n except ValueError:\n # system_name does not have a partition\n system_name, part_name = system_fullname, None\n\n # Start from a fresh copy of the site_config, because we will be\n # modifying it\n site_config = copy.deepcopy(self._site_config)\n local_config = {}\n systems = list(\n filter(lambda x: x['name'] == system_name, site_config['systems'])\n )\n if not systems:\n raise ConfigError(\n f\"could not find a configuration entry \"\n f\"for the requested system: '{system_name}'\"\n )\n\n if part_name is not None:\n # Filter out also partitions\n systems[0]['partitions'] = list(\n filter(lambda x: x['name'] == part_name,\n systems[0]['partitions'])\n )\n\n if not systems[0]['partitions']:\n raise ConfigError(\n f\"could not find a configuration entry \"\n f\"for 
the requested system/partition combination: \"\n f\"'{system_name}:{part_name}'\"\n )\n\n # Create local configuration for the current or the requested system\n local_config['systems'] = systems\n for name, section in site_config.items():\n if name == 'systems':\n # The systems sections has already been treated\n continue\n\n # Convert section to a scoped dict that will handle correctly and\n # transparently the system/partition resolution\n scoped_section = ScopedDict()\n for obj in section:\n key = obj.get('name', name)\n target_systems = obj.get(\n 'target_systems',\n _match_option(f'{name}/target_systems',\n self._schema['defaults'])\n )\n for t in target_systems:\n scoped_section[f'{t}:{key}'] = obj\n\n unique_keys = set()\n for obj in section:\n key = obj.get('name', name)\n if key in unique_keys:\n continue\n\n unique_keys.add(key)\n try:\n val = scoped_section[f\"{system_fullname}:{key}\"]\n except KeyError:\n pass\n else:\n local_config.setdefault(name, [])\n local_config[name].append(val)\n\n required_sections = self._schema['required']\n for name in required_sections:\n if name not in local_config.keys():\n if not ignore_resolve_errors:\n raise ConfigError(\n f\"section '{name}' not defined \"\n f\"for system '{system_fullname}'\"\n )\n\n # Verify that all environments defined by the system are defined for\n # the current system\n if not ignore_resolve_errors:\n sys_environs = {\n *itertools.chain(*(p['environs']\n for p in systems[0]['partitions']))\n }\n found_environs = {\n e['name'] for e in local_config['environments']\n }\n undefined_environs = sys_environs - found_environs\n if undefined_environs:\n env_descr = ', '.join(f\"'{e}'\" for e in undefined_environs)\n raise ConfigError(\n f\"environments {env_descr} \"\n f\"are not defined for '{system_fullname}'\"\n )\n\n self._subconfigs[system_fullname] = local_config\n\n\ndef convert_old_config(filename, newfilename=None):\n old_config = util.import_module_from_file(filename).settings\n converted = {\n 'systems': [],\n 'environments': [],\n 'logging': [],\n }\n perflogdir = {}\n old_systems = old_config.site_configuration['systems'].items()\n for sys_name, sys_spec in old_systems:\n sys_dict = {'name': sys_name}\n\n system_perflogdir = sys_spec.pop('perflogdir', None)\n perflogdir.setdefault(system_perflogdir, [])\n perflogdir[system_perflogdir].append(sys_name)\n\n sys_dict.update(sys_spec)\n\n # hostnames is now a required property\n if 'hostnames' not in sys_spec:\n sys_dict['hostnames'] = []\n\n # Make variables dictionary into a list of lists\n if 'variables' in sys_spec:\n sys_dict['variables'] = [\n [vname, v] for vname, v in sys_dict['variables'].items()\n ]\n\n # Make partitions dictionary into a list\n if 'partitions' in sys_spec:\n sys_dict['partitions'] = []\n for pname, p in sys_spec['partitions'].items():\n new_p = {'name': pname}\n new_p.update(p)\n if p['scheduler'] == 'nativeslurm':\n new_p['scheduler'] = 'slurm'\n new_p['launcher'] = 'srun'\n elif p['scheduler'] == 'local':\n new_p['scheduler'] = 'local'\n new_p['launcher'] = 'local'\n else:\n sched, launcher, *_ = p['scheduler'].split('+')\n new_p['scheduler'] = sched\n new_p['launcher'] = launcher\n\n # Make resources dictionary into a list\n if 'resources' in p:\n new_p['resources'] = [\n {'name': rname, 'options': r}\n for rname, r in p['resources'].items()\n ]\n\n # Make variables dictionary into a list of lists\n if 'variables' in p:\n new_p['variables'] = [\n [vname, v] for vname, v in p['variables'].items()\n ]\n\n if 'container_platforms' in p:\n 
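# Container platforms were keyed by name; each becomes a dict carrying an\n                    # explicit 'type' field in the new list-based schema.\n                    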
new_p['container_platforms'] = []\n for cname, c in p['container_platforms'].items():\n new_c = {'type': cname}\n new_c.update(c)\n if 'variables' in c:\n new_c['variables'] = [\n [vn, v] for vn, v in c['variables'].items()\n ]\n\n new_p['container_platforms'].append(new_c)\n\n sys_dict['partitions'].append(new_p)\n\n converted['systems'].append(sys_dict)\n\n old_environs = old_config.site_configuration['environments'].items()\n for env_target, env_entries in old_environs:\n for ename, e in env_entries.items():\n new_env = {'name': ename}\n if env_target != '*':\n new_env['target_systems'] = [env_target]\n\n new_env.update(e)\n\n # Convert variables dictionary to a list of lists\n if 'variables' in e:\n new_env['variables'] = [\n [vname, v] for vname, v in e['variables'].items()\n ]\n\n # Type attribute is not used anymore\n if 'type' in new_env:\n del new_env['type']\n\n converted['environments'].append(new_env)\n\n if 'modes' in old_config.site_configuration:\n converted['modes'] = []\n old_modes = old_config.site_configuration['modes'].items()\n for target_mode, mode_entries in old_modes:\n for mname, m in mode_entries.items():\n new_mode = {'name': mname, 'options': m}\n if target_mode != '*':\n new_mode['target_systems'] = [target_mode]\n\n converted['modes'].append(new_mode)\n\n def handler_list(handler_config, basedir=None):\n ret = []\n for h in handler_config:\n new_h = h.copy()\n new_h['level'] = h['level'].lower()\n if h['type'] == 'graylog':\n # `host` and `port` attribute are converted to `address`\n new_h['address'] = h['host']\n if 'port' in h:\n new_h['address'] += ':' + h['port']\n elif h['type'] == 'filelog' and basedir is not None:\n new_h['basedir'] = basedir\n\n ret.append(new_h)\n\n return ret\n\n for basedir, target_systems in perflogdir.items():\n converted['logging'].append(\n {\n 'level': old_config.logging_config['level'].lower(),\n 'handlers': handler_list(\n old_config.logging_config['handlers']\n ),\n 'handlers_perflog': handler_list(\n old_config.perf_logging_config['handlers'],\n basedir=basedir\n ),\n 'target_systems': target_systems\n }\n )\n if basedir is None:\n del converted['logging'][-1]['target_systems']\n\n converted['general'] = [{}]\n if hasattr(old_config, 'checks_path'):\n converted['general'][0][\n 'check_search_path'\n ] = old_config.checks_path\n\n if hasattr(old_config, 'checks_path_recurse'):\n converted['general'][0][\n 'check_search_recursive'\n ] = old_config.checks_path_recurse\n\n if converted['general'] == [{}]:\n del converted['general']\n\n contents = (f\"#\\n# This file was automatically generated \"\n f\"by ReFrame based on '{filename}'.\\n#\\n\\n\"\n f\"site_configuration = {util.ppretty(converted)}\\n\")\n\n contents = '\\n'.join(l if len(l) < 80 else f'{l} # noqa: E501'\n for l in contents.split('\\n'))\n\n if newfilename:\n with open(newfilename, 'w') as fp:\n if newfilename.endswith('.json'):\n json.dump(converted, fp, indent=4)\n else:\n fp.write(contents)\n\n else:\n with tempfile.NamedTemporaryFile(mode='w', suffix='.py',\n delete=False) as fp:\n fp.write(contents)\n\n return fp.name\n\n\ndef _find_config_file():\n # The order of elements is important, since it defines the priority\n homedir = os.getenv('HOME')\n prefixes = [os.path.join(homedir, '.reframe')] if homedir else []\n prefixes += [\n reframe.INSTALL_PREFIX,\n '/etc/reframe.d'\n ]\n valid_exts = ['py', 'json']\n getlogger().debug('Looking for a suitable configuration file')\n for d in prefixes:\n if not d:\n continue\n\n for ext in valid_exts:\n filename = 
os.path.join(d, f'settings.{ext}')\n            getlogger().debug(f'Trying {filename!r}')\n            if os.path.exists(filename):\n                return filename\n\n    return None\n\n\ndef load_config(filename=None):\n    if filename is None:\n        filename = _find_config_file()\n        if filename is None:\n            # Return the generic configuration\n            getlogger().debug('No configuration found; '\n                              'falling back to a generic one')\n            return _SiteConfig(settings.site_configuration, '')\n\n    getlogger().debug(f'Loading configuration file: {filename!r}')\n    return _SiteConfig.create(filename)\n","sub_path":"reframe/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":22992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}{"seq_id":"567895750","text":"#!/usr/bin/env python\n\n\"\"\" Bring up interfaces and check connectivity for WFS installs. \"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport fcntl\nimport re\nimport struct\nimport subprocess\nimport sys\nimport termios\n\nfrom six import iteritems\n\ntry:\n    # pylint: disable=unused-import\n    from typing import Any\n    from typing import Dict\n    from typing import List\n    from typing import Optional\n    from typing import Union\nexcept ImportError:\n    pass\n\n\nCONNECTIVITY_FAILURES = []\n\n# Globals, because it's a nightmare to try and get this down into\n# the ping function with args that modify it.\n# pylint: disable=invalid-name, global-statement\nPING_COUNT = 5\nPING_INTERVAL = 3\n\n# component regex matches a digit, character, or period character as\n# a valid \"component\" for a version string.\n# Version regex matches only versions like 4.10.12 and will error for\n# versions with .beta, etc. We should fail on those anyway.\nVERSION_COMPONENT_RE = re.compile(r'(\\d+ | [a-z]+ | \\.)', re.VERBOSE)\nVERSION_RE = re.compile(r'\\d+\\.\\d+\\.\\d+$')\n\n\n# Argparse actions to make another argument required or not.\nclass ChangeRequired(argparse.Action):\n    \"\"\"Store true and make \"to_modify\" required args.\n\n    This defaults to True when evaluated - i.e.\n    if args.change_required_arg:\n\n    The if statement above would evaluate to True if the flag was present,\n    and false if the flag is not present.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Init for ChangeRequired class.\n\n        Arguments:\n            to_modify (list): destination names of the args to modify required flags for.\n            target_value (bool): Value to set the arg.required attribute to.\n        \"\"\"\n        kwargs['const'] = True\n        kwargs['nargs'] = 0\n        self.target_value = kwargs.pop('target_value', None)\n        self.to_modify = kwargs.pop('to_modify', [])\n        super(ChangeRequired, self).__init__(*args, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        \"\"\"Perform these when this action is called.\"\"\"\n        self._modify_required(parser)\n        setattr(namespace, self.dest, self.const)\n\n    def _modify_required(self, parser):\n        \"\"\"Modify any actions to the required value requested.\"\"\"\n        # argparse doesn't currently have a \"modify actions conditionally\" that's\n        # public, so we get to use the protected method.\n        # pylint: disable=protected-access\n        for action in parser._actions:\n            if action.dest in self.to_modify and self.target_value:\n                action.required = self.target_value\n\n\nclass VersionError(ValueError):\n    \"\"\"Custom version error for Version class parsing.\"\"\"\n    pass\n\n\nclass Version(object):\n    \"\"\"Basic version class for doing version 
comparisons.\"\"\"\n\n\n def __init__(self, vstring):\n # type: (str) -> None\n\n if not VERSION_RE.search(vstring):\n # pylint: disable=anomalous-backslash-in-string\n message = u\"Unable to accurately parse non-standard version numbers, i.e. not of the format: r'\\d\\.\\d\\.\\d$'\"\n raise VersionError(message)\n self.parse(vstring)\n\n def parse(self, vstring):\n # type: (str) -> None\n \"\"\"Parse version string into components.\"\"\"\n self.vstring = vstring\n # We're breaking the string into alphanumerics and digits, and then removing the periods.\n # Example: '4.1.2.beta' -> [4, 1, 2, 'beta']\n components = [x for x in VERSION_COMPONENT_RE.split(vstring) if x and x != '.'] # type: List[Any]\n for index, component in enumerate(components):\n # Try to make the numbers into ints for leveraging list comparisons.\n # i.e. ['1', '1', '1', 'beta'] -> [1, 1, 1, 'beta']\n if component.isdigit():\n components[index] = int(component)\n\n self.version = components\n\n def _cmp(self, other):\n # type: (Union[str, Version]) -> int\n \"\"\"Compare version strings\"\"\"\n if isinstance(other, str):\n other = Version(other)\n\n if self.version == other.version:\n comparison = 0\n elif self.version < other.version:\n comparison = -1\n elif self.version > other.version:\n comparison = 1\n return comparison\n\n def __repr__(self):\n # type: () -> str\n \"\"\"repr function\"\"\"\n return \"{} ({})\".format(self.__class__.__name__, self.vstring)\n\n def __str__(self):\n # type: () -> str\n \"\"\"str function\"\"\"\n return self.vstring\n\n def __eq__(self, other):\n # type: (Union[str, Version]) -> bool\n \"\"\"Comparison for equals\"\"\"\n if isinstance(other, str):\n other = Version(other)\n return self.vstring == other.vstring\n\n def __ne__(self, other):\n # type: (Union[str, Version]) -> bool\n \"\"\"Comparison for equals\"\"\"\n if isinstance(other, str):\n other = Version(other)\n return self.vstring != other.vstring\n\n def __lt__(self, other):\n # type: (Union[str, Version]) -> bool\n \"\"\"Comparison for less than\"\"\"\n comparison = self._cmp(other)\n return comparison < 0\n\n def __le__(self, other):\n # type: (Union[str, Version]) -> bool\n \"\"\"Comparison for less than or equal\"\"\"\n comparison = self._cmp(other)\n return comparison <= 0\n\n def __gt__(self, other):\n # type: (Union[str, Version]) -> bool\n \"\"\"Comparison for greater than\"\"\"\n comparison = self._cmp(other)\n return comparison > 0\n\n def __ge__(self, other):\n # type: (Union[str, Version]) -> bool\n \"\"\"Comparison for greater or equal\"\"\"\n comparison = self._cmp(other)\n return comparison >= 0\n\n\ndef table_to_dict(lines, delimiter=None):\n # type: (List[str], Optional[str]) -> List[Dict[str, str]]\n \"\"\"Turn a table into a dict.\n\n Arguments:\n lines (list): Table lines from any string/newline tableself.\n delimiter (str): regex friendly delimiter string to split columns on\n Returns:\n table_dict (list): list of dicts with column name as key, and value\n \"\"\"\n # re.split() takes strings rather than regex for some reason.\n # pylint: disable=anomalous-backslash-in-string\n delimiter = delimiter or '\\s{2,}'\n # Remove lines that have no alphanumeric characters in them.\n fixed_lines = [line for line in lines if re.search(r'\\w+', line)]\n if not fixed_lines:\n raise ValueError('No lines to parse into table after filtering out non-alphanumeric lines.')\n table_dict_list = []\n keys = [thing for thing in re.split(delimiter, fixed_lines[0].lower()) if thing]\n fixed_keys = []\n for key in keys:\n # Remove 
non alphanumerics and spaces.\n fixed_keys.append('_'.join(x for x in key.split() if x.isalpha()))\n # Since we're parsing a table, the first line is our headers. Skip it.\n char_in_line = re.compile(r'\\w')\n for line in fixed_lines[1:]:\n if not char_in_line.search(line):\n continue\n line_dict = {}\n values = [col_val.strip() for col_val in re.split(delimiter, line.lower()) if col_val]\n for index, value in enumerate(values):\n if not value:\n continue\n key = fixed_keys[index]\n line_dict[key] = value\n table_dict_list.append(line_dict)\n return table_dict_list\n\n\ndef check_hardware(local_hardware, peer_hardware):\n # type: (str, str) -> List[List[str]]\n \"\"\"Run hardware checks.\n\n Arguments:\n local_hardware (str): Hardware model, e.g. 'FA-420'.\n peer_hardware (str): Hardware model, e.g. 'FA-420'.\n\n Returns:\n messages (list): pass/fail result messages\n \"\"\"\n unsupported_controllers = ['fa-300',\n 'fa-310',\n 'fa-320',\n 'fa-405',\n 'fa-420',\n 'fa-450',\n 'fa-m10',\n 'fa-m10r2']\n\n has_both_controllers = local_hardware and peer_hardware\n models_match = local_hardware == peer_hardware\n local_supported = local_hardware and local_hardware.lower() not in unsupported_controllers\n peer_supported = peer_hardware and peer_hardware.lower() not in unsupported_controllers\n\n if all([has_both_controllers, models_match, local_supported, peer_supported]):\n message = [['Hardware', 'PASS', 'Controller models are {}\\'s'.format(local_hardware)]]\n elif not has_both_controllers:\n message = [['Hardware', 'FAIL', 'Could not verify both controllers.']]\n elif not models_match:\n mod_message = 'Models do not match - Local: {}, Peer: {}'\n message = [['Hardware', 'FAIL', mod_message.format(local_hardware, peer_hardware)]]\n elif not all([local_supported, peer_supported]):\n mod_message = 'Local model {} or peer model {} not supported.'\n message = [['Hardware', 'FAIL', mod_message.format(local_hardware, peer_hardware)]]\n else:\n message = [['Hardware', 'FAIL', 'Unable to verify supported hardware.']]\n return message\n\n\ndef check_versions(local_version, peer_version):\n # type: (str, str) -> List[List[str]]\n \"\"\"Run version checks.\n\n Arguments:\n local_version (str): Local controller version.\n peer_version (str): Peer controller version.\n\n Returns:\n messages (list): pass/fail result messages\n \"\"\"\n threshold_version = Version('4.10.7')\n try:\n localv = Version(local_version) # type: Optional[Version]\n peerv = Version(peer_version) # type: Optional[Version]\n except VersionError:\n localv = peerv = None\n versions = [localv, peerv]\n missing_versions = any(True for version in versions if not version)\n versions_mismatched = local_version != peer_version\n purity_5_0_x = any(version.version[0:2] == [5, 0] for version in versions if version)\n below_threshold = any(version < threshold_version for version in versions if version)\n if missing_versions:\n message = [['Version', 'FAIL', 'Unable to determine controller Purity Version. 
MANUAL CHECK REQUIRED!']]\n elif versions_mismatched:\n message = [['Version', 'FAIL', 'Purity version mismatch between controllers!']]\n elif purity_5_0_x:\n message = [['Version', 'FAIL', 'Array is running {}, 5.0.x versions are not supported.'.format(localv)]]\n elif below_threshold:\n message = [['Version', 'FAIL', 'Array is running {}, below supported threshold.'.format(localv)]]\n elif not any([missing_versions, versions_mismatched, purity_5_0_x, below_threshold]):\n message = [['Version', 'PASS', 'Both controllers are running {}'.format(localv)]]\n else:\n message = [['Version', 'FAIL', 'Unable to verify version information.']]\n return message\n\n\ndef check_iscsi_ports(purenetwork_list):\n # type: (str) -> List[List[str]]\n \"\"\"Run iSCSI checks.\n\n Arguments:\n purenetwork_list (str): Unsplit output from purenetwork list output.\n Returns:\n messages (list): pass or fail result messages\n \"\"\"\n # re.split() takes strings rather than regex for some reason.\n # pylint: disable=anomalous-backslash-in-string\n pnl_dict_list = table_to_dict(purenetwork_list.splitlines(), delimiter='\\s{2,}')\n iscsi_hbas = []\n\n for device in pnl_dict_list:\n if device['speed'].lower() == '10.00 gb/s' and device['services'] == 'iscsi':\n iscsi_hbas.append(device['name'])\n\n pass_fail = 'PASS' if len(iscsi_hbas) >= 4 else 'FAIL'\n base_message = 'Four valid 10.00GB/s iSCSI Ports required, {} found'.format(len(iscsi_hbas))\n if iscsi_hbas:\n base_message += ': {}'.format(', '.join(iscsi_hbas))\n message = [['ISCSI', pass_fail, base_message]]\n\n return message\n\n\ndef check_syncrep(local_version, tunables):\n # type: (str, str) -> List[List[str]]\n \"\"\"Run syncrep checks.\n\n Arguments:\n local_version (str): Local controller version.\n tunables (str): Unsplit output from pureadm list-tunable output.\n Returns:\n messages (list): pass/fail result messages\n \"\"\"\n localv = Version(local_version)\n\n ps_syncrep_enabled = [tunable for tunable in tunables.splitlines() if 'ps_syncrep_enabled' in tunable.lower()]\n if bool(ps_syncrep_enabled) and localv > Version('5.1.1'):\n message = [['Syncrep', 'PASS', '{} Allows ActiveCluster and WFS to run concurrently.'.format(localv)]]\n elif ps_syncrep_enabled:\n message = [['Syncrep', 'FAIL', 'ActiveCluster is enabled']]\n else:\n message = [['Syncrep', 'PASS', 'ActiveCluster tunable is not set.']]\n return message\n\n\ndef get_ctrl_num():\n # type: () -> str\n \"\"\" Get \"ct0\" or \"ct1\" from hostname. 
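E.g. an array hostname like 'array-name-ct0' yields 'ct0'. 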
\"\"\"\n hostname = subprocess.check_output(['hostname'], universal_newlines=True)\n # hostname looks like: array-name-ct0\n ct_num = str(hostname.split('-')[-1].strip())\n return ct_num\n\n\ndef get_interface_dict(interfaces):\n # type: (List[str]) -> Dict[str, str]\n \"\"\" Get dictionary of ctx.ethx -> IP from purenetwork list.\n Arguments:\n interfaces (list): Interfaces that have been assigned.\n\n Returns:\n interface_dict (dict): ctx.ethx -IP from purenetwork listself.\n\n Example:\n interfaces: ['ETH6', 'ETH7']\n\n interface_dict: {'ct0.eth6': '10.204.121.24',\n 'ct0.eth7': '10.204.121.25',\n 'ct1.eth6': '10.204.121.26',\n 'ct1.eth7': '10.204.121.27'}\n \"\"\"\n unsplit_lines = subprocess.check_output(['purenetwork', 'list'], universal_newlines=True)\n purenetwork_lines = unsplit_lines.splitlines()\n interface_dict = {}\n\n for line in purenetwork_lines:\n for interface in interfaces:\n # pylint: disable=line-too-long\n # Example line:\n # ct0.eth8 True - 192.168.26.58 255.255.255.0 192.168.26.2 9000 90:e2:ba:6b:17:95 10.00 Gb/s iscsi -\n # So if one of our interfaces is in the line, we'll pull out the IP address.\n # interface.lower looks like: \"ct0.eth8\"\n if interface.lower() in line.lower():\n split_line = line.split()\n # ct0.eth8 in the example line above\n interface_name = str(split_line[0])\n # 192.168.26.58 in the example line above\n interface_ip = str(split_line[3])\n interface_dict[interface_name] = interface_ip\n\n return interface_dict\n\n\ndef get_tty_wid():\n # type: () -> int\n \"\"\"Determine the width of the current terminal.\"\"\"\n try:\n _, tty_wid = struct.unpack('hh', fcntl.ioctl(2, termios.TIOCGWINSZ, '1234'))\n # Leaving the broad exception, because PowerShell.\n # pylint: disable=broad-except\n except Exception:\n # We catch all Exceptions to be safe, ignore it and use the default terminal width.\n tty_wid = 80\n # Workaround: Powershell's scroll bar may displace a character and cause word-wrap.\n # We simply remove 2 from the tty_wid to be safe.\n tty_wid -= 2\n return int(tty_wid)\n\n\ndef status_update(update=None, output_pipe=sys.stderr):\n # type: (Optional[str], Any) -> None\n \"\"\"Print a status bar at the bottom of the screen.\n\n Arguments:\n update: (str) that will be printed to the output_pipe.\n NOTE: If update is None (Default), a blank line will be printed to clear the screen.\n output_pipe: (pipe) destination pipe that update will be use.\n NOTE: pipe object must have \"write\" and \"flush\" methods.\n \"\"\"\n if not hasattr(output_pipe, 'write') or not hasattr(output_pipe, 'flush'):\n msg = 'Output_pipe object is missing \"write\" and/or \"flush\" methods.'\n raise TypeError(msg)\n tty_wid = get_tty_wid()\n # Write a blank line to prevent overlap with any previous update text.\n output_pipe.write('\\r' + ' ' * tty_wid + '\\r')\n if not update:\n return\n # Avoid word-wrap by truncating long update text strings to the tty_wid.\n update = ''.join(update[:tty_wid])\n # Print the new update and return to the beginning of the line.\n # The extra space at the beginning prevents the curser from covering cover the first letter\n output_pipe.write('\\r {}\\r'.format(update))\n # NOTE: This never printed a newline character, the next output will begin on the same line.\n output_pipe.flush()\n\n\ndef get_table_info(list_of_lists):\n # type: (List[List[str]]) -> Dict[int, int]\n \"\"\" Create table_info for making separators.\n Arguments:\n list_of_lists (list): Any list of lists that can be stringified.\n\n Returns:\n table_info (OrderedDict): 
Index and max column length for each item\n                           in my list of lists - i.e. list[:][0] has a\n                           max length of 5 characters for all lists.\n    \"\"\"\n    table_info = collections.OrderedDict()  # type: collections.OrderedDict\n    for list_item in list_of_lists:\n        for index, col in enumerate(list_item):\n            max_len = table_info.get(index, 0)\n            # Make sure whatever it is, we try to stringify it\n            # for length testing. Formats below will take care\n            # of making it a string in the end.\n            str_col = str(col)\n            if len(str_col) > max_len:\n                table_info[index] = len(str_col)\n    return dict(table_info)\n\n\ndef create_separator(table_info, header=False):\n    # type: (Dict[int, int], bool) -> List[str]\n    \"\"\" Create header and normal separators for list of lists.\n    Arguments:\n        table_info (dict): Index and max column length for each item\n                           in my list of lists - i.e. list[:][0] has a\n                           max length of 5 characters for all lists.\n        header (bool): Whether or not the separator should be a header separator\n    Returns:\n        separator (list): separator segments using = for header, - for normal\n    \"\"\"\n    sep_char = \"=\" if header else \"-\"\n    separator = []\n    for index, column_width in iteritems(table_info):\n        # For each item, add a +==== or +----\n        # | Hello |  <= Width of 5, add 2 for spaces\n        # +=======   <= We only add up to the next delimiter because\n        #               the next column item will add the trailing +.\n        separator.append('+' + sep_char * (column_width + 2))\n    # Add a trailing + to our separators to close out the last space.\n    # | Hello |\n    # +=======+  <= Adds this extra + at the end that wasn't taken care of yet.\n    separator.append('+')\n    return separator\n\n\ndef create_table(list_of_lists, header=True, vertical_sep=False):\n    # type: (List[List[str]], bool, bool) -> List[str]\n    \"\"\" Create a table based on a list of lists.\n    Arguments:\n        list_of_lists (list): Each item in this list is a row and should be a list\n                              of values for columns\n        header (bool): If true, first line will be treated as a header.\n        vertical_sep (bool): If true, vertical separators will be added between rows.\n\n    Returns:\n        table_lines (list): Strings for each line in the table.\n    \"\"\"\n    table_lines = []\n    first_row = []\n\n    # Go through and get our widths from the list of lists\n    table_info = get_table_info(list_of_lists)\n\n    # Create separators based on the widths we got.\n    header_sep = create_separator(table_info, header=True)\n    normal_sep = create_separator(table_info)\n\n    # Build the first row up front since it might be a header. If it is, it\n    # needs to be wrapped in header separators before the remaining rows; if\n    # it is not, it is emitted like a normal row, with or without a vertical\n    # separator, depending on your selection.\n    for index, col_value in enumerate(list_of_lists[0]):\n        column_width = table_info.get(index)\n        formt_str = '| {{!s:{}}} '.format(column_width)\n        first_row.append(formt_str.format(str(col_value)))\n    first_row.append('|')\n\n    # If it's a header, wrap it up, otherwise give it raw.\n    if header:\n        # +======+====+====+=======+======+\n        # | Here | Is | my | first | line |\n        # +======+====+====+=======+======+\n        table_lines.append(''.join(header_sep))\n        table_lines.append(''.join(first_row))\n        table_lines.append(''.join(header_sep))\n    elif vertical_sep:\n        # +------+----+----+-------+------+\n        # | Here | Is | my | first | line |\n        # +------+----+----+-------+------+\n        table_lines.append(''.join(normal_sep))\n        table_lines.append(''.join(first_row))\n        table_lines.append(''.join(normal_sep))\n    else:\n        # +------+----+----+-------+------+\n        # | Here | Is | my | first | line |\n        table_lines.append(''.join(normal_sep))\n        table_lines.append(''.join(first_row))\n\n    # Go through the rest and add them.\n    for line in list_of_lists[1:]:\n        row = []\n        for index, col_value in enumerate(line):\n            column_width = table_info.get(index)\n            formt_str = '| {{!s:{}}} '.format(column_width)\n            row.append(formt_str.format(str(col_value)))\n        row.append('|')\n        table_lines.append(''.join(row))\n        # If we want vertical separators, add them after every row.\n        if vertical_sep:\n            table_lines.append(''.join(normal_sep))\n\n    # If we aren't vertically separating, the closing separator row has not\n    # been appended yet, so add it now.\n    if not vertical_sep:\n        table_lines.append(''.join(normal_sep))\n\n    return table_lines\n\n\ndef build_table_and_print(list_of_lists, header=True, vertical_sep=False):\n    # type: (List[List[str]], bool, bool) -> None\n    \"\"\" Print a table from a list of lists. \"\"\"\n    table = create_table(list_of_lists, header=header, vertical_sep=vertical_sep)\n    for line in table:\n        print(line)\n\n\n# All required arguments.\n# pylint: disable=too-many-arguments\ndef build_ping_command(interface,  # type: str\n                       target,  # type: str\n                       interface_dict,  # type: Dict[str, str]\n                       mtu,  # type: int\n                       count,  # type: int\n                       interval,  # type: Union[float, int]\n                       ctrl  # type: str\n                       ):\n    # type: (...) -> List[str]\n    \"\"\" Build a ping command for subprocess from args.\n    Arguments:\n        interface (str): Interface to ping from - e.g. 
\"ct0.eth6\"\n target (str): IP address to ping from the interface.\n interface_dict (dict): interface to IP information\n mtu (int): Actual MTU requested (Do not account for overhead)\n count (int): Number of times to ping the target\n interval (int): How long in seconds between pings\n ctrl (str): Which controller we are on.\n\n Returns:\n ping_command (list): Subprocess command for the requested args.\n \"\"\"\n real_mtu = mtu - 28\n # Get the interface IP address to use due to PURE-94901\n interface_ip = interface_dict[interface]\n base_command = ['ping', '-I', interface_ip, '-c', str(count), '-i',\n str(interval), '-s', str(real_mtu), '-Mdo', target]\n # Modify the command for peer or not based on controller of the interface.\n if ctrl in interface:\n ping_command = base_command\n else:\n ping_command = ['ssh', 'peer', ' '.join(base_command)]\n return ping_command\n\n\n# All required arguments.\n# pylint: disable=too-many-arguments\ndef ping(interface, # type: str\n target, # type: str\n interface_dict, # type: Dict[str, str]\n mtu=1500 # type: int\n ):\n # type: (...) -> str\n \"\"\" Ping from a specific interface to a target IP.\n Arguments:\n interface (str): Interface to ping from - e.g. \"ct0.eth6\"\n target (str): IP address to ping from the interface.\n interface_dict (dict): interface to IP information\n mtu (int): Actual MTU requested (Do not account for overhead)\n count (int): Number of times to ping the target\n interval (int): How long in seconds between pings\n ctrl (str): Which controller we are on.\n\n Returns:\n ping_command (list): Subprocess command for the requested args.\n \"\"\"\n # pylint: disable=global-statement\n global PING_COUNT\n global PING_INTERVAL\n ctrl = get_ctrl_num()\n ping_command = build_ping_command(interface, target, interface_dict, mtu, PING_COUNT, PING_INTERVAL, ctrl)\n status_update('Running command: {}'.format(' '.join(ping_command)))\n try:\n result = subprocess.check_output(ping_command, universal_newlines=True)\n except subprocess.CalledProcessError as pingout:\n result = pingout.output\n CONNECTIVITY_FAILURES.append(['Connectivity', 'FAIL', 'Failed Command: {}'.format(' '.join(ping_command))])\n return str(result)\n\n\ndef pretty_ping(interface, ip_addr, interface_dict):\n # type: (str, str, Dict[str, str]) -> List[str]\n \"\"\" Wrapper around default ping to parse results.\n Arguments:\n interface (str): Interface from which to ping\n ip_addr (str): IP address of the target to ping\n interface_dict (dict):\n\n Returns:\n result (list): Formatted parsed ping results.\n \"\"\"\n results = ping(interface, ip_addr, interface_dict)\n match_dict = {'interface': interface, 'ip_addr': ip_addr}\n # Match either success or failed pings:\n # 1 packets transmitted, 1 received, 0% packet loss, time 0ms\n # 1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms\n # pylint: disable=line-too-long\n ping_success_regex = re.compile(r'(?P\\w+) packets transmitted, (?P\\w+) received,(\\s+\\+(?P\\w) errors,)? (?P\\w+)% packet loss, time (?P